# limitations under the License.
##
+from copy import deepcopy
from http import HTTPStatus
from itertools import product
import logging
"image": Ns._process_image_params,
"flavor": Ns._process_flavor_params,
"vdu": Ns._process_vdu_params,
+ "classification": Ns._process_classification_params,
+ "sfi": Ns._process_sfi_params,
+ "sf": Ns._process_sf_params,
+ "sfp": Ns._process_sfp_params,
"affinity-or-anti-affinity-group": Ns._process_affinity_group_params,
+ "shared-volumes": Ns._process_shared_volumes_params,
}
self.db_path_map = {
"net": "vld",
"image": "image",
"flavor": "flavor",
"vdu": "vdur",
+ "classification": "classification",
+ "sfi": "sfi",
+ "sf": "sf",
+ "sfp": "sfp",
"affinity-or-anti-affinity-group": "affinity-or-anti-affinity-group",
+ "shared-volumes": "shared-volumes",
}
def init_db(self, target_version):
# Pinning policy "ISOLATE" uses cores as host should not support SMT architecture
# Pinning policy "PREFER" uses threads in case host supports SMT architecture
numa[
- "cores"
- if guest_epa_quota.get("cpu-thread-pinning-policy") == "ISOLATE"
- else "threads"
+ (
+ "cores"
+ if guest_epa_quota.get("cpu-thread-pinning-policy") == "ISOLATE"
+ else "threads"
+ )
] = max(vcpu_count, 1)
local_epa_vcpu_set = True
db = kwargs.get("db")
target_vdur = {}
+ for vnf in indata.get("vnf", []):
+ for vdur in vnf.get("vdur", []):
+ if vdur.get("ns-flavor-id") == target_flavor.get("id"):
+ target_vdur = vdur
+
+ vim_flavor_id = (
+ target_vdur.get("additionalParams", {}).get("OSM", {}).get("vim_flavor_id")
+ )
+ if vim_flavor_id: # vim-flavor-id was passed so flavor won't be created
+ return {"find_params": {"vim_flavor_id": vim_flavor_id}}
+
flavor_data = {
"disk": int(target_flavor["storage-gb"]),
"ram": int(target_flavor["memory-mb"]),
"vcpus": int(target_flavor["vcpu-count"]),
}
- for vnf in indata.get("vnf", []):
- for vdur in vnf.get("vdur", []):
- if vdur.get("ns-flavor-id") == target_flavor.get("id"):
- target_vdur = vdur
-
if db and isinstance(indata.get("vnf"), list):
vnfd_id = indata.get("vnf")[0].get("vnfd-id")
vnfd = db.get_one("vnfds", {"_id": vnfd_id})
flavor_data_name = flavor_data.copy()
flavor_data_name["name"] = target_flavor["name"]
extra_dict["params"] = {"flavor_data": flavor_data_name}
+ return extra_dict
+
+ @staticmethod
+ def _prefix_ip_address(ip_address):
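+ """Return the IP address with a prefix length, appending /32 when none is given."""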
+ if "/" not in ip_address:
+ ip_address += "/32"
+ return ip_address
+
+ @staticmethod
+ def _process_ip_proto(ip_proto):
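+ """Translate an IP protocol number (1, 6, 17) to its name (icmp, tcp, udp)."""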
+ if ip_proto:
+ if ip_proto == 1:
+ ip_proto = "icmp"
+ elif ip_proto == 6:
+ ip_proto = "tcp"
+ elif ip_proto == 17:
+ ip_proto = "udp"
+ return ip_proto
+
+ @staticmethod
+ def _process_classification_params(
+ target_classification: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """[summary]
+
+ Args:
+ target_classification (Dict[str, Any]): Classification dictionary parameters that need to be processed to create the resource on the VIM.
+ indata (Dict[str, Any]): Deployment info.
+ vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+ target_record_id (str): Task record ID.
+ **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+ Returns:
+ Dict[str, Any]: Return parameters required to create classification and Items on which classification is dependent.
+ """
+ vnfr_id = target_classification["vnfr_id"]
+ vdur_id = target_classification["vdur_id"]
+ port_index = target_classification["ingress_port_index"]
+ extra_dict = {}
+
+ classification_data = {
+ "name": target_classification["id"],
+ "source_port_range_min": target_classification["source-port"],
+ "source_port_range_max": target_classification["source-port"],
+ "destination_port_range_min": target_classification["destination-port"],
+ "destination_port_range_max": target_classification["destination-port"],
+ }
+
+ classification_data["source_ip_prefix"] = Ns._prefix_ip_address(
+ target_classification["source-ip-address"]
+ )
+
+ classification_data["destination_ip_prefix"] = Ns._prefix_ip_address(
+ target_classification["destination-ip-address"]
+ )
+
+ classification_data["protocol"] = Ns._process_ip_proto(
+ int(target_classification["ip-proto"])
+ )
+
+ db = kwargs.get("db")
+ vdu_text = Ns._get_vnfr_vdur_text(db, vnfr_id, vdur_id)
+
+ extra_dict = {"depends_on": [vdu_text]}
+
+ extra_dict = {"depends_on": [vdu_text]}
+ classification_data["logical_source_port"] = "TASK-" + vdu_text
+ classification_data["logical_source_port_index"] = port_index
+
+ extra_dict["params"] = classification_data
+
+ return extra_dict
+
+ @staticmethod
+ def _process_sfi_params(
+ target_sfi: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """[summary]
+
+ Args:
+ target_sfi (Dict[str, Any]): SFI dictionary parameters that need to be processed to create the resource on the VIM.
+ indata (Dict[str, Any]): Deployment info.
+ vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+ target_record_id (str): Task record ID.
+ **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+ Returns:
+ Dict[str, Any]: Return parameters required to create SFI and Items on which SFI is dependent.
+ """
+
+ vnfr_id = target_sfi["vnfr_id"]
+ vdur_id = target_sfi["vdur_id"]
+
+ sfi_data = {
+ "name": target_sfi["id"],
+ "ingress_port_index": target_sfi["ingress_port_index"],
+ "egress_port_index": target_sfi["egress_port_index"],
+ }
+
+ db = kwargs.get("db")
+ vdu_text = Ns._get_vnfr_vdur_text(db, vnfr_id, vdur_id)
+
+ extra_dict = {"depends_on": [vdu_text]}
+ sfi_data["ingress_port"] = "TASK-" + vdu_text
+ sfi_data["egress_port"] = "TASK-" + vdu_text
+
+ extra_dict["params"] = sfi_data
+
+ return extra_dict
+
+ @staticmethod
+ def _get_vnfr_vdur_text(db, vnfr_id, vdur_id):
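+ """Return the record text "vnfrs:<vnfr_id>:vdur.<id>" of the VDUR whose
+ vdu-id-ref matches vdur_id, or an empty string if no match is found."""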
+ vnf_preffix = "vnfrs:{}".format(vnfr_id)
+ db_vnfr = db.get_one("vnfrs", {"_id": vnfr_id})
+ vdur_list = []
+ vdu_text = ""
+
+ if db_vnfr:
+ vdur_list = [
+ vdur["id"] for vdur in db_vnfr["vdur"] if vdur["vdu-id-ref"] == vdur_id
+ ]
+
+ if vdur_list:
+ vdu_text = vnf_preffix + ":vdur." + vdur_list[0]
+
+ return vdu_text
+
+ @staticmethod
+ def _process_sf_params(
+ target_sf: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """[summary]
+
+ Args:
+ target_sf (Dict[str, Any]): SF dictionary parameters that need to be processed to create the resource on the VIM.
+ indata (Dict[str, Any]): Deployment info.
+ vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+ target_record_id (str): Task record ID.
+ **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+ Returns:
+ Dict[str, Any]: Return parameters required to create SF and Items on which SF is dependent.
+ """
+
+ nsr_id = kwargs.get("nsr_id", "")
+ sfis = target_sf["sfis"]
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ extra_dict = {"depends_on": [], "params": []}
+ sf_data = {"name": target_sf["id"], "sfis": sfis}
+
+ for count, sfi in enumerate(sfis):
+ sfi_text = ns_preffix + ":sfi." + sfi
+ sfis[count] = "TASK-" + sfi_text
+ extra_dict["depends_on"].append(sfi_text)
+
+ extra_dict["params"] = sf_data
+
+ return extra_dict
+
+ @staticmethod
+ def _process_sfp_params(
+ target_sfp: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """[summary]
+
+ Args:
+ target_sfp (Dict[str, Any]): SFP dictionary parameters that need to be processed to create the resource on the VIM.
+ indata (Dict[str, Any]): Deployment info.
+ vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+ target_record_id (str): Task record ID.
+ **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+ Returns:
+ Dict[str, Any]: Return parameters required to create SFP and Items on which SFP is dependent.
+ """
+
+ nsr_id = kwargs.get("nsr_id")
+ sfs = target_sfp["sfs"]
+ classifications = target_sfp["classifications"]
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ extra_dict = {"depends_on": [], "params": []}
+ sfp_data = {
+ "name": target_sfp["id"],
+ "sfs": sfs,
+ "classifications": classifications,
+ }
+
+ for count, sf in enumerate(sfs):
+ sf_text = ns_preffix + ":sf." + sf
+ sfs[count] = "TASK-" + sf_text
+ extra_dict["depends_on"].append(sf_text)
+
+ for count, classi in enumerate(classifications):
+ classi_text = ns_preffix + ":classification." + classi
+ classifications[count] = "TASK-" + classi_text
+ extra_dict["depends_on"].append(classi_text)
+
+ extra_dict["params"] = sfp_data
return extra_dict
disk_list.append(persistent_root_disk[vsd["id"]])
return persistent_root_disk
+ return persistent_root_disk
@staticmethod
def find_persistent_volumes(
if not virtual_storage_desc.get("vdu-storage-requirements"):
return False
for item in virtual_storage_desc.get("vdu-storage-requirements", {}):
- if item.get("key") == "keep-volume" and item.get("value") == "true":
+ if item.get("key") == "keep-volume" and item.get("value").lower() == "true":
return True
return False
+ @staticmethod
+ def is_shared_volume(
+ virtual_storage_desc: Dict[str, Any], vnfd_id: str
+ ) -> (str, bool):
+ """Function to decide if the volume type is multi attached or not .
+
+ Args:
+ virtual_storage_desc (Dict[str, Any]): virtual storage description dictionary
+ vnfd_id (str): vnfd id
+
+ Returns:
+ name (str): New name if it is a multi-attach disk, otherwise the original id.
+ bool: True if the disk is multi-attach, False otherwise.
+ """
+
+ if vdu_storage_requirements := virtual_storage_desc.get(
+ "vdu-storage-requirements", {}
+ ):
+ for item in vdu_storage_requirements:
+ if (
+ item.get("key") == "multiattach"
+ and item.get("value").lower() == "true"
+ ):
+ name = f"shared-{virtual_storage_desc['id']}-{vnfd_id}"
+ return name, True
+ return virtual_storage_desc["id"], False
+
@staticmethod
def _sort_vdu_interfaces(target_vdu: dict) -> None:
"""Sort the interfaces according to position number.
"size": root_disk["size-of-storage"],
"keep": Ns.is_volume_keeping_required(root_disk),
}
-
disk_list.append(persistent_root_disk[vsd["id"]])
break
persistent_root_disk: dict,
persistent_ordinary_disk: dict,
disk_list: list,
+ extra_dict: dict,
+ vnf_id: str = None,
+ nsr_id: str = None,
) -> None:
"""Fill the disk list by adding persistent ordinary disks.
== "persistent-storage:persistent-storage"
and disk["id"] not in persistent_root_disk.keys()
):
+ name, multiattach = Ns.is_shared_volume(disk, vnf_id)
persistent_ordinary_disk[disk["id"]] = {
+ "name": name,
"size": disk["size-of-storage"],
"keep": Ns.is_volume_keeping_required(disk),
+ "multiattach": multiattach,
}
disk_list.append(persistent_ordinary_disk[disk["id"]])
+ if multiattach: # VDU creation has to wait for shared volumes
+ extra_dict["depends_on"].append(
+ f"nsrs:{nsr_id}:shared-volumes.{name}"
+ )
@staticmethod
def _prepare_vdu_affinity_group_list(
vnf_preffix = "vnfrs:{}".format(vnfr_id)
ns_preffix = "nsrs:{}".format(nsr_id)
image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
- extra_dict = {"depends_on": [image_text]}
+ flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
+ extra_dict = {"depends_on": [image_text, flavor_text]}
net_list = []
-
persistent_root_disk = {}
persistent_ordinary_disk = {}
vdu_instantiation_volumes_list = []
- vdu_instantiation_flavor_id = None
disk_list = []
vnfd_id = vnfr["vnfd-id"]
vnfd = db.get_one("vnfds", {"_id": vnfd_id})
-
# If the position info is provided for all the interfaces, it will be sorted
# according to position number ascendingly.
if all(
vdu_instantiation_volumes_list = (
target_vdu.get("additionalParams").get("OSM", {}).get("vdu_volumes")
)
- vdu_instantiation_flavor_id = (
- target_vdu.get("additionalParams").get("OSM", {}).get("vim_flavor_id")
- )
-
- # flavor id
- if vdu_instantiation_flavor_id:
- flavor_id = vdu_instantiation_flavor_id
- else:
- flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
- flavor_id = "TASK-" + flavor_text
- extra_dict["depends_on"].append(flavor_text)
if vdu_instantiation_volumes_list:
# Find the root volumes and add to the disk_list
)
# Add the persistent non-root disks to disk_list
Ns._add_persistent_ordinary_disks_to_disk_list(
- target_vdu, persistent_root_disk, persistent_ordinary_disk, disk_list
+ target_vdu,
+ persistent_root_disk,
+ persistent_ordinary_disk,
+ disk_list,
+ extra_dict,
+ vnfd["id"],
+ nsr_id,
)
affinity_group_list = Ns._prepare_vdu_affinity_group_list(
extra_dict["params"] = {
"name": "{}-{}-{}-{}".format(
- indata["name"][:16],
- vnfr["member-vnf-index-ref"][:16],
- target_vdu["vdu-name"][:32],
+ indata["name"],
+ vnfr["member-vnf-index-ref"],
+ target_vdu["vdu-name"],
target_vdu.get("count-index") or 0,
),
"description": target_vdu["vdu-name"],
"start": True,
"image_id": "TASK-" + image_text,
- "flavor_id": flavor_id,
+ "flavor_id": "TASK-" + flavor_text,
"affinity_group_list": affinity_group_list,
"net_list": net_list,
"cloud_config": cloud_config or None,
"availability_zone_index": None, # TODO
"availability_zone_list": None, # TODO
}
+ return extra_dict
+ @staticmethod
+ def _process_shared_volumes_params(
+ target_shared_volume: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
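+ """Process shared (multi-attach) volume parameters to create the volume on the VIM.
+
+ Args:
+ target_shared_volume (Dict[str, Any]): Shared volume dictionary parameters that need to be processed to create the resource on the VIM.
+ indata (Dict[str, Any]): Deployment info.
+ vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+ target_record_id (str): Task record ID.
+ **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+ Returns:
+ Dict[str, Any]: Parameters required to create the shared volume.
+ """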
+ extra_dict = {}
+ shared_volume_data = {
+ "size": target_shared_volume["size-of-storage"],
+ "name": target_shared_volume["id"],
+ "type": target_shared_volume["type-of-storage"],
+ "keep": Ns.is_volume_keeping_required(target_shared_volume),
+ }
+ extra_dict["params"] = shared_volume_data
return extra_dict
@staticmethod
extra_dict["params"] = {
"affinity_group_data": affinity_group_data,
}
-
return extra_dict
@staticmethod
vim_details = {}
vim_details_text = existing_vdu["vim_info"][target_id].get("vim_details", None)
+
if vim_details_text:
vim_details = yaml.safe_load(f"{vim_details_text}")
net_item["model"] = interface.get("type")
if interface.get("ip-address"):
- net_item["ip_address"] = interface["ip-address"]
+ dual_ip = interface.get("ip-address").split(";")
+ if len(dual_ip) == 2:
+ net_item["ip_address"] = dual_ip
+ else:
+ net_item["ip_address"] = interface["ip-address"]
if interface.get("mac-address"):
net_item["mac_address"] = interface["mac-address"]
extra_dict["params"] = {
"name": "{}-{}-{}-{}".format(
- db_nsr["name"][:16],
- vnfr["member-vnf-index-ref"][:16],
- existing_vdu["vdu-name"][:32],
+ db_nsr["name"],
+ vnfr["member-vnf-index-ref"],
+ existing_vdu["vdu-name"],
existing_vdu.get("count-index") or 0,
),
"description": existing_vdu["vdu-name"],
process_params = None
vdu2cloud_init = indata.get("cloud_init_content") or {}
ro_nsr_public_key = db_ro_nsr["public_key"]
-
# According to the type of item, the path, the target_list,
# the existing_list and the method to process params are set
db_path = self.db_path_map[item]
process_params = self.process_params_function_map[item]
- if item in ("net", "vdu"):
+
+ if item in ("sfp", "classification", "sf", "sfi"):
+ db_record = "nsrs:{}:{}".format(nsr_id, db_path)
+ target_vnffg = indata.get("vnffg", [])[0]
+ target_list = target_vnffg[item]
+ existing_list = db_nsr.get(item, [])
+ elif item in ("net", "vdu"):
# This case is specific for the NS VLD (not applied to VDU)
if vnfr is None:
db_record = "nsrs:{}:{}".format(nsr_id, db_path)
)
target_list = target_vnf.get(db_path, []) if target_vnf else []
existing_list = vnfr.get(db_path, [])
- elif item in ("image", "flavor", "affinity-or-anti-affinity-group"):
+ elif item in (
+ "image",
+ "flavor",
+ "affinity-or-anti-affinity-group",
+ "shared-volumes",
+ ):
db_record = "nsrs:{}:{}".format(nsr_id, db_path)
target_list = indata.get(item, [])
existing_list = db_nsr.get(item, [])
else:
raise NsException("Item not supported: {}".format(item))
-
# ensure all the target_list elements has an "id". If not assign the index as id
if target_list is None:
target_list = []
for target_index, tl in enumerate(target_list):
if tl and not tl.get("id"):
tl["id"] = str(target_index)
-
# step 1 items (networks,vdus,...) to be deleted/updated
for item_index, existing_item in enumerate(existing_list):
target_item = next(
(t for t in target_list if t["id"] == existing_item["id"]),
None,
)
-
for target_vim, existing_viminfo in existing_item.get(
"vim_info", {}
).items():
# step 2 items (networks,vdus,...) to be created
for target_item in target_list:
item_index = -1
-
for item_index, existing_item in enumerate(existing_list):
if existing_item["id"] == target_item["id"]:
break
}
)
self.logger.debug("calculate_diff_items kwargs={}".format(kwargs))
+ if (
+ process_params == Ns._process_sfi_params
+ or Ns._process_sf_params
+ or Ns._process_classification_params
+ or Ns._process_sfp_params
+ ):
+ kwargs.update({"nsr_id": nsr_id, "db": self.db})
+
+ self.logger.debug("calculate_diff_items kwargs={}".format(kwargs))
extra_dict = process_params(
target_item,
return diff_items, task_index
+ def _process_vnfgd_sfp(self, sfp):
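+ """Extract the SFP id and its constituent SFs and classifications from an nfpd entry."""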
+ processed_sfp = {}
+ # getting sfp name, sfs and classifications in sfp to store it in processed_sfp
+ processed_sfp["id"] = sfp["id"]
+ sfs_in_sfp = [
+ sf["id"] for sf in sfp.get("position-desc-id", [])[0].get("cp-profile-id")
+ ]
+ classifications_in_sfp = [
+ classi["id"]
+ for classi in sfp.get("position-desc-id", [])[0].get("match-attributes")
+ ]
+
+ # creating a list of sfp with sfs and classifications
+ processed_sfp["sfs"] = sfs_in_sfp
+ processed_sfp["classifications"] = classifications_in_sfp
+
+ return processed_sfp
+
+ def _process_vnfgd_sf(self, sf):
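+ """Extract the SF id and its ordered SFI ids from a cp-profile entry."""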
+ processed_sf = {}
+ # getting name of sf
+ processed_sf["id"] = sf["id"]
+ # getting sfis in sf
+ sfis_in_sf = sf.get("constituent-profile-elements")
+ sorted_sfis = sorted(sfis_in_sf, key=lambda i: i["order"])
+ # getting sfis names
+ processed_sf["sfis"] = [sfi["id"] for sfi in sorted_sfis]
+
+ return processed_sf
+
+ def _process_vnfgd_sfi(self, sfi, db_vnfrs):
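+ """Resolve the SFI ingress/egress ports and the owning VNFR/VDUR from a constituent-profile element."""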
+ processed_sfi = {}
+ # getting name of sfi
+ processed_sfi["id"] = sfi["id"]
+
+ # getting ports in sfi
+ ingress_port = sfi["ingress-constituent-cpd-id"]
+ egress_port = sfi["egress-constituent-cpd-id"]
+ sfi_vnf_member_index = sfi["constituent-base-element-id"]
+
+ processed_sfi["ingress_port"] = ingress_port
+ processed_sfi["egress_port"] = egress_port
+
+ all_vnfrs = db_vnfrs.values()
+
+ sfi_vnfr = [
+ element
+ for element in all_vnfrs
+ if element["member-vnf-index-ref"] == sfi_vnf_member_index
+ ]
+ processed_sfi["vnfr_id"] = sfi_vnfr[0]["id"]
+
+ sfi_vnfr_cp = sfi_vnfr[0]["connection-point"]
+
+ ingress_port_index = [
+ c for c, element in enumerate(sfi_vnfr_cp) if element["id"] == ingress_port
+ ]
+ ingress_port_index = ingress_port_index[0]
+
+ processed_sfi["vdur_id"] = sfi_vnfr_cp[ingress_port_index][
+ "connection-point-vdu-id"
+ ]
+ processed_sfi["ingress_port_index"] = ingress_port_index
+ processed_sfi["egress_port_index"] = ingress_port_index
+
+ if egress_port != ingress_port:
+ egress_port_index = [
+ c
+ for c, element in enumerate(sfi_vnfr_cp)
+ if element["id"] == egress_port
+ ]
+ processed_sfi["egress_port_index"] = egress_port_index
+
+ return processed_sfi
+
+ def _process_vnfgd_classification(self, classification, db_vnfrs):
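+ """Resolve the classification ingress port index and the owning VNFR/VDUR from a match-attributes entry."""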
+ processed_classification = deepcopy(classification)
+ classi_vnf_member_index = processed_classification[
+ "constituent-base-element-id"
+ ]
+ logical_source_port = processed_classification["constituent-cpd-id"]
+
+ all_vnfrs = db_vnfrs.values()
+
+ classi_vnfr = [
+ element
+ for element in all_vnfrs
+ if element["member-vnf-index-ref"] == classi_vnf_member_index
+ ]
+ processed_classification["vnfr_id"] = classi_vnfr[0]["id"]
+
+ classi_vnfr_cp = classi_vnfr[0]["connection-point"]
+
+ ingress_port_index = [
+ c
+ for c, element in enumerate(classi_vnfr_cp)
+ if element["id"] == logical_source_port
+ ]
+ ingress_port_index = ingress_port_index[0]
+
+ processed_classification["ingress_port_index"] = ingress_port_index
+ processed_classification["vdur_id"] = classi_vnfr_cp[ingress_port_index][
+ "connection-point-vdu-id"
+ ]
+
+ return processed_classification
+
+ def _update_db_nsr_with_vnffg(self, processed_vnffg, vim_info, nsr_id):
+ """This method used to add viminfo dict to sfi, sf sfp and classification in indata and count info in db_nsr.
+
+ Args:
+ processed_vnffg (Dict[str, Any]): deployment info
+ vim_info (Dict): dictionary to store VIM resource information
+ nsr_id (str): NSR id
+
+ Returns: None
+ """
+
+ nsr_sfi = {}
+ nsr_sf = {}
+ nsr_sfp = {}
+ nsr_classification = {}
+ db_nsr_vnffg = deepcopy(processed_vnffg)
+
+ for count, sfi in enumerate(processed_vnffg["sfi"]):
+ sfi["vim_info"] = vim_info
+ sfi_count = "sfi.{}".format(count)
+ nsr_sfi[sfi_count] = db_nsr_vnffg["sfi"][count]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sfi)
+
+ for count, sf in enumerate(processed_vnffg["sf"]):
+ sf["vim_info"] = vim_info
+ sf_count = "sf.{}".format(count)
+ nsr_sf[sf_count] = db_nsr_vnffg["sf"][count]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sf)
+
+ for count, sfp in enumerate(processed_vnffg["sfp"]):
+ sfp["vim_info"] = vim_info
+ sfp_count = "sfp.{}".format(count)
+ nsr_sfp[sfp_count] = db_nsr_vnffg["sfp"][count]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sfp)
+
+ for count, classi in enumerate(processed_vnffg["classification"]):
+ classi["vim_info"] = vim_info
+ classification_count = "classification.{}".format(count)
+ nsr_classification[classification_count] = db_nsr_vnffg["classification"][
+ count
+ ]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_classification)
+
+ def process_vnffgd_descriptor(
+ self,
+ indata: dict,
+ nsr_id: str,
+ db_nsr: dict,
+ db_vnfrs: dict,
+ ) -> dict:
+ """This method used to process vnffgd parameters from descriptor.
+
+ Args:
+ indata (Dict[str, Any]): deployment info
+ nsr_id (str): NSR id
+ db_nsr: NSR record from DB
+ db_vnfrs: VNFRS record from DB
+
+ Returns:
+ Dict: Processed vnffg parameters.
+ """
+
+ processed_vnffg = {}
+ vnffgd = db_nsr.get("nsd", {}).get("vnffgd")
+ vnf_list = indata.get("vnf", [])
+ vim_text = ""
+
+ if vnf_list:
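+ # the VIM account of the first VNF in indata is used for all VNFFG items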
+ vim_text = "vim:" + vnf_list[0].get("vim-account-id", "")
+
+ vim_info = {}
+ vim_info[vim_text] = {}
+ processed_sfps = []
+ processed_classifications = []
+ processed_sfs = []
+ processed_sfis = []
+
+ # setting up initial empty entries for vnffg items in mongodb.
+ self.db.set_list(
+ "nsrs",
+ {"_id": nsr_id},
+ {
+ "sfi": [],
+ "sf": [],
+ "sfp": [],
+ "classification": [],
+ },
+ )
+
+ vnffg = vnffgd[0]
+ # getting sfps
+ sfps = vnffg.get("nfpd")
+ for sfp in sfps:
+ processed_sfp = self._process_vnfgd_sfp(sfp)
+ # appending the list of processed sfps
+ processed_sfps.append(processed_sfp)
+
+ # getting sfs in sfp
+ sfs = sfp.get("position-desc-id")[0].get("cp-profile-id")
+ for sf in sfs:
+ processed_sf = self._process_vnfgd_sf(sf)
+
+ # appending the list of processed sfs
+ processed_sfs.append(processed_sf)
+
+ # getting sfis in sf
+ sfis_in_sf = sf.get("constituent-profile-elements")
+ sorted_sfis = sorted(sfis_in_sf, key=lambda i: i["order"])
+
+ for sfi in sorted_sfis:
+ processed_sfi = self._process_vnfgd_sfi(sfi, db_vnfrs)
+
+ processed_sfis.append(processed_sfi)
+
+ classifications = sfp.get("position-desc-id")[0].get("match-attributes")
+ # getting classifications from sfp
+ for classification in classifications:
+ processed_classification = self._process_vnfgd_classification(
+ classification, db_vnfrs
+ )
+
+ processed_classifications.append(processed_classification)
+
+ processed_vnffg["sfi"] = processed_sfis
+ processed_vnffg["sf"] = processed_sfs
+ processed_vnffg["classification"] = processed_classifications
+ processed_vnffg["sfp"] = processed_sfps
+
+ # adding viminfo dict to sfi, sf sfp and classification
+ self._update_db_nsr_with_vnffg(processed_vnffg, vim_info, nsr_id)
+
+ # updating indata with the processed vnffg parameters
+ indata["vnffg"].append(processed_vnffg)
+
def calculate_all_differences_to_deploy(
self,
indata,
# set list with diffs:
changes_list = []
+ # processing vnffg from descriptor parameter
+ vnffgd = db_nsr.get("nsd").get("vnffgd")
+ if vnffgd is not None:
+ indata["vnffg"] = []
+ vnf_list = indata["vnf"]
+ processed_vnffg = {}
+
+ # in case of ns-delete
+ if not vnf_list:
+ processed_vnffg["sfi"] = []
+ processed_vnffg["sf"] = []
+ processed_vnffg["classification"] = []
+ processed_vnffg["sfp"] = []
+
+ indata["vnffg"].append(processed_vnffg)
+
+ else:
+ self.process_vnffgd_descriptor(
+ indata=indata,
+ nsr_id=nsr_id,
+ db_nsr=db_nsr,
+ db_vnfrs=db_vnfrs,
+ )
+
+ # getting updated db_nsr having vnffg parameters
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+
+ self.logger.debug(
+ "After processing vnffd parameters indata={} nsr={}".format(
+ indata, db_nsr
+ )
+ )
+
+ for item in ["sfp", "classification", "sf", "sfi"]:
+ self.logger.debug("process NS={} {}".format(nsr_id, item))
+ diff_items, task_index = self.calculate_diff_items(
+ indata=indata,
+ db_nsr=db_nsr,
+ db_ro_nsr=db_ro_nsr,
+ db_nsr_update=db_nsr_update,
+ item=item,
+ tasks_by_target_record_id=tasks_by_target_record_id,
+ action_id=action_id,
+ nsr_id=nsr_id,
+ task_index=task_index,
+ vnfr_id=None,
+ )
+ changes_list += diff_items
+
# NS vld, image and flavor
- for item in ["net", "image", "flavor", "affinity-or-anti-affinity-group"]:
+ for item in [
+ "net",
+ "image",
+ "flavor",
+ "affinity-or-anti-affinity-group",
+ ]:
self.logger.debug("process NS={} {}".format(nsr_id, item))
diff_items, task_index = self.calculate_diff_items(
indata=indata,
# VNF vlds and vdus
for vnfr_id, vnfr in db_vnfrs.items():
# vnfr_id need to be set as global variable for among others nested method _process_vdu_params
- for item in ["net", "vdu"]:
+ for item in ["net", "vdu", "shared-volumes"]:
self.logger.debug("process VNF={} {}".format(vnfr_id, item))
diff_items, task_index = self.calculate_diff_items(
indata=indata,
# Check each VNF of the target
for target_vnf in target_list:
- # Find this VNF in the list from DB
- vnfr_id = target_vnf.get("vnfInstanceId", None)
- if vnfr_id:
- existing_vnf = db_vnfrs.get(vnfr_id)
- db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
- # vim_account_id = existing_vnf.get("vim-account-id", "")
+ # Find this VNF in the list from DB, raise exception if vnfInstanceId is not found
+ vnfr_id = target_vnf["vnfInstanceId"]
+ existing_vnf = db_vnfrs.get(vnfr_id, {})
+ db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
+ # vim_account_id = existing_vnf.get("vim-account-id", "")
+ target_vdus = target_vnf.get("additionalParams", {}).get("vdu", [])
# Check each VDU of this VNF
- for target_vdu in target_vnf["additionalParams"].get("vdu", None):
+ if not target_vdus:
+ # Create target_vdu_list from DB, if VDUs are not specified
+ target_vdus = []
+ for existing_vdu in existing_vnf.get("vdur", []):
+ vdu_name = existing_vdu.get("vdu-name", None)
+ vdu_index = existing_vdu.get("count-index", 0)
+ vdu_to_be_healed = {"vdu-id": vdu_name, "count-index": vdu_index}
+ target_vdus.append(vdu_to_be_healed)
+ for target_vdu in target_vdus:
vdu_name = target_vdu.get("vdu-id", None)
# For multi instance VDU count-index is mandatory
# For single session VDU count-index is 0
count_index = target_vdu.get("count-index", 0)
item_index = 0
- existing_instance = None
- for instance in existing_vnf.get("vdur", None):
- if (
- instance["vdu-name"] == vdu_name
- and instance["count-index"] == count_index
- ):
- existing_instance = instance
- break
- else:
- item_index += 1
+ existing_instance = {}
+ if existing_vnf:
+ for instance in existing_vnf.get("vdur", {}):
+ if (
+ instance["vdu-name"] == vdu_name
+ and instance["count-index"] == count_index
+ ):
+ existing_instance = instance
+ break
+ else:
+ item_index += 1
target_record_id = "{}.{}".format(db_record, existing_instance["id"])
return_data = {
"status": global_status,
- "details": ". ".join(details)
- if details
- else "progress {}/{}".format(done, total),
+ "details": (
+ ". ".join(details) if details else "progress {}/{}".format(done, total)
+ ),
"nsr_id": nsr_id,
"action_id": action_id,
"tasks": task_list,
extra_dict,
):
self._assign_vim(target_vim)
- target_record = "vnfrs:{}:vdur.{}".format(vnf_id, vdu_index)
+ target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+ vnf_id, vdu_index, target_vim
+ )
target_record_id = "vnfrs:{}:vdur.{}".format(vnf_id, vdu_id)
deployment_info = {
"action_id": action_id,
):
target_vim, vim_info = next(k_v for k_v in vdu["vim_info"].items())
self._assign_vim(target_vim)
- target_record = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu_index)
+ target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+ vnf["_id"], vdu_index, target_vim
+ )
target_record_id = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"])
deployment_info = {
"action_id": action_id,
):
target_vim, vim_info = next(k_v for k_v in vdu["vim_info"].items())
self._assign_vim(target_vim)
- target_record = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu_index)
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ flavor_text = ns_preffix + ":flavor." + vdu["ns-flavor-id"]
+ extra_dict["depends_on"] = [flavor_text]
+ extra_dict["params"].update({"flavor_id": "TASK-" + flavor_text})
+ target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+ vnf["_id"], vdu_index, target_vim
+ )
target_record_id = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"])
deployment_info = {
"action_id": action_id,
)
return task
+ def verticalscale_flavor_task(
+ self, vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+ ):
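+ """Create a CREATE task for the flavor required by vertical scaling, targeting the last flavor entry of the NSR."""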
+ target_vim, vim_info = next(k_v for k_v in vdu["vim_info"].items())
+ self._assign_vim(target_vim)
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ target_record = "nsrs:{}:flavor.{}.vim_info.{}".format(
+ nsr_id, len(db_nsr["flavor"]) - 1, target_vim
+ )
+ target_record_id = "nsrs:{}:flavor.{}".format(nsr_id, len(db_nsr["flavor"]) - 1)
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+ task = Ns._create_task(
+ deployment_info=deployment_info,
+ target_id=target_vim,
+ item="flavor",
+ action="CREATE",
+ target_record=target_record,
+ target_record_id=target_record_id,
+ extra_dict=extra_dict,
+ )
+ return task
+
def verticalscale(self, session, indata, version, nsr_id, *args, **kwargs):
task_index = 0
extra_dict = {}
+ flavor_extra_dict = {}
now = time()
action_id = indata.get("action_id", str(uuid4()))
step = ""
"vcpus": numVirtualCpu,
"disk": sizeOfStorage,
}
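+ # find_params lets the VIM task reuse an existing matching flavor;
+ # params is used to create the flavor when no match is found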
+ flavor_data = {
+ "ram": virtualMemory,
+ "vcpus": numVirtualCpu,
+ "disk": sizeOfStorage,
+ }
+ flavor_extra_dict["find_params"] = {"flavor_data": flavor_data}
+ flavor_extra_dict["params"] = {"flavor_data": flavor_dict}
db_new_tasks = []
step = "Creating Tasks for vertical scaling"
with self.write_lock:
extra_dict["params"] = {
"vim_vm_id": vdu["vim-id"],
"flavor_dict": flavor_dict,
+ "vdu-id-ref": vdu["vdu-id-ref"],
+ "count-index": vdu["count-index"],
+ "vnf_instance_id": vnf_instance_id,
}
+ task = self.verticalscale_flavor_task(
+ vdu,
+ db_vnfr,
+ vdu_index,
+ action_id,
+ nsr_id,
+ task_index,
+ flavor_extra_dict,
+ )
+ db_new_tasks.append(task)
+ task_index += 1
task = self.verticalscale_task(
vdu,
db_vnfr,