# limitations under the License.
##
+from copy import deepcopy
from http import HTTPStatus
+from itertools import product
import logging
from random import choice as random_choice
from threading import Lock
from time import time
from traceback import format_exc as traceback_format_exc
-from typing import Any, Dict, Tuple, Type
+from typing import Any, Dict, List, Optional, Tuple, Type
from uuid import uuid4
from cryptography.hazmat.backends import default_backend as crypto_default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from jinja2 import (
Environment,
+ select_autoescape,
StrictUndefined,
TemplateError,
TemplateNotFound,
from osm_common.msgbase import MsgException
from osm_ng_ro.ns_thread import deep_get, NsWorker, NsWorkerException
from osm_ng_ro.validation import deploy_schema, validate_input
+import yaml
__author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
min_common_version = "0.1.16"
if text_id:
return text_id
- except Exception:
- pass
+ except Exception as error:
+        logging.exception(f"{error} occurred while getting process id")
# Return a random id
return "".join(random_choice("0123456789abcdef") for _ in range(12))
"image": Ns._process_image_params,
"flavor": Ns._process_flavor_params,
"vdu": Ns._process_vdu_params,
+ "classification": Ns._process_classification_params,
+ "sfi": Ns._process_sfi_params,
+ "sf": Ns._process_sf_params,
+ "sfp": Ns._process_sfp_params,
"affinity-or-anti-affinity-group": Ns._process_affinity_group_params,
+ "shared-volumes": Ns._process_shared_volumes_params,
}
self.db_path_map = {
"net": "vld",
"image": "image",
"flavor": "flavor",
"vdu": "vdur",
+ "classification": "classification",
+ "sfi": "sfi",
+ "sf": "sf",
+ "sfp": "sfp",
"affinity-or-anti-affinity-group": "affinity-or-anti-affinity-group",
+ "shared-volumes": "shared-volumes",
}
def init_db(self, target_version):
str: [description]
"""
try:
- env = Environment(undefined=StrictUndefined)
+ env = Environment(
+ undefined=StrictUndefined,
+ autoescape=select_autoescape(default_for_string=True, default=True),
+ )
template = env.from_string(cloud_init_content)
return template.render(params or {})
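+        # A minimal sketch of the rendering above (assumed values):
+        #   Ns._parse_jinja2(
+        #       cloud_init_content="hostname: {{ name }}",
+        #       params={"name": "vdu-0"},
+        #       context="example",
+        #   )  # -> "hostname: vdu-0"
+        # With StrictUndefined, a missing parameter raises an error instead of
+        # rendering silently as an empty string.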
"vim_name": None,
"vim_status": None,
"vim_details": None,
+ "vim_message": None,
"refresh_at": None,
},
"modified_at": now,
            Tuple[List[Dict[str, Any]], bool]: NUMA configuration list and the epa_vcpu_set flag
"""
numa = {}
+ numa_list = []
epa_vcpu_set = False
if guest_epa_quota.get("numa-node-policy"):
numa_node_policy = guest_epa_quota.get("numa-node-policy")
if numa_node_policy.get("node"):
- numa_node = numa_node_policy["node"][0]
-
- if numa_node.get("num-cores"):
- numa["cores"] = numa_node["num-cores"]
- epa_vcpu_set = True
-
- paired_threads = numa_node.get("paired-threads", {})
- if paired_threads.get("num-paired-threads"):
- numa["paired-threads"] = int(
- numa_node["paired-threads"]["num-paired-threads"]
- )
- epa_vcpu_set = True
+ for numa_node in numa_node_policy["node"]:
+ vcpu_list = []
+ if numa_node.get("id"):
+ numa["id"] = int(numa_node["id"])
+
+ if numa_node.get("vcpu"):
+ for vcpu in numa_node.get("vcpu"):
+ vcpu_id = int(vcpu.get("id"))
+ vcpu_list.append(vcpu_id)
+ numa["vcpu"] = vcpu_list
+
+ if numa_node.get("num-cores"):
+ numa["cores"] = numa_node["num-cores"]
+ epa_vcpu_set = True
+
+ paired_threads = numa_node.get("paired-threads", {})
+ if paired_threads.get("num-paired-threads"):
+ numa["paired_threads"] = int(
+ numa_node["paired-threads"]["num-paired-threads"]
+ )
+ epa_vcpu_set = True
- if paired_threads.get("paired-thread-ids"):
- numa["paired-threads-id"] = []
+ if paired_threads.get("paired-thread-ids"):
+ numa["paired-threads-id"] = []
- for pair in paired_threads["paired-thread-ids"]:
- numa["paired-threads-id"].append(
- (
- str(pair["thread-a"]),
- str(pair["thread-b"]),
+ for pair in paired_threads["paired-thread-ids"]:
+ numa["paired-threads-id"].append(
+ (
+ str(pair["thread-a"]),
+ str(pair["thread-b"]),
+ )
)
- )
- if numa_node.get("num-threads"):
- numa["threads"] = int(numa_node["num-threads"])
- epa_vcpu_set = True
+ if numa_node.get("num-threads"):
+ numa["threads"] = int(numa_node["num-threads"])
+ epa_vcpu_set = True
+
+ if numa_node.get("memory-mb"):
+ numa["memory"] = max(int(int(numa_node["memory-mb"]) / 1024), 1)
- if numa_node.get("memory-mb"):
- numa["memory"] = max(int(int(numa_node["memory-mb"]) / 1024), 1)
+ numa_list.append(numa)
+ numa = {}
- return numa, epa_vcpu_set
+ return numa_list, epa_vcpu_set
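+    # Illustrative input/output (assumed values): a guest_epa_quota such as
+    #   {"numa-node-policy": {"node": [{"id": 1, "num-cores": 2, "memory-mb": 2048}]}}
+    # yields ([{"id": 1, "cores": 2, "memory": 2}], True), one dict per NUMA node.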
@staticmethod
def _process_guest_epa_cpu_pinning_params(
guest_epa_quota.get("cpu-pinning-policy") == "DEDICATED"
and not epa_vcpu_set
):
+            # Pinning policy "REQUIRE" uses threads, as the host must support SMT architecture
+            # Pinning policy "ISOLATE" uses cores, as the host must not support SMT architecture
+            # Pinning policy "PREFER" uses threads in case the host supports SMT architecture
numa[
- "cores"
- if guest_epa_quota.get("cpu-thread-pinning-policy") != "PREFER"
- else "threads"
+ (
+ "cores"
+ if guest_epa_quota.get("cpu-thread-pinning-policy") == "ISOLATE"
+ else "threads"
+ )
] = max(vcpu_count, 1)
local_epa_vcpu_set = True
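+            # For example (assumed values): with vcpu_count=4, policy "ISOLATE"
+            # sets numa["cores"] = 4, while "REQUIRE"/"PREFER" set numa["threads"] = 4.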
"""
extended = {}
numa = {}
+ numa_list = []
if target_flavor.get("guest-epa"):
guest_epa = target_flavor["guest-epa"]
- numa, epa_vcpu_set = Ns._process_guest_epa_numa_params(
+ numa_list, epa_vcpu_set = Ns._process_guest_epa_numa_params(
guest_epa_quota=guest_epa
)
if guest_epa.get("mempage-size"):
extended["mempage-size"] = guest_epa.get("mempage-size")
+ if guest_epa.get("cpu-pinning-policy"):
+ extended["cpu-pinning-policy"] = guest_epa.get("cpu-pinning-policy")
+
+ if guest_epa.get("cpu-thread-pinning-policy"):
+ extended["cpu-thread-pinning-policy"] = guest_epa.get(
+ "cpu-thread-pinning-policy"
+ )
+
+ if guest_epa.get("numa-node-policy"):
+ if guest_epa.get("numa-node-policy").get("mem-policy"):
+ extended["mem-policy"] = guest_epa.get("numa-node-policy").get(
+ "mem-policy"
+ )
+
tmp_numa, epa_vcpu_set = Ns._process_guest_epa_cpu_pinning_params(
guest_epa_quota=guest_epa,
vcpu_count=int(target_flavor.get("vcpu-count", 1)),
epa_vcpu_set=epa_vcpu_set,
)
- numa.update(tmp_numa)
+ for numa in numa_list:
+ numa.update(tmp_numa)
extended.update(
Ns._process_guest_epa_quota_params(
)
if numa:
- extended["numas"] = [numa]
+ extended["numas"] = numa_list
return extended
Returns:
Dict[str, Any]: [description]
"""
+ db = kwargs.get("db")
+ target_vdur = {}
+
+ for vnf in indata.get("vnf", []):
+ for vdur in vnf.get("vdur", []):
+ if vdur.get("ns-flavor-id") == target_flavor.get("id"):
+ target_vdur = vdur
+
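+    # An operator-supplied flavor can be passed per VDU through instantiation
+    # parameters, e.g. (illustrative):
+    #   additionalParams: {"OSM": {"vim_flavor_id": "<existing VIM flavor id>"}}
+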
+ vim_flavor_id = (
+ target_vdur.get("additionalParams", {}).get("OSM", {}).get("vim_flavor_id")
+ )
+ if vim_flavor_id: # vim-flavor-id was passed so flavor won't be created
+ return {"find_params": {"vim_flavor_id": vim_flavor_id}}
+
flavor_data = {
"disk": int(target_flavor["storage-gb"]),
"ram": int(target_flavor["memory-mb"]),
"vcpus": int(target_flavor["vcpu-count"]),
}
- target_vdur = {}
- for vnf in indata.get("vnf", []):
- for vdur in vnf.get("vdur", []):
- if vdur.get("ns-flavor-id") == target_flavor["id"]:
- target_vdur = vdur
+ if db and isinstance(indata.get("vnf"), list):
+ vnfd_id = indata.get("vnf")[0].get("vnfd-id")
+ vnfd = db.get_one("vnfds", {"_id": vnfd_id})
+            # check if there is a persistent root disk
+ for vdu in vnfd.get("vdu", ()):
+ if vdu["name"] == target_vdur.get("vdu-name"):
+ for vsd in vnfd.get("virtual-storage-desc", ()):
+ if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
+ root_disk = vsd
+ if root_disk.get("type-of-storage", "").endswith(
+ "persistent-storage"
+ ):
+ flavor_data["disk"] = 0
for storage in target_vdur.get("virtual-storages", []):
if (
flavor_data_name = flavor_data.copy()
flavor_data_name["name"] = target_flavor["name"]
extra_dict["params"] = {"flavor_data": flavor_data_name}
+ return extra_dict
+
+ @staticmethod
+ def _prefix_ip_address(ip_address):
+ if "/" not in ip_address:
+ ip_address += "/32"
+ return ip_address
+
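+    # Illustrative behaviour of _prefix_ip_address (assumed inputs):
+    #   "192.0.2.10"   -> "192.0.2.10/32"  (host address gets a /32 prefix)
+    #   "192.0.2.0/24" -> "192.0.2.0/24"   (already prefixed, unchanged)
+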
+ @staticmethod
+ def _process_ip_proto(ip_proto):
+ if ip_proto:
+ if ip_proto == 1:
+ ip_proto = "icmp"
+ elif ip_proto == 6:
+ ip_proto = "tcp"
+ elif ip_proto == 17:
+ ip_proto = "udp"
+ return ip_proto
+
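+    # Illustrative mapping of IANA protocol numbers by _process_ip_proto
+    # (assumed inputs): 1 -> "icmp", 6 -> "tcp", 17 -> "udp"; any other value
+    # (e.g. 132) is returned unchanged.
+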
+ @staticmethod
+ def _process_classification_params(
+ target_classification: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+        """Process VNFFG classification parameters.
+
+        Args:
+            target_classification (Dict[str, Any]): Classification dictionary parameters that need to be processed to create a resource on the VIM
+            indata (Dict[str, Any]): Deployment info
+            vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+            target_record_id (str): Task record ID.
+            **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+        Returns:
+            Dict[str, Any]: Parameters required to create the classification and the items on which it depends.
+ """
+ vnfr_id = target_classification["vnfr_id"]
+ vdur_id = target_classification["vdur_id"]
+ port_index = target_classification["ingress_port_index"]
+ extra_dict = {}
+
+ classification_data = {
+ "name": target_classification["id"],
+ "source_port_range_min": target_classification["source-port"],
+ "source_port_range_max": target_classification["source-port"],
+ "destination_port_range_min": target_classification["destination-port"],
+ "destination_port_range_max": target_classification["destination-port"],
+ }
+
+ classification_data["source_ip_prefix"] = Ns._prefix_ip_address(
+ target_classification["source-ip-address"]
+ )
+
+ classification_data["destination_ip_prefix"] = Ns._prefix_ip_address(
+ target_classification["destination-ip-address"]
+ )
+
+ classification_data["protocol"] = Ns._process_ip_proto(
+ int(target_classification["ip-proto"])
+ )
+
+ db = kwargs.get("db")
+ vdu_text = Ns._get_vnfr_vdur_text(db, vnfr_id, vdur_id)
+
+        extra_dict = {"depends_on": [vdu_text]}
+ classification_data["logical_source_port"] = "TASK-" + vdu_text
+ classification_data["logical_source_port_index"] = port_index
+
+ extra_dict["params"] = classification_data
+
+ return extra_dict
+
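+    # Shape of the result (illustrative, assumed ids):
+    #   {"depends_on": ["vnfrs:<vnfr_id>:vdur.<vdur-record-id>"],
+    #    "params": {..., "logical_source_port": "TASK-vnfrs:<vnfr_id>:vdur.<...>"}}
+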
+ @staticmethod
+ def _process_sfi_params(
+ target_sfi: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+        """Process SFI (Service Function Instance) parameters.
+
+        Args:
+            target_sfi (Dict[str, Any]): SFI dictionary parameters that need to be processed to create a resource on the VIM
+            indata (Dict[str, Any]): Deployment info
+            vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+            target_record_id (str): Task record ID.
+            **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+        Returns:
+            Dict[str, Any]: Parameters required to create the SFI and the items on which it depends.
+ """
+
+ vnfr_id = target_sfi["vnfr_id"]
+ vdur_id = target_sfi["vdur_id"]
+
+ sfi_data = {
+ "name": target_sfi["id"],
+ "ingress_port_index": target_sfi["ingress_port_index"],
+ "egress_port_index": target_sfi["egress_port_index"],
+ }
+
+ db = kwargs.get("db")
+ vdu_text = Ns._get_vnfr_vdur_text(db, vnfr_id, vdur_id)
+
+ extra_dict = {"depends_on": [vdu_text]}
+ sfi_data["ingress_port"] = "TASK-" + vdu_text
+ sfi_data["egress_port"] = "TASK-" + vdu_text
+
+ extra_dict["params"] = sfi_data
return extra_dict
@staticmethod
- def _ip_profile_to_ro(
- ip_profile: Dict[str, Any],
+ def _get_vnfr_vdur_text(db, vnfr_id, vdur_id):
+ vnf_preffix = "vnfrs:{}".format(vnfr_id)
+ db_vnfr = db.get_one("vnfrs", {"_id": vnfr_id})
+ vdur_list = []
+ vdu_text = ""
+
+ if db_vnfr:
+ vdur_list = [
+ vdur["id"] for vdur in db_vnfr["vdur"] if vdur["vdu-id-ref"] == vdur_id
+ ]
+
+ if vdur_list:
+ vdu_text = vnf_preffix + ":vdur." + vdur_list[0]
+
+ return vdu_text
+
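+    # Illustrative result of _get_vnfr_vdur_text (assumed ids): for the first
+    # vdur whose vdu-id-ref matches vdur_id, the returned text is
+    #   "vnfrs:<vnfr_id>:vdur.<vdur-record-id>"
+    # and "" when no vdur matches.
+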
+ @staticmethod
+ def _process_sf_params(
+ target_sf: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
) -> Dict[str, Any]:
"""[summary]
Args:
- ip_profile (Dict[str, Any]): [description]
+            target_sf (Dict[str, Any]): SF dictionary parameters that need to be processed to create a resource on the VIM
+            indata (Dict[str, Any]): Deployment info.
+            vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+            target_record_id (str): Task record ID.
+            **kwargs (Dict[str, Any]): Used to send additional information to the task.
Returns:
- Dict[str, Any]: [description]
+            Dict[str, Any]: Parameters required to create the SF and the items on which it depends.
"""
- if not ip_profile:
- return None
-
- ro_ip_profile = {
- "ip_version": "IPv4"
- if "v4" in ip_profile.get("ip-version", "ipv4")
- else "IPv6",
- "subnet_address": ip_profile.get("subnet-address"),
- "gateway_address": ip_profile.get("gateway-address"),
- "dhcp_enabled": ip_profile.get("dhcp-params", {}).get("enabled", False),
- "dhcp_start_address": ip_profile.get("dhcp-params", {}).get(
- "start-address", None
- ),
- "dhcp_count": ip_profile.get("dhcp-params", {}).get("count", None),
+
+ nsr_id = kwargs.get("nsr_id", "")
+ sfis = target_sf["sfis"]
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ extra_dict = {"depends_on": [], "params": []}
+ sf_data = {"name": target_sf["id"], "sfis": sfis}
+
+ for count, sfi in enumerate(sfis):
+ sfi_text = ns_preffix + ":sfi." + sfi
+ sfis[count] = "TASK-" + sfi_text
+ extra_dict["depends_on"].append(sfi_text)
+
+ extra_dict["params"] = sf_data
+
+ return extra_dict
+
+ @staticmethod
+ def _process_sfp_params(
+ target_sfp: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+        """Process SFP (Service Function Path) parameters.
+
+        Args:
+            target_sfp (Dict[str, Any]): SFP dictionary parameters that need to be processed to create a resource on the VIM.
+            indata (Dict[str, Any]): Deployment info
+            vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+            target_record_id (str): Task record ID.
+            **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+        Returns:
+            Dict[str, Any]: Parameters required to create the SFP and the items on which it depends.
+ """
+
+ nsr_id = kwargs.get("nsr_id")
+ sfs = target_sfp["sfs"]
+ classifications = target_sfp["classifications"]
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ extra_dict = {"depends_on": [], "params": []}
+ sfp_data = {
+ "name": target_sfp["id"],
+ "sfs": sfs,
+ "classifications": classifications,
}
- if ip_profile.get("dns-server"):
- ro_ip_profile["dns_address"] = ";".join(
- [v["address"] for v in ip_profile["dns-server"] if v.get("address")]
- )
+ for count, sf in enumerate(sfs):
+ sf_text = ns_preffix + ":sf." + sf
+ sfs[count] = "TASK-" + sf_text
+ extra_dict["depends_on"].append(sf_text)
- if ip_profile.get("security-group"):
- ro_ip_profile["security_group"] = ip_profile["security-group"]
+ for count, classi in enumerate(classifications):
+ classi_text = ns_preffix + ":classification." + classi
+ classifications[count] = "TASK-" + classi_text
+ extra_dict["depends_on"].append(classi_text)
- return ro_ip_profile
+ extra_dict["params"] = sfp_data
+
+ return extra_dict
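+
+    # Illustrative result (assumed ids): with nsr_id="ns-1", sfs=["sf1"] and
+    # classifications=["c1"], the task references become:
+    #   depends_on = ["nsrs:ns-1:sf.sf1", "nsrs:ns-1:classification.c1"]
+    #   params["sfs"] = ["TASK-nsrs:ns-1:sf.sf1"]
+    #   params["classifications"] = ["TASK-nsrs:ns-1:classification.c1"]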
@staticmethod
def _process_net_params(
"id": vim_info.get("vim_network_id"),
},
}
- elif target_vld.get("mgmt-network"):
+ elif target_vld.get("mgmt-network") and not vim_info.get("provider_network"):
extra_dict["find_params"] = {
"mgmt": True,
"name": target_vld["id"],
"net_name": (
f"{indata.get('name')[:16]}-{target_vld.get('name', target_vld.get('id'))[:16]}"
),
- "ip_profile": Ns._ip_profile_to_ro(vim_info.get("ip_profile")),
+ "ip_profile": vim_info.get("ip_profile"),
"provider_network_profile": vim_info.get("provider_network"),
}
return extra_dict
@staticmethod
- def _process_vdu_params(
- target_vdu: Dict[str, Any],
- indata: Dict[str, Any],
- vim_info: Dict[str, Any],
- target_record_id: str,
- **kwargs: Dict[str, Any],
- ) -> Dict[str, Any]:
- """Function to process VDU parameters.
+ def find_persistent_root_volumes(
+ vnfd: dict,
+ target_vdu: dict,
+ vdu_instantiation_volumes_list: list,
+ disk_list: list,
+    ) -> Dict[str, Any]:
+ """Find the persistent root volumes and add them to the disk_list
+ by parsing the instantiation parameters.
Args:
- target_vdu (Dict[str, Any]): [description]
- indata (Dict[str, Any]): [description]
- vim_info (Dict[str, Any]): [description]
- target_record_id (str): [description]
+ vnfd (dict): VNF descriptor
+ target_vdu (dict): processed VDU
+            vdu_instantiation_volumes_list (list): instantiation parameters for each VDU, as a list
+ disk_list (list): to be filled up
Returns:
- Dict[str, Any]: [description]
+ persistent_root_disk (dict): Details of persistent root disk
+
"""
- vnfr_id = kwargs.get("vnfr_id")
- nsr_id = kwargs.get("nsr_id")
- vnfr = kwargs.get("vnfr")
- vdu2cloud_init = kwargs.get("vdu2cloud_init")
- tasks_by_target_record_id = kwargs.get("tasks_by_target_record_id")
- logger = kwargs.get("logger")
- db = kwargs.get("db")
- fs = kwargs.get("fs")
- ro_nsr_public_key = kwargs.get("ro_nsr_public_key")
+ persistent_root_disk = {}
+        # There can be only one root disk; when it is found, the result is returned
- vnf_preffix = "vnfrs:{}".format(vnfr_id)
- ns_preffix = "nsrs:{}".format(nsr_id)
- image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
- flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
- extra_dict = {"depends_on": [image_text, flavor_text]}
- net_list = []
+ for vdu, vsd in product(
+ vnfd.get("vdu", ()), vnfd.get("virtual-storage-desc", ())
+ ):
+ if (
+ vdu["name"] == target_vdu["vdu-name"]
+ and vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]
+ ):
+ root_disk = vsd
+ if root_disk.get("type-of-storage", "").endswith("persistent-storage"):
+ for vdu_volume in vdu_instantiation_volumes_list:
+ if (
+ vdu_volume["vim-volume-id"]
+ and root_disk["id"] == vdu_volume["name"]
+ ):
+ persistent_root_disk[vsd["id"]] = {
+ "vim_volume_id": vdu_volume["vim-volume-id"],
+ "image_id": vdu.get("sw-image-desc"),
+ }
- for iface_index, interface in enumerate(target_vdu["interfaces"]):
- if interface.get("ns-vld-id"):
- net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
- elif interface.get("vnf-vld-id"):
- net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
- else:
- logger.error(
- "Interface {} from vdu {} not connected to any vld".format(
- iface_index, target_vdu["vdu-name"]
- )
- )
+ disk_list.append(persistent_root_disk[vsd["id"]])
- continue # interface not connected to any vld
+ return persistent_root_disk
- extra_dict["depends_on"].append(net_text)
+ else:
+ if root_disk.get("size-of-storage"):
+ persistent_root_disk[vsd["id"]] = {
+ "image_id": vdu.get("sw-image-desc"),
+ "size": root_disk.get("size-of-storage"),
+ "keep": Ns.is_volume_keeping_required(root_disk),
+ }
- if "port-security-enabled" in interface:
- interface["port_security"] = interface.pop("port-security-enabled")
+ disk_list.append(persistent_root_disk[vsd["id"]])
- if "port-security-disable-strategy" in interface:
- interface["port_security_disable_strategy"] = interface.pop(
- "port-security-disable-strategy"
- )
+ return persistent_root_disk
+ return persistent_root_disk
- net_item = {
- x: v
- for x, v in interface.items()
- if x
- in (
- "name",
- "vpci",
- "port_security",
- "port_security_disable_strategy",
- "floating_ip",
- )
- }
- net_item["net_id"] = "TASK-" + net_text
- net_item["type"] = "virtual"
+ @staticmethod
+ def find_persistent_volumes(
+ persistent_root_disk: dict,
+ target_vdu: dict,
+ vdu_instantiation_volumes_list: list,
+ disk_list: list,
+ ) -> None:
+ """Find the ordinary persistent volumes and add them to the disk_list
+ by parsing the instantiation parameters.
- # TODO mac_address: used for SR-IOV ifaces #TODO for other types
- # TODO floating_ip: True/False (or it can be None)
- if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
- # mark the net create task as type data
- if deep_get(
- tasks_by_target_record_id,
- net_text,
- "extra_dict",
- "params",
- "net_type",
- ):
- tasks_by_target_record_id[net_text]["extra_dict"]["params"][
- "net_type"
- ] = "data"
+ Args:
+ persistent_root_disk: persistent root disk dictionary
+ target_vdu: processed VDU
+            vdu_instantiation_volumes_list: instantiation parameters for each VDU, as a list
+ disk_list: to be filled up
- net_item["use"] = "data"
- net_item["model"] = interface["type"]
- net_item["type"] = interface["type"]
- elif (
- interface.get("type") == "OM-MGMT"
- or interface.get("mgmt-interface")
- or interface.get("mgmt-vnf")
+ """
+ # Find the ordinary volumes which are not added to the persistent_root_disk
+ persistent_disk = {}
+        for disk in target_vdu.get("virtual-storages", []):
+ if (
+ disk.get("type-of-storage", "").endswith("persistent-storage")
+ and disk["id"] not in persistent_root_disk.keys()
):
- net_item["use"] = "mgmt"
- else:
- # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
- net_item["use"] = "bridge"
- net_item["model"] = interface.get("type")
+ for vdu_volume in vdu_instantiation_volumes_list:
+ if vdu_volume["vim-volume-id"] and disk["id"] == vdu_volume["name"]:
+ persistent_disk[disk["id"]] = {
+ "vim_volume_id": vdu_volume["vim-volume-id"],
+ }
+ disk_list.append(persistent_disk[disk["id"]])
- if interface.get("ip-address"):
- net_item["ip_address"] = interface["ip-address"]
+ else:
+ if disk["id"] not in persistent_disk.keys():
+ persistent_disk[disk["id"]] = {
+ "size": disk.get("size-of-storage"),
+ "keep": Ns.is_volume_keeping_required(disk),
+ }
+ disk_list.append(persistent_disk[disk["id"]])
- if interface.get("mac-address"):
- net_item["mac_address"] = interface["mac-address"]
+ @staticmethod
+ def is_volume_keeping_required(virtual_storage_desc: Dict[str, Any]) -> bool:
+ """Function to decide keeping persistent volume
+ upon VDU deletion.
- net_list.append(net_item)
+ Args:
+ virtual_storage_desc (Dict[str, Any]): virtual storage description dictionary
- if interface.get("mgmt-vnf"):
- extra_dict["mgmt_vnf_interface"] = iface_index
- elif interface.get("mgmt-interface"):
- extra_dict["mgmt_vdu_interface"] = iface_index
+ Returns:
+ bool (True/False)
+ """
- # cloud config
- cloud_config = {}
+ if not virtual_storage_desc.get("vdu-storage-requirements"):
+ return False
+ for item in virtual_storage_desc.get("vdu-storage-requirements", {}):
+ if item.get("key") == "keep-volume" and item.get("value").lower() == "true":
+ return True
+ return False
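+
+    # Illustrative behaviour of is_volume_keeping_required (assumed snippets):
+    #   {"vdu-storage-requirements": [{"key": "keep-volume", "value": "true"}]}  -> True
+    #   {"vdu-storage-requirements": [{"key": "keep-volume", "value": "false"}]} -> False
+    #   {} (no vdu-storage-requirements)                                         -> False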
- if target_vdu.get("cloud-init"):
- if target_vdu["cloud-init"] not in vdu2cloud_init:
- vdu2cloud_init[target_vdu["cloud-init"]] = Ns._get_cloud_init(
- db=db,
- fs=fs,
- location=target_vdu["cloud-init"],
- )
+ @staticmethod
+ def is_shared_volume(
+ virtual_storage_desc: Dict[str, Any], vnfd_id: str
+    ) -> Tuple[str, bool]:
+        """Function to decide whether the volume is multi-attached (shared) or not.
- cloud_content_ = vdu2cloud_init[target_vdu["cloud-init"]]
- cloud_config["user-data"] = Ns._parse_jinja2(
- cloud_init_content=cloud_content_,
- params=target_vdu.get("additionalParams"),
- context=target_vdu["cloud-init"],
- )
+ Args:
+ virtual_storage_desc (Dict[str, Any]): virtual storage description dictionary
+ vnfd_id (str): vnfd id
- if target_vdu.get("boot-data-drive"):
- cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive")
+ Returns:
+            name (str): New name if it is a multiattach disk, otherwise the original id
+            bool: True if the disk is multiattach, False otherwise
+ """
- ssh_keys = []
+ if vdu_storage_requirements := virtual_storage_desc.get(
+ "vdu-storage-requirements", {}
+ ):
+ for item in vdu_storage_requirements:
+ if (
+ item.get("key") == "multiattach"
+ and item.get("value").lower() == "true"
+ ):
+ name = f"shared-{virtual_storage_desc['id']}-{vnfd_id}"
+ return name, True
+ return virtual_storage_desc["id"], False
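+
+    # Illustrative behaviour of is_shared_volume (assumed values), vnfd_id="my-vnfd":
+    #   {"id": "data", "vdu-storage-requirements": [{"key": "multiattach", "value": "true"}]}
+    #       -> ("shared-data-my-vnfd", True)
+    #   {"id": "data"} -> ("data", False)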
- if target_vdu.get("ssh-keys"):
- ssh_keys += target_vdu.get("ssh-keys")
+ @staticmethod
+ def _sort_vdu_interfaces(target_vdu: dict) -> None:
+ """Sort the interfaces according to position number.
+
+ Args:
+ target_vdu (dict): Details of VDU to be created
+
+ """
+ # If the position info is provided for all the interfaces, it will be sorted
+ # according to position number ascendingly.
+ sorted_interfaces = sorted(
+ target_vdu["interfaces"],
+ key=lambda x: (x.get("position") is None, x.get("position")),
+ )
+ target_vdu["interfaces"] = sorted_interfaces
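+        # E.g. (assumed input) positions [2, 1, 3] are reordered to [1, 2, 3];
+        # the caller only uses this helper when every interface has a "position".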
+
+ @staticmethod
+ def _partially_locate_vdu_interfaces(target_vdu: dict) -> None:
+        """Only place the interfaces which have a specific position.
+
+ Args:
+ target_vdu (dict): Details of VDU to be created
+
+ """
+ # If the position info is provided for some interfaces but not all of them, the interfaces
+        # which have specific position numbers will be placed and the others' positions will not be taken into account.
+ if any(
+ i.get("position") + 1
+ for i in target_vdu["interfaces"]
+ if i.get("position") is not None
+ ):
+ n = len(target_vdu["interfaces"])
+ sorted_interfaces = [-1] * n
+ k, m = 0, 0
+
+ while k < n:
+ if target_vdu["interfaces"][k].get("position") is not None:
+ if any(i.get("position") == 0 for i in target_vdu["interfaces"]):
+ idx = target_vdu["interfaces"][k]["position"] + 1
+ else:
+ idx = target_vdu["interfaces"][k]["position"]
+ sorted_interfaces[idx - 1] = target_vdu["interfaces"][k]
+ k += 1
+
+ while m < n:
+ if target_vdu["interfaces"][m].get("position") is None:
+ idy = sorted_interfaces.index(-1)
+ sorted_interfaces[idy] = target_vdu["interfaces"][m]
+ m += 1
+
+ target_vdu["interfaces"] = sorted_interfaces
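+        # E.g. (assumed input, 1-based positions) interfaces [A(pos=1), B(None),
+        # C(pos=3)] end up as [A, B, C]: positioned interfaces claim their slots
+        # first and unpositioned ones fill the remaining gaps in original order.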
+
+ @staticmethod
+ def _prepare_vdu_cloud_init(
+ target_vdu: dict, vdu2cloud_init: dict, db: object, fs: object
+ ) -> Dict:
+ """Fill cloud_config dict with cloud init details.
+
+ Args:
+ target_vdu (dict): Details of VDU to be created
+ vdu2cloud_init (dict): Cloud init dict
+ db (object): DB object
+ fs (object): FS object
+
+ Returns:
+ cloud_config (dict): Cloud config details of VDU
+
+ """
+ # cloud config
+ cloud_config = {}
+
+ if target_vdu.get("cloud-init"):
+ if target_vdu["cloud-init"] not in vdu2cloud_init:
+ vdu2cloud_init[target_vdu["cloud-init"]] = Ns._get_cloud_init(
+ db=db,
+ fs=fs,
+ location=target_vdu["cloud-init"],
+ )
+
+ cloud_content_ = vdu2cloud_init[target_vdu["cloud-init"]]
+ cloud_config["user-data"] = Ns._parse_jinja2(
+ cloud_init_content=cloud_content_,
+ params=target_vdu.get("additionalParams"),
+ context=target_vdu["cloud-init"],
+ )
+
+ if target_vdu.get("boot-data-drive"):
+ cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive")
+
+ return cloud_config
+
+ @staticmethod
+ def _check_vld_information_of_interfaces(
+ interface: dict, ns_preffix: str, vnf_preffix: str
+ ) -> Optional[str]:
+        """Prepare the net_text from the virtual link information at VNF and NS level.
+ Args:
+ interface (dict): Interface details
+ ns_preffix (str): Prefix of NS
+ vnf_preffix (str): Prefix of VNF
+
+ Returns:
+ net_text (str): information of net
+
+ """
+ net_text = ""
+ if interface.get("ns-vld-id"):
+ net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
+ elif interface.get("vnf-vld-id"):
+ net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
+
+ return net_text
+
+ @staticmethod
+ def _prepare_interface_port_security(interface: dict) -> None:
+        """Normalize the port-security keys of the interface.
+
+ Args:
+ interface (dict): Interface details
+
+ """
+ if "port-security-enabled" in interface:
+ interface["port_security"] = interface.pop("port-security-enabled")
+
+ if "port-security-disable-strategy" in interface:
+ interface["port_security_disable_strategy"] = interface.pop(
+ "port-security-disable-strategy"
+ )
+
+ @staticmethod
+ def _create_net_item_of_interface(interface: dict, net_text: str) -> dict:
+ """Prepare net item including name, port security, floating ip etc.
+
+ Args:
+ interface (dict): Interface details
+ net_text (str): information of net
+
+ Returns:
+ net_item (dict): Dict including net details
+
+ """
+
+ net_item = {
+ x: v
+ for x, v in interface.items()
+ if x
+ in (
+ "name",
+ "vpci",
+ "port_security",
+ "port_security_disable_strategy",
+ "floating_ip",
+ )
+ }
+ net_item["net_id"] = "TASK-" + net_text
+ net_item["type"] = "virtual"
+
+ return net_item
+
+ @staticmethod
+ def _prepare_type_of_interface(
+ interface: dict, tasks_by_target_record_id: dict, net_text: str, net_item: dict
+ ) -> None:
+ """Fill the net item type by interface type such as SR-IOV, OM-MGMT, bridge etc.
+
+ Args:
+ interface (dict): Interface details
+ tasks_by_target_record_id (dict): Task details
+ net_text (str): information of net
+ net_item (dict): Dict including net details
+
+ """
+ # TODO mac_address: used for SR-IOV ifaces #TODO for other types
+ # TODO floating_ip: True/False (or it can be None)
+
+ if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
+ # Mark the net create task as type data
+ if deep_get(
+ tasks_by_target_record_id,
+ net_text,
+ "extra_dict",
+ "params",
+ "net_type",
+ ):
+ tasks_by_target_record_id[net_text]["extra_dict"]["params"][
+ "net_type"
+ ] = "data"
+
+ net_item["use"] = "data"
+ net_item["model"] = interface["type"]
+ net_item["type"] = interface["type"]
+
+ elif (
+ interface.get("type") == "OM-MGMT"
+ or interface.get("mgmt-interface")
+ or interface.get("mgmt-vnf")
+ ):
+ net_item["use"] = "mgmt"
+
+ else:
+ # If interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
+ net_item["use"] = "bridge"
+ net_item["model"] = interface.get("type")
+
+ @staticmethod
+ def _prepare_vdu_interfaces(
+ target_vdu: dict,
+ extra_dict: dict,
+ ns_preffix: str,
+ vnf_preffix: str,
+ logger: object,
+ tasks_by_target_record_id: dict,
+ net_list: list,
+ ) -> None:
+        """Prepare each net_item, append it to net_list and add the mgmt interface index to extra_dict.
+
+ Args:
+ target_vdu (dict): VDU to be created
+ extra_dict (dict): Dictionary to be filled
+ ns_preffix (str): NS prefix as string
+ vnf_preffix (str): VNF prefix as string
+ logger (object): Logger Object
+ tasks_by_target_record_id (dict): Task details
+ net_list (list): Net list of VDU
+ """
+ for iface_index, interface in enumerate(target_vdu["interfaces"]):
+ net_text = Ns._check_vld_information_of_interfaces(
+ interface, ns_preffix, vnf_preffix
+ )
+ if not net_text:
+ # Interface not connected to any vld
+ logger.error(
+ "Interface {} from vdu {} not connected to any vld".format(
+ iface_index, target_vdu["vdu-name"]
+ )
+ )
+ continue
+
+ extra_dict["depends_on"].append(net_text)
+
+ Ns._prepare_interface_port_security(interface)
+
+ net_item = Ns._create_net_item_of_interface(interface, net_text)
+
+ Ns._prepare_type_of_interface(
+ interface, tasks_by_target_record_id, net_text, net_item
+ )
+
+ if interface.get("ip-address"):
+ net_item["ip_address"] = interface["ip-address"]
+
+ if interface.get("mac-address"):
+ net_item["mac_address"] = interface["mac-address"]
+
+ net_list.append(net_item)
+
+ if interface.get("mgmt-vnf"):
+ extra_dict["mgmt_vnf_interface"] = iface_index
+ elif interface.get("mgmt-interface"):
+ extra_dict["mgmt_vdu_interface"] = iface_index
+
+ @staticmethod
+ def _prepare_vdu_ssh_keys(
+ target_vdu: dict, ro_nsr_public_key: dict, cloud_config: dict
+ ) -> None:
+ """Add ssh keys to cloud config.
+
+ Args:
+ target_vdu (dict): Details of VDU to be created
+ ro_nsr_public_key (dict): RO NSR public Key
+ cloud_config (dict): Cloud config details
+
+ """
+ ssh_keys = []
+
+ if target_vdu.get("ssh-keys"):
+ ssh_keys += target_vdu.get("ssh-keys")
if target_vdu.get("ssh-access-required"):
ssh_keys.append(ro_nsr_public_key)
if ssh_keys:
cloud_config["key-pairs"] = ssh_keys
- persistent_root_disk = {}
- disk_list = []
- vnfd_id = vnfr["vnfd-id"]
- vnfd = db.get_one("vnfds", {"_id": vnfd_id})
+ @staticmethod
+ def _select_persistent_root_disk(vsd: dict, vdu: dict) -> dict:
+        """Select the persistent root disk if it exists.
+ Args:
+ vsd (dict): Virtual storage descriptors in VNFD
+ vdu (dict): VNF descriptor
+
+ Returns:
+ root_disk (dict): Selected persistent root disk
+ """
+ if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
+ root_disk = vsd
+ if root_disk.get("type-of-storage", "").endswith(
+ "persistent-storage"
+ ) and root_disk.get("size-of-storage"):
+ return root_disk
+
+ @staticmethod
+ def _add_persistent_root_disk_to_disk_list(
+ vnfd: dict, target_vdu: dict, persistent_root_disk: dict, disk_list: list
+ ) -> None:
+ """Find the persistent root disk and add to disk list.
+
+ Args:
+ vnfd (dict): VNF descriptor
+ target_vdu (dict): Details of VDU to be created
+ persistent_root_disk (dict): Details of persistent root disk
+ disk_list (list): Disks of VDU
+
+ """
for vdu in vnfd.get("vdu", ()):
if vdu["name"] == target_vdu["vdu-name"]:
for vsd in vnfd.get("virtual-storage-desc", ()):
- if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
- root_disk = vsd
- if root_disk.get(
- "type-of-storage"
- ) == "persistent-storage:persistent-storage" and root_disk.get(
- "size-of-storage"
- ):
- persistent_root_disk[vsd["id"]] = {
- "image_id": vdu.get("sw-image-desc"),
- "size": root_disk["size-of-storage"],
- }
- disk_list.append(persistent_root_disk[vsd["id"]])
+ root_disk = Ns._select_persistent_root_disk(vsd, vdu)
+ if not root_disk:
+ continue
+
+ persistent_root_disk[vsd["id"]] = {
+ "image_id": vdu.get("sw-image-desc"),
+ "size": root_disk["size-of-storage"],
+ "keep": Ns.is_volume_keeping_required(root_disk),
+ }
+ disk_list.append(persistent_root_disk[vsd["id"]])
+ break
+
+ @staticmethod
+ def _add_persistent_ordinary_disks_to_disk_list(
+ target_vdu: dict,
+ persistent_root_disk: dict,
+ persistent_ordinary_disk: dict,
+ disk_list: list,
+ extra_dict: dict,
+ vnf_id: str = None,
+ nsr_id: str = None,
+ ) -> None:
+ """Fill the disk list by adding persistent ordinary disks.
+
+ Args:
+ target_vdu (dict): Details of VDU to be created
+ persistent_root_disk (dict): Details of persistent root disk
+ persistent_ordinary_disk (dict): Details of persistent ordinary disk
+            disk_list (list): Disks of VDU
+            extra_dict (dict): Dictionary to be filled with the task dependencies
+            vnf_id (str): VNFD id, used to name shared (multiattach) volumes
+            nsr_id (str): NSR id, used to reference the shared-volume tasks
+        """
if target_vdu.get("virtual-storages"):
for disk in target_vdu["virtual-storages"]:
if (
- disk.get("type-of-storage")
- == "persistent-storage:persistent-storage"
+ disk.get("type-of-storage", "").endswith("persistent-storage")
and disk["id"] not in persistent_root_disk.keys()
):
- disk_list.append({"size": disk["size-of-storage"]})
+ name, multiattach = Ns.is_shared_volume(disk, vnf_id)
+ persistent_ordinary_disk[disk["id"]] = {
+ "name": name,
+ "size": disk["size-of-storage"],
+ "keep": Ns.is_volume_keeping_required(disk),
+ "multiattach": multiattach,
+ }
+ disk_list.append(persistent_ordinary_disk[disk["id"]])
+ if multiattach: # VDU creation has to wait for shared volumes
+ extra_dict["depends_on"].append(
+ f"nsrs:{nsr_id}:shared-volumes.{name}"
+ )
+
+ @staticmethod
+ def _prepare_vdu_affinity_group_list(
+ target_vdu: dict, extra_dict: dict, ns_preffix: str
+    ) -> List[Dict[str, Any]]:
+ """Process affinity group details to prepare affinity group list.
+
+ Args:
+ target_vdu (dict): Details of VDU to be created
+ extra_dict (dict): Dictionary to be filled
+ ns_preffix (str): Prefix as string
+
+        Returns:
+            affinity_group_list (list): Affinity group details
+        """
affinity_group_list = []
if target_vdu.get("affinity-or-anti-affinity-group-id"):
- affinity_group = {}
for affinity_group_id in target_vdu["affinity-or-anti-affinity-group-id"]:
+ affinity_group = {}
affinity_group_text = (
ns_preffix + ":affinity-or-anti-affinity-group." + affinity_group_id
)
+ if not isinstance(extra_dict.get("depends_on"), list):
+ raise NsException("Invalid extra_dict format.")
+
extra_dict["depends_on"].append(affinity_group_text)
affinity_group["affinity_group_id"] = "TASK-" + affinity_group_text
affinity_group_list.append(affinity_group)
+ return affinity_group_list
+
+ @staticmethod
+ def _process_vdu_params(
+ target_vdu: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """Function to process VDU parameters.
+
+ Args:
+ target_vdu (Dict[str, Any]): [description]
+ indata (Dict[str, Any]): [description]
+ vim_info (Dict[str, Any]): [description]
+ target_record_id (str): [description]
+
+ Returns:
+ Dict[str, Any]: [description]
+ """
+ vnfr_id = kwargs.get("vnfr_id")
+ nsr_id = kwargs.get("nsr_id")
+ vnfr = kwargs.get("vnfr")
+ vdu2cloud_init = kwargs.get("vdu2cloud_init")
+ tasks_by_target_record_id = kwargs.get("tasks_by_target_record_id")
+ logger = kwargs.get("logger")
+ db = kwargs.get("db")
+ fs = kwargs.get("fs")
+ ro_nsr_public_key = kwargs.get("ro_nsr_public_key")
+
+ vnf_preffix = "vnfrs:{}".format(vnfr_id)
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
+ flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
+ extra_dict = {"depends_on": [image_text, flavor_text]}
+ net_list = []
+ persistent_root_disk = {}
+ persistent_ordinary_disk = {}
+ vdu_instantiation_volumes_list = []
+ disk_list = []
+ vnfd_id = vnfr["vnfd-id"]
+ vnfd = db.get_one("vnfds", {"_id": vnfd_id})
+        # If the position info is provided for all the interfaces, they will be
+        # sorted according to their position numbers in ascending order.
+        if all(
+            i.get("position") is not None for i in target_vdu["interfaces"]
+        ):
+ Ns._sort_vdu_interfaces(target_vdu)
+
+ # If the position info is provided for some interfaces but not all of them, the interfaces
+        # which have specific position numbers will be placed and the others' positions will not be taken into account.
+ else:
+ Ns._partially_locate_vdu_interfaces(target_vdu)
+
+ # If the position info is not provided for the interfaces, interfaces will be attached
+ # according to the order in the VNFD.
+ Ns._prepare_vdu_interfaces(
+ target_vdu,
+ extra_dict,
+ ns_preffix,
+ vnf_preffix,
+ logger,
+ tasks_by_target_record_id,
+ net_list,
+ )
+
+ # cloud config
+ cloud_config = Ns._prepare_vdu_cloud_init(target_vdu, vdu2cloud_init, db, fs)
+
+ # Prepare VDU ssh keys
+ Ns._prepare_vdu_ssh_keys(target_vdu, ro_nsr_public_key, cloud_config)
+
+ if target_vdu.get("additionalParams"):
+ vdu_instantiation_volumes_list = (
+ target_vdu.get("additionalParams").get("OSM", {}).get("vdu_volumes")
+ )
+
+ if vdu_instantiation_volumes_list:
+ # Find the root volumes and add to the disk_list
+ persistent_root_disk = Ns.find_persistent_root_volumes(
+ vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+ )
+
+ # Find the ordinary volumes which are not added to the persistent_root_disk
+ # and put them to the disk list
+ Ns.find_persistent_volumes(
+ persistent_root_disk,
+ target_vdu,
+ vdu_instantiation_volumes_list,
+ disk_list,
+ )
+
+ else:
+ # Vdu_instantiation_volumes_list is empty
+            # First add the persistent root disks to disk_list
+ Ns._add_persistent_root_disk_to_disk_list(
+ vnfd, target_vdu, persistent_root_disk, disk_list
+ )
+ # Add the persistent non-root disks to disk_list
+ Ns._add_persistent_ordinary_disks_to_disk_list(
+ target_vdu,
+ persistent_root_disk,
+ persistent_ordinary_disk,
+ disk_list,
+ extra_dict,
+ vnfd["id"],
+ nsr_id,
+ )
+
+ affinity_group_list = Ns._prepare_vdu_affinity_group_list(
+ target_vdu, extra_dict, ns_preffix
+ )
+
+ instance_name = "{}-{}-{}-{}".format(
+ indata["name"],
+ vnfr["member-vnf-index-ref"],
+ target_vdu["vdu-name"],
+ target_vdu.get("count-index") or 0,
+ )
+ if additional_params := target_vdu.get("additionalParams"):
+ if additional_params.get("OSM", {}).get("instance_name"):
+ instance_name = additional_params.get("OSM", {}).get("instance_name")
+ if count_index := target_vdu.get("count-index"):
+ if count_index >= 1:
+ instance_name = "{}-{}".format(instance_name, count_index)
+
extra_dict["params"] = {
- "name": "{}-{}-{}-{}".format(
- indata["name"][:16],
- vnfr["member-vnf-index-ref"][:16],
- target_vdu["vdu-name"][:32],
- target_vdu.get("count-index") or 0,
- ),
+ "name": instance_name,
"description": target_vdu["vdu-name"],
"start": True,
"image_id": "TASK-" + image_text,
"availability_zone_index": None, # TODO
"availability_zone_list": None, # TODO
}
+ return extra_dict
+ @staticmethod
+ def _process_shared_volumes_params(
+ target_shared_volume: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ extra_dict = {}
+ shared_volume_data = {
+ "size": target_shared_volume["size-of-storage"],
+ "name": target_shared_volume["id"],
+ "type": target_shared_volume["type-of-storage"],
+ "keep": Ns.is_volume_keeping_required(target_shared_volume),
+ }
+ extra_dict["params"] = shared_volume_data
return extra_dict
@staticmethod
Returns:
Dict[str, Any]: [description]
"""
- extra_dict = {}
+ extra_dict = {}
affinity_group_data = {
"name": target_affinity_group["name"],
"type": target_affinity_group["type"],
extra_dict["params"] = {
"affinity_group_data": affinity_group_data,
}
+ return extra_dict
+
+ @staticmethod
+ def _process_recreate_vdu_params(
+ existing_vdu: Dict[str, Any],
+ db_nsr: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ target_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """Function to process VDU parameters to recreate.
+
+ Args:
+ existing_vdu (Dict[str, Any]): [description]
+ db_nsr (Dict[str, Any]): [description]
+ vim_info (Dict[str, Any]): [description]
+ target_record_id (str): [description]
+ target_id (str): [description]
+
+ Returns:
+ Dict[str, Any]: [description]
+ """
+ vnfr = kwargs.get("vnfr")
+ vdu2cloud_init = kwargs.get("vdu2cloud_init")
+ # logger = kwargs.get("logger")
+ db = kwargs.get("db")
+ fs = kwargs.get("fs")
+ ro_nsr_public_key = kwargs.get("ro_nsr_public_key")
+
+ extra_dict = {}
+ net_list = []
+
+ vim_details = {}
+ vim_details_text = existing_vdu["vim_info"][target_id].get("vim_details", None)
+
+ if vim_details_text:
+ vim_details = yaml.safe_load(f"{vim_details_text}")
+
+ for iface_index, interface in enumerate(existing_vdu["interfaces"]):
+ if "port-security-enabled" in interface:
+ interface["port_security"] = interface.pop("port-security-enabled")
+
+ if "port-security-disable-strategy" in interface:
+ interface["port_security_disable_strategy"] = interface.pop(
+ "port-security-disable-strategy"
+ )
+
+ net_item = {
+ x: v
+ for x, v in interface.items()
+ if x
+ in (
+ "name",
+ "vpci",
+ "port_security",
+ "port_security_disable_strategy",
+ "floating_ip",
+ )
+ }
+ existing_ifaces = existing_vdu["vim_info"][target_id].get(
+ "interfaces_backup", []
+ )
+ net_id = next(
+ (
+ i["vim_net_id"]
+ for i in existing_ifaces
+ if i["ip_address"] == interface["ip-address"]
+ ),
+ None,
+ )
+
+ net_item["net_id"] = net_id
+ net_item["type"] = "virtual"
+
+ # TODO mac_address: used for SR-IOV ifaces #TODO for other types
+ # TODO floating_ip: True/False (or it can be None)
+ if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
+ net_item["use"] = "data"
+ net_item["model"] = interface["type"]
+ net_item["type"] = interface["type"]
+ elif (
+ interface.get("type") == "OM-MGMT"
+ or interface.get("mgmt-interface")
+ or interface.get("mgmt-vnf")
+ ):
+ net_item["use"] = "mgmt"
+ else:
+ # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
+ net_item["use"] = "bridge"
+ net_item["model"] = interface.get("type")
+
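+            # Dual-stack VDUs may report both addresses joined with ";", e.g.
+            # "10.0.0.10;2001:db8::10" -> ["10.0.0.10", "2001:db8::10"]; a single
+            # address stays a plain string.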
+ if interface.get("ip-address"):
+ dual_ip = interface.get("ip-address").split(";")
+ if len(dual_ip) == 2:
+ net_item["ip_address"] = dual_ip
+ else:
+ net_item["ip_address"] = interface["ip-address"]
+
+ if interface.get("mac-address"):
+ net_item["mac_address"] = interface["mac-address"]
+
+ net_list.append(net_item)
+
+ if interface.get("mgmt-vnf"):
+ extra_dict["mgmt_vnf_interface"] = iface_index
+ elif interface.get("mgmt-interface"):
+ extra_dict["mgmt_vdu_interface"] = iface_index
+
+ # cloud config
+ cloud_config = {}
+
+ if existing_vdu.get("cloud-init"):
+ if existing_vdu["cloud-init"] not in vdu2cloud_init:
+ vdu2cloud_init[existing_vdu["cloud-init"]] = Ns._get_cloud_init(
+ db=db,
+ fs=fs,
+ location=existing_vdu["cloud-init"],
+ )
+
+ cloud_content_ = vdu2cloud_init[existing_vdu["cloud-init"]]
+ cloud_config["user-data"] = Ns._parse_jinja2(
+ cloud_init_content=cloud_content_,
+ params=existing_vdu.get("additionalParams"),
+ context=existing_vdu["cloud-init"],
+ )
+
+ if existing_vdu.get("boot-data-drive"):
+ cloud_config["boot-data-drive"] = existing_vdu.get("boot-data-drive")
+
+ ssh_keys = []
+
+ if existing_vdu.get("ssh-keys"):
+ ssh_keys += existing_vdu.get("ssh-keys")
+
+ if existing_vdu.get("ssh-access-required"):
+ ssh_keys.append(ro_nsr_public_key)
+
+ if ssh_keys:
+ cloud_config["key-pairs"] = ssh_keys
+
+ disk_list = []
+ for vol_id in vim_details.get("os-extended-volumes:volumes_attached", []):
+ disk_list.append({"vim_id": vol_id["id"]})
+
+ affinity_group_list = []
+
+ if existing_vdu.get("affinity-or-anti-affinity-group-id"):
+ affinity_group = {}
+ for affinity_group_id in existing_vdu["affinity-or-anti-affinity-group-id"]:
+ for group in db_nsr.get("affinity-or-anti-affinity-group"):
+ if (
+ group["id"] == affinity_group_id
+ and group["vim_info"][target_id].get("vim_id", None) is not None
+ ):
+ affinity_group["affinity_group_id"] = group["vim_info"][
+ target_id
+ ].get("vim_id", None)
+ affinity_group_list.append(affinity_group)
+
+ instance_name = "{}-{}-{}-{}".format(
+ db_nsr["name"],
+ vnfr["member-vnf-index-ref"],
+ existing_vdu["vdu-name"],
+ existing_vdu.get("count-index") or 0,
+ )
+ if additional_params := existing_vdu.get("additionalParams"):
+ if additional_params.get("OSM", {}).get("instance_name"):
+ instance_name = additional_params.get("OSM", {}).get("instance_name")
+ if count_index := existing_vdu.get("count-index"):
+ if count_index >= 1:
+ instance_name = "{}-{}".format(instance_name, count_index)
+
+ extra_dict["params"] = {
+ "name": instance_name,
+ "description": existing_vdu["vdu-name"],
+ "start": True,
+ "image_id": vim_details["image"]["id"],
+ "flavor_id": vim_details["flavor"]["id"],
+ "affinity_group_list": affinity_group_list,
+ "net_list": net_list,
+ "cloud_config": cloud_config or None,
+ "disk_list": disk_list,
+ "availability_zone_index": None, # TODO
+ "availability_zone_list": None, # TODO
+ }
return extra_dict
related to a specific item `item` to be done. This function should be
called for NS instantiation, NS termination, NS update to add a new VNF
or a new VLD, remove a VNF or VLD, etc.
- Item can be `net, `flavor`, `image` or `vdu`.
+ Item can be `net`, `flavor`, `image` or `vdu`.
It takes a list of target items from indata (which came from the REST API)
and compares with the existing items from db_ro_nsr, identifying the
incremental changes to be done. During the comparison, it calls the method
process_params = None
vdu2cloud_init = indata.get("cloud_init_content") or {}
ro_nsr_public_key = db_ro_nsr["public_key"]
-
# According to the type of item, the path, the target_list,
# the existing_list and the method to process params are set
db_path = self.db_path_map[item]
process_params = self.process_params_function_map[item]
- if item in ("net", "vdu"):
+
+ if item in ("sfp", "classification", "sf", "sfi"):
+ db_record = "nsrs:{}:{}".format(nsr_id, db_path)
+ target_vnffg = indata.get("vnffg", [])[0]
+ target_list = target_vnffg[item]
+ existing_list = db_nsr.get(item, [])
+ elif item in ("net", "vdu"):
+ # This case is specific for the NS VLD (not applied to VDU)
if vnfr is None:
db_record = "nsrs:{}:{}".format(nsr_id, db_path)
target_list = indata.get("ns", []).get(db_path, [])
existing_list = db_nsr.get(db_path, [])
+ # This case is common for VNF VLDs and VNF VDUs
else:
db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
target_vnf = next(
)
target_list = target_vnf.get(db_path, []) if target_vnf else []
existing_list = vnfr.get(db_path, [])
- elif item in ("image", "flavor", "affinity-or-anti-affinity-group"):
+ elif item in (
+ "image",
+ "flavor",
+ "affinity-or-anti-affinity-group",
+ "shared-volumes",
+ ):
db_record = "nsrs:{}:{}".format(nsr_id, db_path)
target_list = indata.get(item, [])
existing_list = db_nsr.get(item, [])
else:
            raise NsException("Item not supported: {}".format(item))
-
# ensure all the target_list elements has an "id". If not assign the index as id
if target_list is None:
target_list = []
for target_index, tl in enumerate(target_list):
if tl and not tl.get("id"):
tl["id"] = str(target_index)
-
# step 1 items (networks,vdus,...) to be deleted/updated
for item_index, existing_item in enumerate(existing_list):
target_item = next(
(t for t in target_list if t["id"] == existing_item["id"]),
None,
)
-
for target_vim, existing_viminfo in existing_item.get(
"vim_info", {}
).items():
target_record_id = "{}.{}".format(db_record, existing_item["id"])
item_ = item
- if target_vim.startswith("sdn"):
+ if target_vim.startswith("sdn") or target_vim.startswith("wim"):
# item must be sdn-net instead of net if target_vim is a sdn
item_ = "sdn_net"
target_record_id += ".sdn"
# step 2 items (networks,vdus,...) to be created
for target_item in target_list:
item_index = -1
-
for item_index, existing_item in enumerate(existing_list):
if existing_item["id"] == target_item["id"]:
break
target_record_id = "{}.{}".format(db_record, target_item["id"])
item_ = item
- if target_vim.startswith("sdn"):
+ if target_vim.startswith("sdn") or target_vim.startswith("wim"):
# item must be sdn-net instead of net if target_vim is a sdn
item_ = "sdn_net"
target_record_id += ".sdn"
kwargs = {}
+ self.logger.debug(
+ "ns.calculate_diff_items target_item={}".format(target_item)
+ )
+ if process_params == Ns._process_flavor_params:
+ kwargs.update(
+ {
+ "db": self.db,
+ }
+ )
+ self.logger.debug(
+ "calculate_diff_items for flavor kwargs={}".format(kwargs)
+ )
+
if process_params == Ns._process_vdu_params:
+ self.logger.debug("calculate_diff_items self.fs={}".format(self.fs))
kwargs.update(
{
"vnfr_id": vnfr_id,
"ro_nsr_public_key": ro_nsr_public_key,
}
)
+ self.logger.debug("calculate_diff_items kwargs={}".format(kwargs))
+        if process_params in (
+            Ns._process_sfi_params,
+            Ns._process_sf_params,
+            Ns._process_classification_params,
+            Ns._process_sfp_params,
+        ):
+ kwargs.update({"nsr_id": nsr_id, "db": self.db})
+
+ self.logger.debug("calculate_diff_items kwargs={}".format(kwargs))
+
+ extra_dict = process_params(
+ target_item,
+ indata,
+ target_viminfo,
+ target_record_id,
+ **kwargs,
+ )
+ self._assign_vim(target_vim)
+
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+
+ new_item = {
+ "deployment_info": deployment_info,
+ "target_id": target_vim,
+ "item": item_,
+ "action": "CREATE",
+ "target_record": f"{db_record}.{item_index}.vim_info.{target_vim}",
+ "target_record_id": target_record_id,
+ "extra_dict": extra_dict,
+ "common_id": target_item.get("common_id", None),
+ }
+ diff_items.append(new_item)
+ tasks_by_target_record_id[target_record_id] = new_item
+ task_index += 1
+
+ db_nsr_update[db_path + ".{}".format(item_index)] = target_item
+
+ return diff_items, task_index
+
+ def _process_vnfgd_sfp(self, sfp):
+ processed_sfp = {}
+        # getting sfp name, sfs and classifications in sfp to store them in processed_sfp
+ processed_sfp["id"] = sfp["id"]
+ sfs_in_sfp = [
+ sf["id"] for sf in sfp.get("position-desc-id", [])[0].get("cp-profile-id")
+ ]
+ classifications_in_sfp = [
+ classi["id"]
+ for classi in sfp.get("position-desc-id", [])[0].get("match-attributes")
+ ]
+
+ # creating a list of sfp with sfs and classifications
+ processed_sfp["sfs"] = sfs_in_sfp
+ processed_sfp["classifications"] = classifications_in_sfp
+
+ return processed_sfp
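+
+    # Illustrative output (assumed descriptor ids): for an nfpd entry whose first
+    # position-desc-id carries cp-profile-id=[{"id": "sf1"}] and
+    # match-attributes=[{"id": "c1"}], the result is
+    #   {"id": "<sfp-id>", "sfs": ["sf1"], "classifications": ["c1"]}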
+
+ def _process_vnfgd_sf(self, sf):
+ processed_sf = {}
+ # getting name of sf
+ processed_sf["id"] = sf["id"]
+ # getting sfis in sf
+ sfis_in_sf = sf.get("constituent-profile-elements")
+ sorted_sfis = sorted(sfis_in_sf, key=lambda i: i["order"])
+ # getting sfis names
+ processed_sf["sfis"] = [sfi["id"] for sfi in sorted_sfis]
+
+ return processed_sf
+
+ def _process_vnfgd_sfi(self, sfi, db_vnfrs):
+ processed_sfi = {}
+ # getting name of sfi
+ processed_sfi["id"] = sfi["id"]
+
+ # getting ports in sfi
+ ingress_port = sfi["ingress-constituent-cpd-id"]
+ egress_port = sfi["egress-constituent-cpd-id"]
+ sfi_vnf_member_index = sfi["constituent-base-element-id"]
+
+ processed_sfi["ingress_port"] = ingress_port
+ processed_sfi["egress_port"] = egress_port
+
+ all_vnfrs = db_vnfrs.values()
+
+ sfi_vnfr = [
+ element
+ for element in all_vnfrs
+ if element["member-vnf-index-ref"] == sfi_vnf_member_index
+ ]
+ processed_sfi["vnfr_id"] = sfi_vnfr[0]["id"]
+
+ sfi_vnfr_cp = sfi_vnfr[0]["connection-point"]
+
+ ingress_port_index = [
+ c for c, element in enumerate(sfi_vnfr_cp) if element["id"] == ingress_port
+ ]
+ ingress_port_index = ingress_port_index[0]
+
+ processed_sfi["vdur_id"] = sfi_vnfr_cp[ingress_port_index][
+ "connection-point-vdu-id"
+ ]
+ processed_sfi["ingress_port_index"] = ingress_port_index
+ processed_sfi["egress_port_index"] = ingress_port_index
+
+ if egress_port != ingress_port:
+ egress_port_index = [
+ c
+ for c, element in enumerate(sfi_vnfr_cp)
+ if element["id"] == egress_port
+ ]
+ processed_sfi["egress_port_index"] = egress_port_index
+
+ return processed_sfi
+
+ def _process_vnfgd_classification(self, classification, db_vnfrs):
+ processed_classification = {}
+
+ processed_classification = deepcopy(classification)
+ classi_vnf_member_index = processed_classification[
+ "constituent-base-element-id"
+ ]
+ logical_source_port = processed_classification["constituent-cpd-id"]
+
+ all_vnfrs = db_vnfrs.values()
+
+ classi_vnfr = [
+ element
+ for element in all_vnfrs
+ if element["member-vnf-index-ref"] == classi_vnf_member_index
+ ]
+ processed_classification["vnfr_id"] = classi_vnfr[0]["id"]
+
+ classi_vnfr_cp = classi_vnfr[0]["connection-point"]
+
+ ingress_port_index = [
+ c
+ for c, element in enumerate(classi_vnfr_cp)
+ if element["id"] == logical_source_port
+ ]
+ ingress_port_index = ingress_port_index[0]
+
+ processed_classification["ingress_port_index"] = ingress_port_index
+ processed_classification["vdur_id"] = classi_vnfr_cp[ingress_port_index][
+ "connection-point-vdu-id"
+ ]
+
+ return processed_classification
+
+ def _update_db_nsr_with_vnffg(self, processed_vnffg, vim_info, nsr_id):
+        """This method is used to add the vim_info dict to each sfi, sf, sfp and classification in indata and to store them in db_nsr under count-indexed keys.
+
+ Args:
+ processed_vnffg (Dict[str, Any]): deployment info
+ vim_info (Dict): dictionary to store VIM resource information
+ nsr_id (str): NSR id
+
+ Returns: None
+ """
+
+ nsr_sfi = {}
+ nsr_sf = {}
+ nsr_sfp = {}
+ nsr_classification = {}
+ db_nsr_vnffg = deepcopy(processed_vnffg)
+
+ for count, sfi in enumerate(processed_vnffg["sfi"]):
+ sfi["vim_info"] = vim_info
+ sfi_count = "sfi.{}".format(count)
+ nsr_sfi[sfi_count] = db_nsr_vnffg["sfi"][count]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sfi)
+
+ for count, sf in enumerate(processed_vnffg["sf"]):
+ sf["vim_info"] = vim_info
+ sf_count = "sf.{}".format(count)
+ nsr_sf[sf_count] = db_nsr_vnffg["sf"][count]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sf)
+
+ for count, sfp in enumerate(processed_vnffg["sfp"]):
+ sfp["vim_info"] = vim_info
+ sfp_count = "sfp.{}".format(count)
+ nsr_sfp[sfp_count] = db_nsr_vnffg["sfp"][count]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sfp)
+
+ for count, classi in enumerate(processed_vnffg["classification"]):
+ classi["vim_info"] = vim_info
+ classification_count = "classification.{}".format(count)
+ nsr_classification[classification_count] = db_nsr_vnffg["classification"][
+ count
+ ]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_classification)
+
+ def process_vnffgd_descriptor(
+ self,
+ indata: dict,
+ nsr_id: str,
+ db_nsr: dict,
+ db_vnfrs: dict,
+ ) -> dict:
+        """This method is used to process VNFFGD parameters from the descriptor.
+
+ Args:
+ indata (Dict[str, Any]): deployment info
+ nsr_id (str): NSR id
+ db_nsr: NSR record from DB
+ db_vnfrs: VNFRS record from DB
+
+ Returns:
+ Dict: Processed vnffg parameters.
+ """
- extra_dict = process_params(
- target_item,
- indata,
- target_viminfo,
- target_record_id,
- **kwargs,
+ processed_vnffg = {}
+ vnffgd = db_nsr.get("nsd", {}).get("vnffgd")
+ vnf_list = indata.get("vnf", [])
+ vim_text = ""
+
+ if vnf_list:
+ vim_text = "vim:" + vnf_list[0].get("vim-account-id", "")
+
+ vim_info = {}
+ vim_info[vim_text] = {}
+ processed_sfps = []
+ processed_classifications = []
+ processed_sfs = []
+ processed_sfis = []
+
+        # setting up initial empty entries for vnffg items in MongoDB.
+ self.db.set_list(
+ "nsrs",
+ {"_id": nsr_id},
+ {
+ "sfi": [],
+ "sf": [],
+ "sfp": [],
+ "classification": [],
+ },
+ )
+
+ vnffg = vnffgd[0]
+ # getting sfps
+ sfps = vnffg.get("nfpd")
+ for sfp in sfps:
+ processed_sfp = self._process_vnfgd_sfp(sfp)
+ # appending the list of processed sfps
+ processed_sfps.append(processed_sfp)
+
+ # getting sfs in sfp
+ sfs = sfp.get("position-desc-id")[0].get("cp-profile-id")
+ for sf in sfs:
+ processed_sf = self._process_vnfgd_sf(sf)
+
+ # appending the list of processed sfs
+ processed_sfs.append(processed_sf)
+
+ # getting sfis in sf
+ sfis_in_sf = sf.get("constituent-profile-elements")
+ sorted_sfis = sorted(sfis_in_sf, key=lambda i: i["order"])
+
+ for sfi in sorted_sfis:
+ processed_sfi = self._process_vnfgd_sfi(sfi, db_vnfrs)
+
+ processed_sfis.append(processed_sfi)
+
+ # getting classifications from sfp
+ classifications = sfp.get("position-desc-id")[0].get("match-attributes")
+ for classification in classifications:
+ processed_classification = self._process_vnfgd_classification(
+ classification, db_vnfrs
)
- self._assign_vim(target_vim)
- deployment_info = {
- "action_id": action_id,
- "nsr_id": nsr_id,
- "task_index": task_index,
- }
+ processed_classifications.append(processed_classification)
- new_item = {
- "deployment_info": deployment_info,
- "target_id": target_vim,
- "item": item_,
- "action": "CREATE",
- "target_record": f"{db_record}.{item_index}.vim_info.{target_vim}",
- "target_record_id": target_record_id,
- "extra_dict": extra_dict,
- "common_id": target_item.get("common_id", None),
- }
- diff_items.append(new_item)
- tasks_by_target_record_id[target_record_id] = new_item
- task_index += 1
+ processed_vnffg["sfi"] = processed_sfis
+ processed_vnffg["sf"] = processed_sfs
+ processed_vnffg["classification"] = processed_classifications
+ processed_vnffg["sfp"] = processed_sfps
- db_nsr_update[db_path + ".{}".format(item_index)] = target_item
+ # adding viminfo dict to sfi, sf sfp and classification
+ self._update_db_nsr_with_vnffg(processed_vnffg, vim_info, nsr_id)
- return diff_items, task_index
+ # updating indata with the processed vnffg parameters
+ indata["vnffg"].append(processed_vnffg)
def calculate_all_differences_to_deploy(
self,
# set list with diffs:
changes_list = []
+ # processing vnffg from the descriptor, if present
+ vnffgd = db_nsr.get("nsd", {}).get("vnffgd")
+ if vnffgd is not None:
+ indata["vnffg"] = []
+ vnf_list = indata["vnf"]
+ processed_vnffg = {}
+
+ # in case of ns-delete
+ if not vnf_list:
+ processed_vnffg["sfi"] = []
+ processed_vnffg["sf"] = []
+ processed_vnffg["classification"] = []
+ processed_vnffg["sfp"] = []
+
+ indata["vnffg"].append(processed_vnffg)
+
+ else:
+ self.process_vnffgd_descriptor(
+ indata=indata,
+ nsr_id=nsr_id,
+ db_nsr=db_nsr,
+ db_vnfrs=db_vnfrs,
+ )
+
+ # getting the updated db_nsr containing the vnffg parameters
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+
+ self.logger.debug(
+ "After processing vnffd parameters indata={} nsr={}".format(
+ indata, db_nsr
+ )
+ )
+
+ for item in ["sfp", "classification", "sf", "sfi"]:
+ self.logger.debug("process NS={} {}".format(nsr_id, item))
+ diff_items, task_index = self.calculate_diff_items(
+ indata=indata,
+ db_nsr=db_nsr,
+ db_ro_nsr=db_ro_nsr,
+ db_nsr_update=db_nsr_update,
+ item=item,
+ tasks_by_target_record_id=tasks_by_target_record_id,
+ action_id=action_id,
+ nsr_id=nsr_id,
+ task_index=task_index,
+ vnfr_id=None,
+ )
+ changes_list += diff_items
+
# NS vld, image and flavor
- for item in ["net", "image", "flavor", "affinity-or-anti-affinity-group"]:
+ for item in [
+ "net",
+ "image",
+ "flavor",
+ "affinity-or-anti-affinity-group",
+ ]:
self.logger.debug("process NS={} {}".format(nsr_id, item))
diff_items, task_index = self.calculate_diff_items(
indata=indata,
# VNF vlds and vdus
for vnfr_id, vnfr in db_vnfrs.items():
# vnfr_id need to be set as global variable for among others nested method _process_vdu_params
- for item in ["net", "vdu"]:
+ for item in ["net", "vdu", "shared-volumes"]:
self.logger.debug("process VNF={} {}".format(vnfr_id, item))
diff_items, task_index = self.calculate_diff_items(
indata=indata,
extra_dict=change.get("extra_dict", None),
)
+ self.logger.debug("ns.define_all_tasks task={}".format(task))
tasks_by_target_record_id[change["target_record_id"]] = task
db_new_tasks.append(task)
- if change.get("common_id"):
- task["common_id"] = change["common_id"]
+ if change.get("common_id"):
+ task["common_id"] = change["common_id"]
+
+ def upload_all_tasks(
+ self,
+ db_new_tasks,
+ now,
+ ):
+ """Function to save all tasks in the common DB
+
+ Args:
+ db_new_tasks (List): tasks list to be created
+ now (time): current time
+
+ """
+
+ nb_ro_tasks = 0 # for logging
+
+ for db_task in db_new_tasks:
+ target_id = db_task.pop("target_id")
+ common_id = db_task.get("common_id")
+
+ # Do not check tasks whose vim_status is DELETED,
+ # because in manual healing there are two tasks for the same vdur:
+ # one with vim_status DELETED and the other one with the actual VM status.
+
+ if common_id:
+ if self.db.set_one(
+ "ro_tasks",
+ q_filter={
+ "target_id": target_id,
+ "tasks.common_id": common_id,
+ "vim_info.vim_status.ne": "DELETED",
+ },
+ update_dict={"to_check_at": now, "modified_at": now},
+ push={"tasks": db_task},
+ fail_on_empty=False,
+ ):
+ continue
+
+ if not self.db.set_one(
+ "ro_tasks",
+ q_filter={
+ "target_id": target_id,
+ "tasks.target_record": db_task["target_record"],
+ "vim_info.vim_status.ne": "DELETED",
+ },
+ update_dict={"to_check_at": now, "modified_at": now},
+ push={"tasks": db_task},
+ fail_on_empty=False,
+ ):
+ # Create a ro_task
+ self.logger.debug("Updating database, Creating ro_tasks")
+ db_ro_task = Ns._create_ro_task(target_id, db_task)
+ nb_ro_tasks += 1
+ self.db.create("ro_tasks", db_ro_task)
+
+ self.logger.debug(
+ "Created {} ro_tasks; {} tasks - db_new_tasks={}".format(
+ nb_ro_tasks, len(db_new_tasks), db_new_tasks
+ )
+ )
+
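+ # A minimal usage sketch of the upsert-or-create pattern above, assuming
+ # the osm_common DbBase interface (hypothetical task data):
+ #   matched = self.db.set_one(
+ #       "ro_tasks",
+ #       q_filter={"target_id": "vim:xyz", "tasks.target_record": rec,
+ #                 "vim_info.vim_status.ne": "DELETED"},
+ #       update_dict={"to_check_at": now, "modified_at": now},
+ #       push={"tasks": db_task},       # append task to the matched ro_task
+ #       fail_on_empty=False,           # return None instead of raising
+ #   )
+ #   if not matched:                    # no ro_task for this record yet
+ #       self.db.create("ro_tasks", Ns._create_ro_task("vim:xyz", db_task))
+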
+ def upload_recreate_tasks(
+ self,
+ db_new_tasks,
+ now,
+ ):
+ """Function to save recreate tasks in the common DB
+
+ Args:
+ db_new_tasks (List): tasks list to be created
+ now (time): current time
+
+ """
+
+ nb_ro_tasks = 0 # for logging
+
+ for db_task in db_new_tasks:
+ target_id = db_task.pop("target_id")
+ self.logger.debug("target_id={} db_task={}".format(target_id, db_task))
+
+ action = db_task.get("action", None)
+
+ # Create a ro_task
+ self.logger.debug("Updating database, Creating ro_tasks")
+ db_ro_task = Ns._create_ro_task(target_id, db_task)
+
+ # If DELETE task: the associated created items should be removed
+ # (except persistent volumes):
+ if action == "DELETE":
+ db_ro_task["vim_info"]["created"] = True
+ db_ro_task["vim_info"]["created_items"] = db_task.get(
+ "created_items", {}
+ )
+ db_ro_task["vim_info"]["volumes_to_hold"] = db_task.get(
+ "volumes_to_hold", []
+ )
+ db_ro_task["vim_info"]["vim_id"] = db_task.get("vim_id", None)
+
+ nb_ro_tasks += 1
+ self.logger.debug("upload_all_tasks db_ro_task={}".format(db_ro_task))
+ self.db.create("ro_tasks", db_ro_task)
+
+ self.logger.debug(
+ "Created {} ro_tasks; {} tasks - db_new_tasks={}".format(
+ nb_ro_tasks, len(db_new_tasks), db_new_tasks
+ )
+ )
+
+ def _prepare_created_items_for_healing(
+ self,
+ nsr_id,
+ target_record,
+ ):
+ created_items = {}
+ # Get created_items from ro_task
+ ro_tasks = self.db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id})
+ for ro_task in ro_tasks:
+ for task in ro_task["tasks"]:
+ if (
+ task["target_record"] == target_record
+ and task["action"] == "CREATE"
+ and ro_task["vim_info"]["created_items"]
+ ):
+ created_items = ro_task["vim_info"]["created_items"]
+ break
+
+ return created_items
+
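+ # Illustrative sketch of a created_items dict as returned above and later
+ # partitioned on ":" (hypothetical IDs; values mark items RO created
+ # alongside the VM):
+ #   {"volume:6cdd2203-...": True, "port:6e2b9b1c-...": True}
+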
+ def _prepare_persistent_volumes_for_healing(
+ self,
+ target_id,
+ existing_vdu,
+ ):
+ # The associated volumes of the VM shouldn't be removed
+ volumes_list = []
+ vim_details = {}
+ vim_details_text = existing_vdu["vim_info"][target_id].get("vim_details", None)
+ if vim_details_text:
+ vim_details = yaml.safe_load(f"{vim_details_text}")
+
+ for vol_id in vim_details.get("os-extended-volumes:volumes_attached", []):
+ volumes_list.append(vol_id["id"])
+
+ return volumes_list
+
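+ # A sketch of the vim_details text parsed above; the field name is the
+ # standard OpenStack Nova attribute (volume IDs are hypothetical):
+ #   vim_details_text = """
+ #   status: ACTIVE
+ #   os-extended-volumes:volumes_attached:
+ #     - id: 6cdd2203-4e38-4f6f-b2f0-...
+ #   """
+ # yielding volumes_list == ["6cdd2203-4e38-4f6f-b2f0-..."]
+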
+ def prepare_changes_to_recreate(
+ self,
+ indata,
+ nsr_id,
+ db_nsr,
+ db_vnfrs,
+ db_ro_nsr,
+ action_id,
+ tasks_by_target_record_id,
+ ):
+ """This method will obtain an ordered list of items (`changes_list`)
+ to be created and deleted to meet the recreate request.
+ """
+
+ self.logger.debug(
+ "ns.prepare_changes_to_recreate nsr_id={} indata={}".format(nsr_id, indata)
+ )
+
+ task_index = 0
+ # set list with diffs:
+ changes_list = []
+ db_path = self.db_path_map["vdu"]
+ target_list = indata.get("healVnfData", [])
+ vdu2cloud_init = indata.get("cloud_init_content") or {}
+ ro_nsr_public_key = db_ro_nsr["public_key"]
+
+ # Check each VNF of the target
+ for target_vnf in target_list:
+ # Find this VNF in the list from DB, raise exception if vnfInstanceId is not found
+ vnfr_id = target_vnf["vnfInstanceId"]
+ existing_vnf = db_vnfrs.get(vnfr_id, {})
+ db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
+ # vim_account_id = existing_vnf.get("vim-account-id", "")
+
+ target_vdus = target_vnf.get("additionalParams", {}).get("vdu", [])
+ # Check each VDU of this VNF
+ if not target_vdus:
+ # Create target_vdu_list from DB, if VDUs are not specified
+ target_vdus = []
+ for existing_vdu in existing_vnf.get("vdur", []):
+ vdu_name = existing_vdu.get("vdu-name", None)
+ vdu_index = existing_vdu.get("count-index", 0)
+ vdu_to_be_healed = {"vdu-id": vdu_name, "count-index": vdu_index}
+ target_vdus.append(vdu_to_be_healed)
+ for target_vdu in target_vdus:
+ vdu_name = target_vdu.get("vdu-id", None)
+ # For a multi-instance VDU, count-index is mandatory
+ # For a single-instance VDU, count-index is 0
+ count_index = target_vdu.get("count-index", 0)
+ item_index = 0
+ existing_instance = {}
+ if existing_vnf:
+ for instance in existing_vnf.get("vdur", {}):
+ if (
+ instance["vdu-name"] == vdu_name
+ and instance["count-index"] == count_index
+ ):
+ existing_instance = instance
+ break
+ else:
+ item_index += 1
+
+ target_record_id = "{}.{}".format(db_record, existing_instance["id"])
+
+ # The target VIM is the one already existing in DB to recreate
+ for target_vim, target_viminfo in existing_instance.get(
+ "vim_info", {}
+ ).items():
+ # step 1 vdu to be deleted
+ self._assign_vim(target_vim)
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+
+ target_record = f"{db_record}.{item_index}.vim_info.{target_vim}"
+ created_items = self._prepare_created_items_for_healing(
+ nsr_id, target_record
+ )
+
+ volumes_to_hold = self._prepare_persistent_volumes_for_healing(
+ target_vim, existing_instance
+ )
+
+ # Specific extra params for recreate tasks:
+ extra_dict = {
+ "created_items": created_items,
+ "vim_id": existing_instance["vim-id"],
+ "volumes_to_hold": volumes_to_hold,
+ }
+
+ changes_list.append(
+ {
+ "deployment_info": deployment_info,
+ "target_id": target_vim,
+ "item": "vdu",
+ "action": "DELETE",
+ "target_record": target_record,
+ "target_record_id": target_record_id,
+ "extra_dict": extra_dict,
+ }
+ )
+ delete_task_id = f"{action_id}:{task_index}"
+ task_index += 1
+
+ # step 2 vdu to be created
+ kwargs = {}
+ kwargs.update(
+ {
+ "vnfr_id": vnfr_id,
+ "nsr_id": nsr_id,
+ "vnfr": existing_vnf,
+ "vdu2cloud_init": vdu2cloud_init,
+ "tasks_by_target_record_id": tasks_by_target_record_id,
+ "logger": self.logger,
+ "db": self.db,
+ "fs": self.fs,
+ "ro_nsr_public_key": ro_nsr_public_key,
+ }
+ )
+
+ extra_dict = self._process_recreate_vdu_params(
+ existing_instance,
+ db_nsr,
+ target_viminfo,
+ target_record_id,
+ target_vim,
+ **kwargs,
+ )
+
+ # The CREATE task depends on the DELETE task
+ extra_dict["depends_on"] = [delete_task_id]
+
+ # Add volumes created from created_items, if any.
+ # Ports are deleted by the DELETE task and automatically re-created by the CREATE task.
+ volumes = {}
+ for k, v in created_items.items():
+ try:
+ k_item, _, k_id = k.partition(":")
+ if k_item == "volume":
+ volumes[k] = v
+ except Exception as e:
+ self.logger.error(
+ "Error evaluating created item {}: {}".format(k, e)
+ )
+ extra_dict["previous_created_volumes"] = volumes
+
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+ self._assign_vim(target_vim)
+
+ new_item = {
+ "deployment_info": deployment_info,
+ "target_id": target_vim,
+ "item": "vdu",
+ "action": "CREATE",
+ "target_record": target_record,
+ "target_record_id": target_record_id,
+ "extra_dict": extra_dict,
+ }
+ changes_list.append(new_item)
+ tasks_by_target_record_id[target_record_id] = new_item
+ task_index += 1
+
+ return changes_list
+
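+ # For each healed VDU the method therefore emits a DELETE/CREATE pair; a
+ # minimal sketch of the two entries (hypothetical IDs):
+ #   {"item": "vdu", "action": "DELETE",
+ #    "target_record": "vnfrs:<vnfr-id>:vdur.0.vim_info.vim:<vim-id>", ...}
+ #   {"item": "vdu", "action": "CREATE", "target_record": <same record>,
+ #    "extra_dict": {"depends_on": ["<action_id>:0"], ...}, ...}
+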
+ def recreate(self, session, indata, version, nsr_id, *args, **kwargs):
+ self.logger.debug("ns.recreate nsr_id={} indata={}".format(nsr_id, indata))
+ # TODO: validate_input(indata, recreate_schema)
+ action_id = indata.get("action_id", str(uuid4()))
+ # get current deployment
+ db_vnfrs = {} # vnf's info indexed by _id
+ step = ""
+ logging_text = "Recreate nsr_id={} action_id={} indata={}".format(
+ nsr_id, action_id, indata
+ )
+ self.logger.debug(logging_text + "Enter")
+
+ try:
+ step = "Getting ns and vnfr record from db"
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ db_new_tasks = []
+ tasks_by_target_record_id = {}
+ # read from db: vnf's of this ns
+ step = "Getting vnfrs from db"
+ db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+ self.logger.debug("ns.recreate: db_vnfrs_list={}".format(db_vnfrs_list))
+
+ if not db_vnfrs_list:
+ raise NsException("Cannot obtain associated VNF for ns")
+
+ for vnfr in db_vnfrs_list:
+ db_vnfrs[vnfr["_id"]] = vnfr
- def upload_all_tasks(
- self,
- db_new_tasks,
- now,
- ):
- """Function to save all tasks in the common DB
+ now = time()
+ db_ro_nsr = self.db.get_one("ro_nsrs", {"_id": nsr_id}, fail_on_empty=False)
+ self.logger.debug("ns.recreate: db_ro_nsr={}".format(db_ro_nsr))
- Args:
- db_new_tasks (List): tasks list to be created
- now (time): current time
+ if not db_ro_nsr:
+ db_ro_nsr = self._create_db_ro_nsrs(nsr_id, now)
- """
+ with self.write_lock:
+ # NS
+ step = "process NS elements"
+ changes_list = self.prepare_changes_to_recreate(
+ indata=indata,
+ nsr_id=nsr_id,
+ db_nsr=db_nsr,
+ db_vnfrs=db_vnfrs,
+ db_ro_nsr=db_ro_nsr,
+ action_id=action_id,
+ tasks_by_target_record_id=tasks_by_target_record_id,
+ )
- nb_ro_tasks = 0 # for logging
+ self.define_all_tasks(
+ changes_list=changes_list,
+ db_new_tasks=db_new_tasks,
+ tasks_by_target_record_id=tasks_by_target_record_id,
+ )
- for db_task in db_new_tasks:
- target_id = db_task.pop("target_id")
- common_id = db_task.get("common_id")
+ # Delete all ro_tasks registered for the target vdurs (target_record).
+ # If a task of type CREATE exists, the VIM will try to get info from deleted VMs,
+ # so remove all tasks related to the target record.
+ ro_tasks = self.db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id})
+ for change in changes_list:
+ for ro_task in ro_tasks:
+ for task in ro_task["tasks"]:
+ if task["target_record"] == change["target_record"]:
+ self.db.del_one(
+ "ro_tasks",
+ q_filter={
+ "_id": ro_task["_id"],
+ "modified_at": ro_task["modified_at"],
+ },
+ fail_on_empty=False,
+ )
- if common_id:
- if self.db.set_one(
- "ro_tasks",
- q_filter={
- "target_id": target_id,
- "tasks.common_id": common_id,
- },
- update_dict={"to_check_at": now, "modified_at": now},
- push={"tasks": db_task},
- fail_on_empty=False,
- ):
- continue
+ step = "Updating database, Appending tasks to ro_tasks"
+ self.upload_recreate_tasks(
+ db_new_tasks=db_new_tasks,
+ now=now,
+ )
- if not self.db.set_one(
- "ro_tasks",
- q_filter={
- "target_id": target_id,
- "tasks.target_record": db_task["target_record"],
- },
- update_dict={"to_check_at": now, "modified_at": now},
- push={"tasks": db_task},
- fail_on_empty=False,
- ):
- # Create a ro_task
- self.logger.debug("Updating database, Creating ro_tasks")
- db_ro_task = Ns._create_ro_task(target_id, db_task)
- nb_ro_tasks += 1
- self.db.create("ro_tasks", db_ro_task)
+ self.logger.debug(
+ logging_text + "Exit. Created {} tasks".format(len(db_new_tasks))
+ )
- self.logger.debug(
- "Created {} ro_tasks; {} tasks - db_new_tasks={}".format(
- nb_ro_tasks, len(db_new_tasks), db_new_tasks
+ return (
+ {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
+ action_id,
+ True,
)
- )
+ except Exception as e:
+ if isinstance(e, (DbException, NsException)):
+ self.logger.error(
+ logging_text + "Exit Exception while '{}': {}".format(step, e)
+ )
+ else:
+ e = traceback_format_exc()
+ self.logger.critical(
+ logging_text + "Exit Exception while '{}': {}".format(step, e),
+ exc_info=True,
+ )
+
+ raise NsException(e)
def deploy(self, session, indata, version, nsr_id, *args, **kwargs):
self.logger.debug("ns.deploy nsr_id={} indata={}".format(nsr_id, indata))
try:
step = "Getting ns and vnfr record from db"
db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ self.logger.debug("ns.deploy: db_nsr={}".format(db_nsr))
db_new_tasks = []
tasks_by_target_record_id = {}
# read from db: vnf's of this ns
for vnfr in db_vnfrs_list:
db_vnfrs[vnfr["_id"]] = vnfr
db_vnfrs_update[vnfr["_id"]] = {}
+ self.logger.debug("ns.deploy db_vnfrs={}".format(db_vnfrs))
now = time()
db_ro_nsr = self.db.get_one("ro_nsrs", {"_id": nsr_id}, fail_on_empty=False)
return None, None, True
def status(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
- # self.logger.debug("ns.status version={} nsr_id={}, action_id={} indata={}"
- # .format(version, nsr_id, action_id, indata))
+ self.logger.debug(
+ "ns.status version={} nsr_id={}, action_id={} indata={}".format(
+ version, nsr_id, action_id, indata
+ )
+ )
task_list = []
done = 0
total = 0
error_text = "Error at {} {}: {}".format(
task["action"].lower(),
task["item"],
- ro_task["vim_info"].get("vim_details") or "unknown",
+ ro_task["vim_info"].get("vim_message") or "unknown",
)
details.append(error_text)
elif task["status"] in ("SCHEDULED", "BUILD"):
return_data = {
"status": global_status,
- "details": ". ".join(details)
- if details
- else "progress {}/{}".format(done, total),
+ "details": (
+ ". ".join(details) if details else "progress {}/{}".format(done, total)
+ ),
"nsr_id": nsr_id,
"action_id": action_id,
"tasks": task_list,
return return_data, None, True
+ def recreate_status(
+ self, session, indata, version, nsr_id, action_id, *args, **kwargs
+ ):
+ return self.status(session, indata, version, nsr_id, action_id, *args, **kwargs)
+
def cancel(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
print(
"ns.cancel session={} indata={} version={} nsr_id={}, action_id={}".format(
return None, None, True
+ def rebuild_start_stop_task(
+ self,
+ vdu_id,
+ vnf_id,
+ vdu_index,
+ action_id,
+ nsr_id,
+ task_index,
+ target_vim,
+ extra_dict,
+ ):
+ self._assign_vim(target_vim)
+ target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+ vnf_id, vdu_index, target_vim
+ )
+ target_record_id = "vnfrs:{}:vdur.{}".format(vnf_id, vdu_id)
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+
+ task = Ns._create_task(
+ deployment_info=deployment_info,
+ target_id=target_vim,
+ item="update",
+ action="EXEC",
+ target_record=target_record,
+ target_record_id=target_record_id,
+ extra_dict=extra_dict,
+ )
+ return task
+
+ def rebuild_start_stop(
+ self, session, action_dict, version, nsr_id, *args, **kwargs
+ ):
+ task_index = 0
+ extra_dict = {}
+ now = time()
+ action_id = action_dict.get("action_id", str(uuid4()))
+ step = ""
+ logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
+ self.logger.debug(logging_text + "Enter")
+
+ action = next(iter(action_dict))
+ task_dict = action_dict.get(action)
+ vim_vm_id = task_dict.get("vim_vm_id")
+
+ if action_dict.get("stop"):
+ action = "shutoff"
+ db_new_tasks = []
+ try:
+ step = "lock the operation & do task creation"
+ with self.write_lock:
+ extra_dict["params"] = {
+ "vim_vm_id": vim_vm_id,
+ "action": action,
+ }
+ task = self.rebuild_start_stop_task(
+ task_dict["vdu_id"],
+ task_dict["vnf_id"],
+ task_dict["vdu_index"],
+ action_id,
+ nsr_id,
+ task_index,
+ task_dict["target_vim"],
+ extra_dict,
+ )
+ db_new_tasks.append(task)
+ step = "upload Task to db"
+ self.upload_all_tasks(
+ db_new_tasks=db_new_tasks,
+ now=now,
+ )
+ self.logger.debug(
+ logging_text + "Exit. Created {} tasks".format(len(db_new_tasks))
+ )
+ return (
+ {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
+ action_id,
+ True,
+ )
+ except Exception as e:
+ if isinstance(e, (DbException, NsException)):
+ self.logger.error(
+ logging_text + "Exit Exception while '{}': {}".format(step, e)
+ )
+ else:
+ e = traceback_format_exc()
+ self.logger.critical(
+ logging_text + "Exit Exception while '{}': {}".format(step, e),
+ exc_info=True,
+ )
+ raise NsException(e)
+
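+ # A sketch of the action_dict this method expects (key names inferred from
+ # the accesses above; values hypothetical):
+ #   {"start": {"vdu_id": "mgmtVM-0", "vnf_id": "<vnfr uuid>",
+ #              "vdu_index": 0, "vim_vm_id": "<vim uuid>",
+ #              "target_vim": "vim:<vim-account-id>"}}
+ # With the key "stop" instead of "start", the action is mapped to "shutoff".
+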
def get_deploy(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
nsrs = self.db.get_list("nsrs", {})
return_data = []
return_data.append(task["action_id"])
return return_data, None, True
+
+ def migrate_task(
+ self, vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+ ):
+ target_vim, vim_info = next(iter(vdu["vim_info"].items()))
+ self._assign_vim(target_vim)
+ target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+ vnf["_id"], vdu_index, target_vim
+ )
+ target_record_id = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"])
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+
+ task = Ns._create_task(
+ deployment_info=deployment_info,
+ target_id=target_vim,
+ item="migrate",
+ action="EXEC",
+ target_record=target_record,
+ target_record_id=target_record_id,
+ extra_dict=extra_dict,
+ )
+
+ return task
+
+ def migrate(self, session, indata, version, nsr_id, *args, **kwargs):
+ task_index = 0
+ extra_dict = {}
+ now = time()
+ action_id = indata.get("action_id", str(uuid4()))
+ step = ""
+ logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
+ self.logger.debug(logging_text + "Enter")
+ try:
+ vnf_instance_id = indata["vnfInstanceId"]
+ step = "Getting vnfrs from db"
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
+ vdu = indata.get("vdu")
+ migrateToHost = indata.get("migrateToHost")
+ db_new_tasks = []
+
+ with self.write_lock:
+ if vdu is not None:
+ vdu_id = indata["vdu"]["vduId"]
+ vdu_count_index = indata["vdu"].get("vduCountIndex", 0)
+ for vdu_index, vdu in enumerate(db_vnfr["vdur"]):
+ if (
+ vdu["vdu-id-ref"] == vdu_id
+ and vdu["count-index"] == vdu_count_index
+ ):
+ extra_dict["params"] = {
+ "vim_vm_id": vdu["vim-id"],
+ "migrate_host": migrateToHost,
+ "vdu_vim_info": vdu["vim_info"],
+ }
+ step = "Creating migration task for vdu:{}".format(vdu)
+ task = self.migrate_task(
+ vdu,
+ db_vnfr,
+ vdu_index,
+ action_id,
+ nsr_id,
+ task_index,
+ extra_dict,
+ )
+ db_new_tasks.append(task)
+ task_index += 1
+ break
+ else:
+ for vdu_index, vdu in enumerate(db_vnfr["vdur"]):
+ extra_dict["params"] = {
+ "vim_vm_id": vdu["vim-id"],
+ "migrate_host": migrateToHost,
+ "vdu_vim_info": vdu["vim_info"],
+ }
+ step = "Creating migration task for vdu:{}".format(vdu)
+ task = self.migrate_task(
+ vdu,
+ db_vnfr,
+ vdu_index,
+ action_id,
+ nsr_id,
+ task_index,
+ extra_dict,
+ )
+ db_new_tasks.append(task)
+ task_index += 1
+
+ self.upload_all_tasks(
+ db_new_tasks=db_new_tasks,
+ now=now,
+ )
+
+ self.logger.debug(
+ logging_text + "Exit. Created {} tasks".format(len(db_new_tasks))
+ )
+ return (
+ {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
+ action_id,
+ True,
+ )
+ except Exception as e:
+ if isinstance(e, (DbException, NsException)):
+ self.logger.error(
+ logging_text + "Exit Exception while '{}': {}".format(step, e)
+ )
+ else:
+ e = traceback_format_exc()
+ self.logger.critical(
+ logging_text + "Exit Exception while '{}': {}".format(step, e),
+ exc_info=True,
+ )
+ raise NsException(e)
+
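+ # A sketch of the migrate payload handled above (field names taken from the
+ # accesses in this method; values hypothetical):
+ #   {"vnfInstanceId": "<vnfr uuid>", "migrateToHost": "compute-3",
+ #    "vdu": {"vduId": "mgmtVM", "vduCountIndex": 0}}
+ # Omitting "vdu" creates one migration task per vdur of the VNF.
+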
+ def verticalscale_task(
+ self, vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+ ):
+ target_vim, vim_info = next(iter(vdu["vim_info"].items()))
+ self._assign_vim(target_vim)
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ flavor_text = ns_preffix + ":flavor." + vdu["ns-flavor-id"]
+ extra_dict["depends_on"] = [flavor_text]
+ extra_dict["params"].update({"flavor_id": "TASK-" + flavor_text})
+ target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+ vnf["_id"], vdu_index, target_vim
+ )
+ target_record_id = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"])
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+
+ task = Ns._create_task(
+ deployment_info=deployment_info,
+ target_id=target_vim,
+ item="verticalscale",
+ action="EXEC",
+ target_record=target_record,
+ target_record_id=target_record_id,
+ extra_dict=extra_dict,
+ )
+ return task
+
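+ # The dependency wiring above is the key detail: the verticalscale task
+ # carries "TASK-nsrs:<nsr-id>:flavor.<ns-flavor-id>" as its flavor_id, so
+ # the worker can resolve it to the VIM id produced by the flavor task it
+ # depends on. A minimal sketch (hypothetical ids):
+ #   extra_dict["depends_on"] == ["nsrs:1234:flavor.0"]
+ #   extra_dict["params"]["flavor_id"] == "TASK-nsrs:1234:flavor.0"
+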
+ def verticalscale_flavor_task(
+ self, vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+ ):
+ target_vim, vim_info = next(iter(vdu["vim_info"].items()))
+ self._assign_vim(target_vim)
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ target_record = "nsrs:{}:flavor.{}.vim_info.{}".format(
+ nsr_id, len(db_nsr["flavor"]) - 1, target_vim
+ )
+ target_record_id = "nsrs:{}:flavor.{}".format(nsr_id, len(db_nsr["flavor"]) - 1)
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+ task = Ns._create_task(
+ deployment_info=deployment_info,
+ target_id=target_vim,
+ item="flavor",
+ action="CREATE",
+ target_record=target_record,
+ target_record_id=target_record_id,
+ extra_dict=extra_dict,
+ )
+ return task
+
+ def verticalscale(self, session, indata, version, nsr_id, *args, **kwargs):
+ task_index = 0
+ extra_dict = {}
+ flavor_extra_dict = {}
+ now = time()
+ action_id = indata.get("action_id", str(uuid4()))
+ step = ""
+ logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
+ self.logger.debug(logging_text + "Enter")
+ try:
+ VnfFlavorData = indata.get("changeVnfFlavorData")
+ vnf_instance_id = VnfFlavorData["vnfInstanceId"]
+ step = "Getting vnfrs from db"
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
+ vduid = VnfFlavorData["additionalParams"]["vduid"]
+ vduCountIndex = VnfFlavorData["additionalParams"]["vduCountIndex"]
+ virtualMemory = VnfFlavorData["additionalParams"]["virtualMemory"]
+ numVirtualCpu = VnfFlavorData["additionalParams"]["numVirtualCpu"]
+ sizeOfStorage = VnfFlavorData["additionalParams"]["sizeOfStorage"]
+ flavor_dict = {
+ "name": vduid + "-flv",
+ "ram": virtualMemory,
+ "vcpus": numVirtualCpu,
+ "disk": sizeOfStorage,
+ }
+ flavor_data = {
+ "ram": virtualMemory,
+ "vcpus": numVirtualCpu,
+ "disk": sizeOfStorage,
+ }
+ flavor_extra_dict["find_params"] = {"flavor_data": flavor_data}
+ flavor_extra_dict["params"] = {"flavor_data": flavor_dict}
+ db_new_tasks = []
+ step = "Creating Tasks for vertical scaling"
+ with self.write_lock:
+ for vdu_index, vdu in enumerate(db_vnfr["vdur"]):
+ if (
+ vdu["vdu-id-ref"] == vduid
+ and vdu["count-index"] == vduCountIndex
+ ):
+ extra_dict["params"] = {
+ "vim_vm_id": vdu["vim-id"],
+ "flavor_dict": flavor_dict,
+ "vdu-id-ref": vdu["vdu-id-ref"],
+ "count-index": vdu["count-index"],
+ "vnf_instance_id": vnf_instance_id,
+ }
+ task = self.verticalscale_flavor_task(
+ vdu,
+ db_vnfr,
+ vdu_index,
+ action_id,
+ nsr_id,
+ task_index,
+ flavor_extra_dict,
+ )
+ db_new_tasks.append(task)
+ task_index += 1
+ task = self.verticalscale_task(
+ vdu,
+ db_vnfr,
+ vdu_index,
+ action_id,
+ nsr_id,
+ task_index,
+ extra_dict,
+ )
+ db_new_tasks.append(task)
+ task_index += 1
+ break
+ self.upload_all_tasks(
+ db_new_tasks=db_new_tasks,
+ now=now,
+ )
+ self.logger.debug(
+ logging_text + "Exit. Created {} tasks".format(len(db_new_tasks))
+ )
+ return (
+ {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
+ action_id,
+ True,
+ )
+ except Exception as e:
+ if isinstance(e, (DbException, NsException)):
+ self.logger.error(
+ logging_text + "Exit Exception while '{}': {}".format(step, e)
+ )
+ else:
+ e = traceback_format_exc()
+ self.logger.critical(
+ logging_text + "Exit Exception while '{}': {}".format(step, e),
+ exc_info=True,
+ )
+ raise NsException(e)
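+
+ # A sketch of the changeVnfFlavorData payload consumed above (field names
+ # taken from the accesses in this method; values hypothetical):
+ #   {"changeVnfFlavorData": {
+ #       "vnfInstanceId": "<vnfr uuid>",
+ #       "additionalParams": {"vduid": "mgmtVM", "vduCountIndex": 0,
+ #                            "virtualMemory": 4096, "numVirtualCpu": 2,
+ #                            "sizeOfStorage": 20}}}
+ # yielding a flavor CREATE task followed by a verticalscale EXEC task.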