##
from http import HTTPStatus
+from itertools import product
import logging
from random import choice as random_choice
from threading import Lock
Returns:
Dict[str, Any]: [description]
"""
+ db = kwargs.get("db")
+ target_vdur = {}
+
flavor_data = {
"disk": int(target_flavor["storage-gb"]),
"ram": int(target_flavor["memory-mb"]),
"vcpus": int(target_flavor["vcpu-count"]),
}
- target_vdur = {}
for vnf in indata.get("vnf", []):
for vdur in vnf.get("vdur", []):
- if vdur.get("ns-flavor-id") == target_flavor["id"]:
+ if vdur.get("ns-flavor-id") == target_flavor.get("id"):
target_vdur = vdur
+ if db and isinstance(indata.get("vnf"), list):
+ vnfd_id = indata.get("vnf")[0].get("vnfd-id")
+ vnfd = db.get_one("vnfds", {"_id": vnfd_id})
+ # check if there is persistent root disk
+ for vdu in vnfd.get("vdu", ()):
+ if vdu["name"] == target_vdur.get("vdu-name"):
+ for vsd in vnfd.get("virtual-storage-desc", ()):
+ if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
+ root_disk = vsd
+ if (
+ root_disk.get("type-of-storage")
+ == "persistent-storage:persistent-storage"
+ ):
+ flavor_data["disk"] = 0
+
for storage in target_vdur.get("virtual-storages", []):
if (
storage.get("type-of-storage")
return extra_dict
+ @staticmethod
+ def find_persistent_root_volumes(
+ vnfd: dict,
+ target_vdu: str,
+ vdu_instantiation_volumes_list: list,
+ disk_list: list,
+ ) -> (list, dict):
+ """Find the persistent root volumes and add them to the disk_list
+ by parsing the instantiation parameters
+
+ Args:
+ vnfd: VNFD
+ target_vdu: processed VDU
+ vdu_instantiation_volumes_list: instantiation parameters for the each VDU as a list
+ disk_list: to be filled up
+
+ Returns:
+ disk_list: filled VDU list which is used for VDU creation
+
+ """
+ persistent_root_disk = {}
+
+ for vdu, vsd in product(
+ vnfd.get("vdu", ()), vnfd.get("virtual-storage-desc", ())
+ ):
+ if (
+ vdu["name"] == target_vdu["vdu-name"]
+ and vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]
+ ):
+ root_disk = vsd
+ if (
+ root_disk.get("type-of-storage")
+ == "persistent-storage:persistent-storage"
+ ):
+ for vdu_volume in vdu_instantiation_volumes_list:
+
+ if (
+ vdu_volume["vim-volume-id"]
+ and root_disk["id"] == vdu_volume["name"]
+ ):
+
+ persistent_root_disk[vsd["id"]] = {
+ "vim_volume_id": vdu_volume["vim-volume-id"],
+ "image_id": vdu.get("sw-image-desc"),
+ }
+
+ disk_list.append(persistent_root_disk[vsd["id"]])
+
+ # There can be only one root disk, when we find it, it will return the result
+ return disk_list, persistent_root_disk
+
+ else:
+
+ if root_disk.get("size-of-storage"):
+ persistent_root_disk[vsd["id"]] = {
+ "image_id": vdu.get("sw-image-desc"),
+ "size": root_disk.get("size-of-storage"),
+ }
+
+ disk_list.append(persistent_root_disk[vsd["id"]])
+ return disk_list, persistent_root_disk
+
+ return disk_list, persistent_root_disk
+
+ @staticmethod
+ def find_persistent_volumes(
+ persistent_root_disk: dict,
+ target_vdu: str,
+ vdu_instantiation_volumes_list: list,
+ disk_list: list,
+ ) -> list:
+ """Find the ordinary persistent volumes and add them to the disk_list
+ by parsing the instantiation parameters
+
+ Args:
+ persistent_root_disk: persistent root disk dictionary
+ target_vdu: processed VDU
+ vdu_instantiation_volumes_list: instantiation parameters for the each VDU as a list
+ disk_list: to be filled up
+
+ Returns:
+ disk_list: filled VDU list which is used for VDU creation
+
+ """
+ # Find the ordinary volumes which are not added to the persistent_root_disk
+ persistent_disk = {}
+ for disk in target_vdu.get("virtual-storages", {}):
+ if (
+ disk.get("type-of-storage") == "persistent-storage:persistent-storage"
+ and disk["id"] not in persistent_root_disk.keys()
+ ):
+ for vdu_volume in vdu_instantiation_volumes_list:
+
+ if vdu_volume["vim-volume-id"] and disk["id"] == vdu_volume["name"]:
+
+ persistent_disk[disk["id"]] = {
+ "vim_volume_id": vdu_volume["vim-volume-id"],
+ }
+ disk_list.append(persistent_disk[disk["id"]])
+
+ else:
+ if disk["id"] not in persistent_disk.keys():
+ persistent_disk[disk["id"]] = {
+ "size": disk.get("size-of-storage"),
+ }
+ disk_list.append(persistent_disk[disk["id"]])
+
+ return disk_list
+
@staticmethod
def _process_vdu_params(
target_vdu: Dict[str, Any],
extra_dict = {"depends_on": [image_text, flavor_text]}
net_list = []
+ # If the position info is provided for all the interfaces, it will be sorted
+ # according to position number ascendingly.
+ if all(
+ i.get("position") + 1
+ for i in target_vdu["interfaces"]
+ if i.get("position") is not None
+ ):
+ sorted_interfaces = sorted(
+ target_vdu["interfaces"],
+ key=lambda x: (x.get("position") is None, x.get("position")),
+ )
+ target_vdu["interfaces"] = sorted_interfaces
+
+ # If the position info is provided for some interfaces but not all of them, the interfaces
+ # which has specific position numbers will be placed and others' positions will not be taken care.
+ else:
+ if any(
+ i.get("position") + 1
+ for i in target_vdu["interfaces"]
+ if i.get("position") is not None
+ ):
+ n = len(target_vdu["interfaces"])
+ sorted_interfaces = [-1] * n
+ k, m = 0, 0
+ while k < n:
+ if target_vdu["interfaces"][k].get("position"):
+ idx = target_vdu["interfaces"][k]["position"]
+ sorted_interfaces[idx - 1] = target_vdu["interfaces"][k]
+ k += 1
+ while m < n:
+ if not target_vdu["interfaces"][m].get("position"):
+ idy = sorted_interfaces.index(-1)
+ sorted_interfaces[idy] = target_vdu["interfaces"][m]
+ m += 1
+
+ target_vdu["interfaces"] = sorted_interfaces
+
+ # If the position info is not provided for the interfaces, interfaces will be attached
+ # according to the order in the VNFD.
for iface_index, interface in enumerate(target_vdu["interfaces"]):
if interface.get("ns-vld-id"):
net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
cloud_config["key-pairs"] = ssh_keys
persistent_root_disk = {}
+ vdu_instantiation_volumes_list = []
disk_list = []
vnfd_id = vnfr["vnfd-id"]
vnfd = db.get_one("vnfds", {"_id": vnfd_id})
- for vdu in vnfd.get("vdu", ()):
- if vdu["name"] == target_vdu["vdu-name"]:
- for vsd in vnfd.get("virtual-storage-desc", ()):
- if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
- root_disk = vsd
- if root_disk.get(
- "type-of-storage"
- ) == "persistent-storage:persistent-storage" and root_disk.get(
- "size-of-storage"
- ):
- persistent_root_disk[vsd["id"]] = {
- "image_id": vdu.get("sw-image-desc"),
- "size": root_disk["size-of-storage"],
- }
- disk_list.append(persistent_root_disk[vsd["id"]])
- if target_vdu.get("virtual-storages"):
- for disk in target_vdu["virtual-storages"]:
- if (
- disk.get("type-of-storage")
- == "persistent-storage:persistent-storage"
- and disk["id"] not in persistent_root_disk.keys()
- ):
- disk_list.append({"size": disk["size-of-storage"]})
+ if target_vdu.get("additionalParams"):
+ vdu_instantiation_volumes_list = (
+ target_vdu.get("additionalParams").get("OSM").get("vdu_volumes")
+ )
+
+ if vdu_instantiation_volumes_list:
+
+ # Find the root volumes and add to the disk_list
+ (disk_list, persistent_root_disk,) = Ns.find_persistent_root_volumes(
+ vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
+ )
+
+ # Find the ordinary volumes which are not added to the persistent_root_disk
+ # and put them to the disk list
+ disk_list = Ns.find_persistent_volumes(
+ persistent_root_disk,
+ target_vdu,
+ vdu_instantiation_volumes_list,
+ disk_list,
+ )
+
+ else:
+
+ # vdu_instantiation_volumes_list is empty
+ for vdu in vnfd.get("vdu", ()):
+ if vdu["name"] == target_vdu["vdu-name"]:
+ for vsd in vnfd.get("virtual-storage-desc", ()):
+ if vsd.get("id") == vdu.get("virtual-storage-desc", [[]])[0]:
+ root_disk = vsd
+ if root_disk.get(
+ "type-of-storage"
+ ) == "persistent-storage:persistent-storage" and root_disk.get(
+ "size-of-storage"
+ ):
+ persistent_root_disk[vsd["id"]] = {
+ "image_id": vdu.get("sw-image-desc"),
+ "size": root_disk["size-of-storage"],
+ }
+ disk_list.append(persistent_root_disk[vsd["id"]])
+
+ if target_vdu.get("virtual-storages"):
+ for disk in target_vdu["virtual-storages"]:
+ if (
+ disk.get("type-of-storage")
+ == "persistent-storage:persistent-storage"
+ and disk["id"] not in persistent_root_disk.keys()
+ ):
+ disk_list.append({"size": disk["size-of-storage"]})
affinity_group_list = []
"floating_ip",
)
}
- existing_ifaces = existing_vdu["vim_info"][target_id].get("interfaces", [])
+ existing_ifaces = existing_vdu["vim_info"][target_id].get(
+ "interfaces_backup", []
+ )
net_id = next(
(
i["vim_net_id"]
self.logger.warning(
"ns.calculate_diff_items target_item={}".format(target_item)
)
+ if process_params == Ns._process_flavor_params:
+ kwargs.update(
+ {
+ "db": self.db,
+ }
+ )
+ self.logger.warning(
+ "calculate_diff_items for flavor kwargs={}".format(kwargs)
+ )
+
if process_params == Ns._process_vdu_params:
self.logger.warning(
"calculate_diff_items self.fs={}".format(self.fs)
target_id = db_task.pop("target_id")
common_id = db_task.get("common_id")
+            # Do not check tasks with vim_status DELETED,
+            # because in manual healing there are two tasks for the same vdur:
+            # one with vim_status deleted and the other one with the actual VM status.
+
if common_id:
if self.db.set_one(
"ro_tasks",
q_filter={
"target_id": target_id,
"tasks.common_id": common_id,
+ "vim_info.vim_status.ne": "DELETED",
},
update_dict={"to_check_at": now, "modified_at": now},
push={"tasks": db_task},
q_filter={
"target_id": target_id,
"tasks.target_record": db_task["target_record"],
+ "vim_info.vim_status.ne": "DELETED",
},
update_dict={"to_check_at": now, "modified_at": now},
push={"tasks": db_task},
db_ro_task["vim_info"]["created_items"] = db_task.get(
"created_items", {}
)
+ db_ro_task["vim_info"]["volumes_to_hold"] = db_task.get(
+ "volumes_to_hold", []
+ )
db_ro_task["vim_info"]["vim_id"] = db_task.get("vim_id", None)
nb_ro_tasks += 1
def _prepare_created_items_for_healing(
self,
- target_id,
- existing_vdu,
+ nsr_id,
+ target_record,
):
- # Only ports are considered because created volumes are persistent
- ports_list = {}
- vim_interfaces = existing_vdu["vim_info"][target_id].get("interfaces", [])
- for iface in vim_interfaces:
- ports_list["port:" + iface["vim_interface_id"]] = True
+ created_items = {}
+ # Get created_items from ro_task
+ ro_tasks = self.db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id})
+ for ro_task in ro_tasks:
+ for task in ro_task["tasks"]:
+ if (
+ task["target_record"] == target_record
+ and task["action"] == "CREATE"
+ and ro_task["vim_info"]["created_items"]
+ ):
+ created_items = ro_task["vim_info"]["created_items"]
+ break
- return ports_list
+ return created_items
def _prepare_persistent_volumes_for_healing(
self,
target_record = f"{db_record}.{item_index}.vim_info.{target_vim}"
created_items = self._prepare_created_items_for_healing(
- target_vim, existing_instance
+ nsr_id, target_record
)
volumes_to_hold = self._prepare_persistent_volumes_for_healing(
# The CREATE task depens on the DELETE task
extra_dict["depends_on"] = [delete_task_id]
+ # Add volumes created from created_items if any
+ # Ports should be deleted with delete task and automatically created with create task
+ volumes = {}
+ for k, v in created_items.items():
+ try:
+ k_item, _, k_id = k.partition(":")
+ if k_item == "volume":
+ volumes[k] = v
+ except Exception as e:
+ self.logger.error(
+ "Error evaluating created item {}: {}".format(k, e)
+ )
+ extra_dict["previous_created_volumes"] = volumes
+
deployment_info = {
"action_id": action_id,
"nsr_id": nsr_id,
return None, None, True
def rebuild_start_stop_task(
    self,
    vdu_id,
    vnf_id,
    vdu_index,
    action_id,
    nsr_id,
    task_index,
    target_vim,
    extra_dict,
):
    """Build the EXEC "update" task used to start/stop/rebuild a single VDU.

    Registers the target VIM via _assign_vim and returns the task dictionary
    produced by Ns._create_task; the caller uploads it to the database.
    """
    self._assign_vim(target_vim)
    return Ns._create_task(
        deployment_info={
            "action_id": action_id,
            "nsr_id": nsr_id,
            "task_index": task_index,
        },
        target_id=target_vim,
        item="update",
        action="EXEC",
        target_record="vnfrs:{}:vdur.{}".format(vnf_id, vdu_index),
        target_record_id="vnfrs:{}:vdur.{}".format(vnf_id, vdu_id),
        extra_dict=extra_dict,
    )
+
def rebuild_start_stop(
    self, session, action_dict, version, nsr_id, *args, **kwargs
):
    """Create and upload the RO task for a start/stop/rebuild VDU operation.

    action_dict carries a single action key (e.g. "start", "stop", "rebuild")
    whose value holds vdu/vnf identifiers, the target VIM and the VIM VM id;
    a "stop" action is translated to the VIM action "shutoff".

    Returns a (status-dict, action_id, True) triple; raises NsException on
    any failure.
    """
    now = time()
    task_index = 0
    extra_dict = {}
    action_id = action_dict.get("action_id", str(uuid4()))
    logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
    self.logger.debug(logging_text + "Enter")
    step = ""

    action = list(action_dict.keys())[0]
    task_dict = action_dict.get(action)
    vim_vm_id = action_dict.get(action).get("vim_vm_id")

    # "stop" maps to the VIM-level "shutoff" action
    if action_dict.get("stop"):
        action = "shutoff"

    db_new_tasks = []
    try:
        step = "lock the operation & do task creation"
        with self.write_lock:
            extra_dict["params"] = {
                "vim_vm_id": vim_vm_id,
                "action": action,
            }
            db_new_tasks.append(
                self.rebuild_start_stop_task(
                    task_dict["vdu_id"],
                    task_dict["vnf_id"],
                    task_dict["vdu_index"],
                    action_id,
                    nsr_id,
                    task_index,
                    task_dict["target_vim"],
                    extra_dict,
                )
            )
            step = "upload Task to db"
            self.upload_all_tasks(
                db_new_tasks=db_new_tasks,
                now=now,
            )
        self.logger.debug(
            logging_text + "Exit. Created {} tasks".format(len(db_new_tasks))
        )
        return (
            {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
            action_id,
            True,
        )
    except Exception as e:
        if isinstance(e, (DbException, NsException)):
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(step, e)
            )
        else:
            # unexpected errors: keep the full traceback in the message
            e = traceback_format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(step, e),
                exc_info=True,
            )
        raise NsException(e)
+
def get_deploy(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
nsrs = self.db.get_list("nsrs", {})
return_data = []
exc_info=True,
)
raise NsException(e)
+
def verticalscale_task(
    self, vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
):
    """Build the EXEC "verticalscale" task for a single VDU.

    The target VIM is taken from the first entry of the vdur's vim_info;
    it is registered via _assign_vim before the task is created.  Returns
    the task dictionary produced by Ns._create_task.
    """
    target_vim, vim_info = next(iter(vdu["vim_info"].items()))
    self._assign_vim(target_vim)
    return Ns._create_task(
        deployment_info={
            "action_id": action_id,
            "nsr_id": nsr_id,
            "task_index": task_index,
        },
        target_id=target_vim,
        item="verticalscale",
        action="EXEC",
        target_record="vnfrs:{}:vdur.{}".format(vnf["_id"], vdu_index),
        target_record_id="vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"]),
        extra_dict=extra_dict,
    )
+
def verticalscale(self, session, indata, version, nsr_id, *args, **kwargs):
    """Create and upload the RO task(s) for a vertical-scale operation.

    Reads the changeVnfFlavorData block from indata, locates the matching
    vdur in the VNF record (by vduid and vduCountIndex) and creates one
    "verticalscale" EXEC task carrying the requested flavor.

    Returns a (status-dict, action_id, True) triple; raises NsException on
    any failure.
    """
    now = time()
    task_index = 0
    extra_dict = {}
    action_id = indata.get("action_id", str(uuid4()))
    logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
    self.logger.debug(logging_text + "Enter")
    step = ""
    try:
        change_data = indata.get("changeVnfFlavorData")
        vnf_instance_id = change_data["vnfInstanceId"]
        step = "Getting vnfrs from db"
        db_vnfr = self.db.get_one("vnfrs", {"_id": vnf_instance_id})
        additional_params = change_data["additionalParams"]
        vdu_id_ref = additional_params["vduid"]
        count_index = additional_params["vduCountIndex"]
        # target flavor requested by the operation
        flavor_dict = {
            "name": vdu_id_ref + "-flv",
            "ram": additional_params["virtualMemory"],
            "vcpus": additional_params["numVirtualCpu"],
            "disk": additional_params["sizeOfStorage"],
        }
        db_new_tasks = []
        step = "Creating Tasks for vertical scaling"
        with self.write_lock:
            for vdu_index, vdu in enumerate(db_vnfr["vdur"]):
                # only the vdur matching both id and count-index is scaled
                if (
                    vdu["vdu-id-ref"] != vdu_id_ref
                    or vdu["count-index"] != count_index
                ):
                    continue
                extra_dict["params"] = {
                    "vim_vm_id": vdu["vim-id"],
                    "flavor_dict": flavor_dict,
                }
                db_new_tasks.append(
                    self.verticalscale_task(
                        vdu,
                        db_vnfr,
                        vdu_index,
                        action_id,
                        nsr_id,
                        task_index,
                        extra_dict,
                    )
                )
                task_index += 1
                break
        self.upload_all_tasks(
            db_new_tasks=db_new_tasks,
            now=now,
        )
        self.logger.debug(
            logging_text + "Exit. Created {} tasks".format(len(db_new_tasks))
        )
        return (
            {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
            action_id,
            True,
        )
    except Exception as e:
        if isinstance(e, (DbException, NsException)):
            self.logger.error(
                logging_text + "Exit Exception while '{}': {}".format(step, e)
            )
        else:
            # unexpected errors: keep the full traceback in the message
            e = traceback_format_exc()
            self.logger.critical(
                logging_text + "Exit Exception while '{}': {}".format(step, e),
                exc_info=True,
            )
        raise NsException(e)