# devops-stages/stage-build.sh
#
-FROM ubuntu:20.04
+FROM ubuntu:22.04
ARG APT_PROXY
RUN if [ ! -z $APT_PROXY ] ; then \
python3 \
python3-all \
python3-dev \
- python3-setuptools
-
-RUN python3 -m easy_install pip==21.3.1
-RUN pip install tox==3.24.5
+ python3-setuptools \
+ python3-pip \
+ tox
ENV LC_ALL C.UTF-8
ENV LANG C.UTF-8
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+from copy import deepcopy
+from dataclasses import dataclass
+import logging
+from os import makedirs, path
+from pprint import pformat
+import random
+import threading
+from typing import Optional
+
+from importlib_metadata import entry_points
+from osm_common import dbmemory, dbmongo
+from osm_common.dbbase import DbException
+from osm_ng_ro.ns_thread import ConfigValidate
+from osm_ro_plugin import vimconn
+import yaml
+from yaml.representer import RepresenterError
+
+
+openStackvmStatusOk = [
+ "ACTIVE",
+ "PAUSED",
+ "SUSPENDED",
+ "SHUTOFF",
+ "BUILD",
+]
+
+openStacknetStatusOk = [
+ "ACTIVE",
+ "PAUSED",
+ "BUILD",
+]
+
+db_vim_collection = "vim_accounts"
+vim_type = "openstack"
+ro_task_collection = "ro_tasks"
+plugin_name = "rovim_openstack"
+monitoring_task = None
+
+
+@dataclass
+class VmToMonitor:
+ vm_id: str
+ target_record: str
+
+
+@dataclass
+class VimToMonitor:
+ vim_id: str
+ vms: list
+
+
+class MonitorVmsException(Exception):
+ def __init__(self, message):
+ super(Exception, self).__init__(message)
+
+
+class MonitorDbException(Exception):
+ def __init__(self, message):
+ super(Exception, self).__init__(message)
+
+
+class MonitorVimException(Exception):
+ def __init__(self, message):
+ super(Exception, self).__init__(message)
+
+
+class SafeDumper(yaml.SafeDumper):
+ def represent_data(self, data):
+ if isinstance(data, dict) and data.__class__ != dict:
+ # A solution to convert subclasses of dict to dicts which is not handled by pyyaml.
+ data = dict(data.items())
+ return super(SafeDumper, self).represent_data(data)
+
+
+class MonitorVms:
+ def __init__(self, config: dict):
+ self.config = config
+ self.db = None
+ self.refresh_config = ConfigValidate(config)
+ self.my_vims = {}
+ self.plugins = {}
+ self.logger = logging.getLogger("ro.monitor")
+ self.connect_db()
+ self.db_vims = self.get_db_vims()
+ self.load_vims()
+
+ def load_vims(self) -> None:
+ for vim in self.db_vims:
+ if vim["_id"] not in self.my_vims:
+ self._load_vim(vim["_id"])
+
+ def connect_db(self) -> None:
+ """Connect to the Database.
+
+ Raises:
+ MonitorDbException
+ """
+ try:
+ if not self.db:
+ if self.config["database"]["driver"] == "mongo":
+ self.db = dbmongo.DbMongo()
+ self.db.db_connect(self.config["database"])
+ elif self.config["database"]["driver"] == "memory":
+ self.db = dbmemory.DbMemory()
+ self.db.db_connect(self.config["database"])
+ else:
+ raise MonitorDbException(
+ "Invalid configuration param '{}' at '[database]':'driver'".format(
+ self.config["database"]["driver"]
+ )
+ )
+ except (DbException, MonitorDbException, ValueError) as e:
+ raise MonitorDbException(str(e))
+
+ def get_db_vims(self) -> list:
+ """Get all VIM accounts which types are Openstack."""
+ return self.db.get_list(db_vim_collection, {"vim_type": vim_type})
+
+ def find_ro_tasks_to_monitor(self) -> list:
+ """Get the ro_tasks which belongs to vdu and status DONE."""
+ return self.db.get_list(
+ ro_task_collection,
+ q_filter={
+ "tasks.status": ["DONE"],
+ "tasks.item": ["vdu"],
+ },
+ )
+
+ @staticmethod
+ def _initialize_target_vim(vim_module_conn, vim: dict) -> object:
+ """Create the VIM connector object with given vim details.
+
+ Args:
+ vim_module_conn (class): VIM connector class
+            vim (dict): VIM details to initialize VIM connector object
+
+ Returns:
+ VIM connector (object): VIM connector object
+ """
+ return vim_module_conn(
+ uuid=vim["_id"],
+ name=vim["name"],
+ tenant_id=vim.get("vim_tenant_id"),
+ tenant_name=vim.get("vim_tenant_name"),
+ url=vim["vim_url"],
+ url_admin=None,
+ user=vim["vim_user"],
+ passwd=vim["vim_password"],
+ config=vim.get("config") or {},
+ persistent_info={},
+ )
+
+ def _load_vim(self, target_id) -> None:
+ """Load or reload a vim_account.
+ Read content from database, load the plugin if not loaded, then it fills my_vims dictionary.
+
+ Args:
+ target_id (str): ID of vim account
+
+ Raises:
+ MonitorVimException
+ """
+ try:
+ vim = self.db.get_one(db_vim_collection, {"_id": target_id})
+ schema_version = vim.get("schema_version")
+ self.db.encrypt_decrypt_fields(
+ vim,
+ "decrypt",
+ fields=("password", "secret"),
+ schema_version=schema_version,
+ salt=target_id,
+ )
+ self._process_vim_config(target_id, vim)
+ vim_module_conn = self._load_plugin(plugin_name)
+ self.my_vims[target_id] = self._initialize_target_vim(vim_module_conn, vim)
+ self.logger.debug(
+ "Connector loaded for {}, plugin={}".format(target_id, plugin_name)
+ )
+ except (
+ DbException,
+ IOError,
+ AttributeError,
+ MonitorDbException,
+ MonitorVimException,
+ TypeError,
+ ) as e:
+ raise MonitorVimException(
+ "Cannot load {} plugin={}: {}".format(target_id, plugin_name, str(e))
+ )
+
+ @staticmethod
+ def _process_vim_config(target_id: str, db_vim: dict) -> None:
+ """
+ Process vim config, creating vim configuration files as ca_cert
+ Args:
+ target_id (str): vim id
+ db_vim (dict): Vim dictionary obtained from database
+
+ Raises:
+ MonitorVimException
+ """
+ if not db_vim.get("config"):
+ return
+ file_name = ""
+ work_dir = "/app/osm_ro/certs"
+ try:
+ if db_vim["config"].get("ca_cert_content"):
+ file_name = f"{work_dir}/{target_id}:{random.randint(0, 99999)}"
+
+ if not path.isdir(file_name):
+ makedirs(file_name)
+
+ file_name = file_name + "/ca_cert"
+
+ with open(file_name, "w") as f:
+ f.write(db_vim["config"]["ca_cert_content"])
+ del db_vim["config"]["ca_cert_content"]
+ db_vim["config"]["ca_cert"] = file_name
+
+ except (FileNotFoundError, IOError, OSError) as e:
+ raise MonitorVimException(
+ "Error writing to file '{}': {}".format(file_name, e)
+ )
+
+ def _load_plugin(self, name: str = "rovim_openstack", type: str = "vim"):
+ """Finds the proper VIM connector and returns VIM connector class name.
+ Args:
+ name (str): rovim_openstack
+ type (str): vim
+
+ Returns:
+ VIM connector class name (class)
+
+ Raises:
+ MonitorVimException
+ """
+ try:
+ if name in self.plugins:
+ return self.plugins[name]
+
+ for ep in entry_points(group="osm_ro{}.plugins".format(type), name=name):
+ self.plugins[name] = ep.load()
+ return self.plugins[name]
+
+ except Exception as e:
+ raise MonitorVimException("Cannot load plugin osm_{}: {}".format(name, e))
+
+ @staticmethod
+ def create_vm_to_monitor(ro_task: dict) -> Optional[object]:
+ """Create VM using dataclass with ro task details.
+
+ Args:
+ ro_task (dict): Details of ro_task
+
+ Returns:
+ VmToMonitor (object)
+ """
+ if not ro_task:
+ return
+ return VmToMonitor(
+ ro_task["vim_info"]["vim_id"], ro_task["tasks"][0]["target_record"]
+ )
+
+ @staticmethod
+ def add_vm_to_existing_vim(
+ vims_to_monitor: list, ro_task: dict, target_vim: str
+ ) -> bool:
+ """Add VmToMonitor to existing VIM list.
+
+ Args:
+ vims_to_monitor (list): List of VIMs to monitor
+ ro_task (dict): ro_task details
+ target_vim (str): ID of target VIM
+
+ Returns:
+            Boolean: True if the VM is added to the VIM list, False otherwise.
+ """
+ for vim in vims_to_monitor:
+ if target_vim == vim.vim_id:
+ vm_to_monitor = MonitorVms.create_vm_to_monitor(ro_task)
+ vim.vms.append(vm_to_monitor)
+ return True
+ return False
+
+ @staticmethod
+ def add_new_vim_for_monitoring(
+ vims_to_monitor: list, ro_task: dict, target_vim: str
+ ) -> None:
+ """Create a new VIM object and add to vims_to_monitor list.
+
+ Args:
+ vims_to_monitor (list): List of VIMs to monitor
+ ro_task (dict): ro_task details
+ target_vim (str): ID of target VIM
+ """
+ vim_to_monitor = VimToMonitor(target_vim, [])
+ vm_to_monitor = MonitorVms.create_vm_to_monitor(ro_task)
+ vim_to_monitor.vms.append(vm_to_monitor)
+ vims_to_monitor.append(vim_to_monitor)
+
+ @staticmethod
+ def prepare_vims_to_monitor(
+ vims_to_monitor: list, ro_task: dict, target_vim: str
+ ) -> None:
+ """If the required VIM exists in the vims_to_monitor list, add VM under related VIM,
+ otherwise create a new VIM object and add VM to this new created VIM.
+
+ Args:
+ vims_to_monitor (list): List of VIMs to monitor
+ ro_task (dict): ro_task details
+ target_vim (str): ID of target VIM
+ """
+ if not MonitorVms.add_vm_to_existing_vim(vims_to_monitor, ro_task, target_vim):
+ MonitorVms.add_new_vim_for_monitoring(vims_to_monitor, ro_task, target_vim)
+
+ def _get_db_paths(self, target_record: str) -> tuple:
+ """Get the database paths and info of target VDU and VIM.
+
+ Args:
+ target_record (str): A string which includes vnfr_id, vdur_id, vim_id
+
+ Returns:
+ (vim_info_path: str, vim_id: str, vnfr_id: str, vdur_path:str, vdur_index: int, db_vnfr: dict) tuple
+
+ Raises:
+ MonitorVmsException
+ """
+ try:
+ [_, vnfr_id, vdur_info, vim_id] = target_record.split(":")
+ vim_info_path = vdur_info + ":" + vim_id
+ vdur_path = vim_info_path.split(".vim_info.")[0]
+ vdur_index = int(vdur_path.split(".")[1])
+ db_vnfr = self.db.get_one("vnfrs", {"_id": vnfr_id}, fail_on_empty=False)
+ return vim_info_path, vim_id, vnfr_id, vdur_path, vdur_index, db_vnfr
+ except (DbException, ValueError) as e:
+ raise MonitorVmsException(str(e))
+
+ @staticmethod
+ def _check_if_vdur_vim_info_exists(
+ db_vnfr: dict, vdur_index: int
+ ) -> Optional[bool]:
+ """Check if VNF record and vdur vim_info record exists.
+
+ Args:
+ db_vnfr (dict): VNF record
+ vdur_index (int): index of vdur under db_vnfr["vdur"]
+
+ Returns:
+            Boolean True if both the VNF record and the vdur vim_info record exist.
+ """
+ try:
+ if db_vnfr and db_vnfr.get("vdur") and isinstance(vdur_index, int):
+ if db_vnfr["vdur"][vdur_index] and db_vnfr["vdur"][vdur_index].get(
+ "vim_info"
+ ):
+ return True
+ except IndexError:
+ return
+
+ def _get_vm_data_from_db(self, vm_to_monitor: object) -> Optional[tuple]:
+ """Get the required DB path and VIM info data from database.
+
+ Args:
+ vm_to_monitor (object): Includes vm_id and target record in DB.
+
+ Returns:
+ (vdur_path: str, vdur_vim_info_update: dict, db_vnfr: dict, existing_vim_info: dict, vnfr_id,vim_info_path: str) (Tuple):
+ Required VM info if _check_if_vdur_vim_info_exists else None
+ """
+ (
+ vim_info_path,
+ vim_id,
+ vnfr_id,
+ vdur_path,
+ vdur_index,
+ db_vnfr,
+ ) = self._get_db_paths(vm_to_monitor.target_record)
+ if not self._check_if_vdur_vim_info_exists(db_vnfr, vdur_index):
+ return
+
+ existing_vim_info = db_vnfr["vdur"][vdur_index]["vim_info"].get("vim:" + vim_id)
+ if not existing_vim_info:
+ return
+
+ vdur_vim_info_update = deepcopy(existing_vim_info)
+ return (
+ vdur_path,
+ vdur_vim_info_update,
+ db_vnfr,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ )
+
+ @staticmethod
+ def update_vim_info_for_deleted_vm(vdur_vim_info_update: dict) -> None:
+ """Updates the vdur_vim_info_update to report that VM is deleted.
+
+ Args:
+ vdur_vim_info_update (dict): Dictionary to be updated and used to update VDUR later.
+ """
+ vdur_vim_info_update.update(
+ {
+ "vim_status": "DELETED",
+ "vim_message": "Deleted externally",
+ "vim_id": None,
+ "vim_name": None,
+ "interfaces": None,
+ }
+ )
+
+ def report_deleted_vdur(self, vm_to_monitor: object) -> None:
+ """VM does not exist in the Openstack Cloud so update the VNFR to report VM deletion.
+
+ Args:
+ vm_to_monitor (object): VM needs to be reported as deleted.
+ """
+ vm_data = self._get_vm_data_from_db(vm_to_monitor)
+ if not vm_data:
+ return
+ (
+ vdur_path,
+ vdur_vim_info_update,
+ _,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ ) = vm_data
+ self.update_vim_info_for_deleted_vm(vdur_vim_info_update)
+ vdur_update = {
+ vdur_path + ".status": "DELETED",
+ }
+
+ if existing_vim_info != vdur_vim_info_update:
+ # VNFR record is updated one time upon VM deletion.
+ self.logger.info(f"Reporting deletion of VM: {vm_to_monitor.vm_id}")
+ self.backup_vdu_interfaces(vdur_vim_info_update)
+ all_updates = [vdur_update, {vim_info_path: vdur_vim_info_update}]
+ self.update_in_database(all_updates, vnfr_id)
+ self.logger.info(f"Updated vnfr for vm_id: {vm_to_monitor.vm_id}.")
+
+ def update_vnfrs(self, servers: list, ports: dict, vms_to_monitor: list) -> None:
+ """Update the VDURs according to the latest information provided by servers list.
+
+ Args:
+ servers (list): List of existing VMs comes from single Openstack VIM account
+ ports (dict): List of all ports comes from single Openstack VIM account
+ vms_to_monitor (list): List of VMs to be monitored and updated.
+ """
+ for vm_to_monitor in vms_to_monitor:
+ server = next(
+ filter(lambda server: server.id == vm_to_monitor.vm_id, servers), None
+ )
+ if server:
+ self.report_vdur_updates(server, vm_to_monitor, ports)
+ else:
+ self.report_deleted_vdur(vm_to_monitor)
+
+ def serialize(self, value: dict) -> Optional[str]:
+ """Serialization of python basic types.
+ In the case value is not serializable a message will be logged.
+
+ Args:
+ value (dict/str): Data to serialize
+
+ Returns:
+ serialized_value (str, yaml)
+ """
+ if isinstance(value, str):
+ return value
+ try:
+ return yaml.dump(
+ value, Dumper=SafeDumper, default_flow_style=True, width=256
+ )
+ except RepresenterError:
+ self.logger.info(
+ "The following entity cannot be serialized in YAML:\n\n%s\n\n",
+ pformat(value),
+ exc_info=True,
+ )
+ return str(value)
+
+ def _get_server_info(self, server: object) -> str:
+ """Get the server info, extract some fields and returns info as string.
+
+ Args:
+ server (object): VM info object
+
+ Returns:
+ server_info (string)
+ """
+ server_info = server.to_dict()
+ server_info.pop("OS-EXT-SRV-ATTR:user_data", None)
+ server_info.pop("user_data", None)
+ return self.serialize(server_info)
+
+ def check_vm_status_updates(
+ self,
+ vdur_vim_info_update: dict,
+ vdur_update: dict,
+ server: object,
+ vdur_path: str,
+ ) -> None:
+ """Fills up dictionaries to update VDUR according to server.status.
+
+ Args:
+ vdur_vim_info_update (dict): Dictionary which keeps the differences of vdur_vim_info
+ vdur_update (dict): Dictionary which keeps the differences of vdur
+ server (server): VM info
+ vdur_path (str): Path of VDUR in DB
+ """
+ if server.status in openStackvmStatusOk:
+ vdur_vim_info_update["vim_status"] = vdur_update[
+ vdur_path + ".status"
+ ] = server.status
+
+ else:
+ vdur_vim_info_update["vim_status"] = vdur_update[
+ vdur_path + ".status"
+ ] = server.status
+ vdur_vim_info_update["vim_message"] = "VIM status reported " + server.status
+
+ vdur_vim_info_update["vim_details"] = self._get_server_info(server)
+ vdur_vim_info_update["vim_id"] = server.id
+ vdur_vim_info_update["vim_name"] = vdur_update[
+ vdur_path + ".name"
+ ] = server.name
+
+ @staticmethod
+ def get_interface_info(
+ ports: dict, interface: dict, server: object
+ ) -> Optional[dict]:
+ """Get the updated port info regarding with existing interface of server.
+
+ Args:
+ ports (dict): List of all ports belong to single VIM account
+ interface (dict): Existing interface info which is taken from DB
+ server (object): Server info
+
+ Returns:
+ port (dict): The updated port info related to existing interface of server
+ """
+ return next(
+ filter(
+ lambda port: port.get("id") == interface.get("vim_interface_id")
+ and port.get("device_id") == server.id,
+ ports["ports"],
+ ),
+ None,
+ )
+
+ @staticmethod
+ def check_vlan_pci_updates(
+ interface_info: dict, index: int, vdur_vim_info_update: dict
+ ) -> None:
+ """If interface has pci and vlan, update vdur_vim_info dictionary with the refreshed data.
+
+ Args:
+ interface_info (dict): Refreshed interface info
+ index (int): Index of interface in VDUR
+ vdur_vim_info_update (dict): Dictionary to be updated and used to update VDUR later.
+ """
+ if interface_info.get("binding:profile") and interface_info[
+ "binding:profile"
+ ].get("pci_slot"):
+ pci = interface_info["binding:profile"]["pci_slot"]
+ vdur_vim_info_update["interfaces"][index]["pci"] = pci
+
+ if interface_info.get("binding:vif_details"):
+ vdur_vim_info_update["interfaces"][index]["vlan"] = interface_info[
+ "binding:vif_details"
+ ].get("vlan")
+
+ @staticmethod
+ def check_vdur_interface_updates(
+ vdur_update: dict,
+ vdur_path: str,
+ index: int,
+ interface_info: dict,
+ old_interface: dict,
+ vnfr_update: dict,
+ vnfr_id: str,
+ ) -> None:
+ """Updates the vdur_update dictionary which stores differences between the latest interface data and data in DB.
+
+ Args:
+ vdur_update (dict): Dictionary used to store vdur updates
+ vdur_path (str): VDUR record path in DB
+ index (int): Index of interface in VDUR
+ interface_info (dict): Refreshed interface info
+ old_interface (dict): The previous interface info comes from DB
+ vnfr_update (dict): VDUR record path in DB
+ vnfr_id (str): VNFR ID
+ """
+ current_ip_address = MonitorVms._get_current_ip_address(interface_info)
+ if current_ip_address:
+ vdur_update[
+ vdur_path + ".interfaces." + str(index) + ".ip-address"
+ ] = current_ip_address
+
+ if old_interface.get("mgmt_vdu_interface"):
+ vdur_update[vdur_path + ".ip-address"] = current_ip_address
+
+ if old_interface.get("mgmt_vnf_interface"):
+ vnfr_update[vnfr_id + ".ip-address"] = current_ip_address
+
+ vdur_update[
+ vdur_path + ".interfaces." + str(index) + ".mac-address"
+ ] = interface_info.get("mac_address")
+
+ @staticmethod
+ def _get_current_ip_address(interface_info: dict) -> Optional[str]:
+ if interface_info.get("fixed_ips") and interface_info["fixed_ips"][0]:
+ return interface_info["fixed_ips"][0].get("ip_address")
+
+ @staticmethod
+ def backup_vdu_interfaces(vdur_vim_info_update: dict) -> None:
+ """Backup VDU interfaces as interfaces_backup.
+
+ Args:
+ vdur_vim_info_update (dict): Dictionary used to store vdur_vim_info updates
+ """
+ if vdur_vim_info_update.get("interfaces") and not vdur_vim_info_update.get(
+ "vim_message"
+ ):
+ vdur_vim_info_update["interfaces_backup"] = vdur_vim_info_update[
+ "interfaces"
+ ]
+
+ def update_vdur_vim_info_interfaces(
+ self,
+ vdur_vim_info_update: dict,
+ index: int,
+ interface_info: dict,
+ server: object,
+ ) -> None:
+ """Update the vdur_vim_info dictionary with the latest interface info.
+
+ Args:
+ vdur_vim_info_update (dict): The dictionary which is used to store vdur_vim_info updates
+ index (int): Interface index
+ interface_info (dict): The latest interface info
+ server (object): The latest VM info
+ """
+ if not (
+ vdur_vim_info_update.get("interfaces")
+ and vdur_vim_info_update["interfaces"][index]
+ ):
+ raise MonitorVmsException("Existing interfaces info could not found.")
+
+ vdur_vim_info_update["interfaces"][index].update(
+ {
+ "mac_address": interface_info["mac_address"],
+ "ip_address": interface_info["fixed_ips"][0].get("ip_address")
+ if interface_info.get("fixed_ips")
+ else None,
+ "vim_net_id": interface_info["network_id"],
+ "vim_info": self.serialize(interface_info),
+ "compute_node": server.to_dict()["OS-EXT-SRV-ATTR:host"]
+ if server.to_dict().get("OS-EXT-SRV-ATTR:host")
+ else None,
+ }
+ )
+
+ def prepare_interface_updates(
+ self,
+ vdur_vim_info_update: dict,
+ index: int,
+ interface_info: dict,
+ server: object,
+ vdur_path: str,
+ vnfr_update: dict,
+ old_interface: dict,
+ vdur_update: dict,
+ vnfr_id: str,
+ ) -> None:
+ """Updates network related info in vdur_vim_info and vdur by using the latest interface info.
+
+ Args:
+ vdur_vim_info_update (dict): Dictionary used to store vdur_vim_info updates
+ index (int): Interface index
+ interface_info (dict): The latest interface info
+ server (object): The latest VM info
+ vdur_path (str): VDUR record path in DB
+ vnfr_update (dict): VDUR record path in DB
+ old_interface (dict): The previous interface info comes from DB
+ vdur_update (dict): Dictionary used to store vdur updates
+ vnfr_id (str): VNFR ID
+ """
+ self.update_vdur_vim_info_interfaces(
+ vdur_vim_info_update, index, interface_info, server
+ )
+ self.check_vlan_pci_updates(interface_info, index, vdur_vim_info_update)
+ self.check_vdur_interface_updates(
+ vdur_update,
+ vdur_path,
+ index,
+ interface_info,
+ old_interface,
+ vnfr_update,
+ vnfr_id,
+ )
+
+ def check_vm_interface_updates(
+ self,
+ server: object,
+ existing_vim_info: dict,
+ ports: dict,
+ vdur_vim_info_update: dict,
+ vdur_update: dict,
+ vdur_path: str,
+ vnfr_update: dict,
+ vnfr_id: str,
+ ) -> None:
+ """Gets the refreshed interfaces info of server and updates the VDUR if interfaces exist,
+ otherwise reports that interfaces are deleted.
+
+ Args:
+ server (object): The latest VM info
+ existing_vim_info (dict): VM info details comes from DB
+ ports (dict): All ports info belongs to single VIM account
+ vdur_vim_info_update (dict): Dictionary used to store vdur_vim_info updates
+ vdur_update (dict): Dictionary used to store vdur updates
+ vdur_path (str): VDUR record path in DB
+ vnfr_update (dict): VDUR record path in DB
+ vnfr_id (str): VNFR ID
+ """
+ for index, old_interface in enumerate(existing_vim_info["interfaces"]):
+ interface_info = self.get_interface_info(ports, old_interface, server)
+ if not interface_info:
+ vdur_vim_info_update[
+ "vim_message"
+ ] = f"Interface {old_interface['vim_interface_id']} deleted externally."
+
+ else:
+ if interface_info.get("status") in openStacknetStatusOk:
+ self.prepare_interface_updates(
+ vdur_vim_info_update,
+ index,
+ interface_info,
+ server,
+ vdur_path,
+ vnfr_update,
+ old_interface,
+ vdur_update,
+ vnfr_id,
+ )
+
+ else:
+ vdur_vim_info_update["vim_message"] = (
+ f"Interface {old_interface['vim_interface_id']} status: "
+ + interface_info.get("status")
+ )
+
+ def update_in_database(self, all_updates: list, vnfr_id: str) -> None:
+ """Update differences in VNFR.
+
+ Args:
+ all_updates (list): List of dictionaries which includes differences
+ vnfr_id (str): VNF record ID
+
+ Raises:
+ MonitorDbException
+ """
+ try:
+ for updated_dict in all_updates:
+ if updated_dict:
+ self.db.set_list(
+ "vnfrs",
+ update_dict=updated_dict,
+ q_filter={"_id": vnfr_id},
+ )
+ except DbException as e:
+ raise MonitorDbException(
+ f"Error while updating differences in VNFR {str(e)}"
+ )
+
+ def report_vdur_updates(
+ self, server: object, vm_to_monitor: object, ports: dict
+ ) -> None:
+ """Report VDU updates by changing the VDUR records in DB.
+
+ Args:
+ server (object): Refreshed VM info
+ vm_to_monitor (object): VM to be monitored
+            ports (dict): Ports dict includes all ports details for a single VIM account
+ """
+ vm_data = self._get_vm_data_from_db(vm_to_monitor)
+ if not vm_data:
+ return
+ (
+ vdur_path,
+ vdur_vim_info_update,
+ _,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ ) = vm_data
+ vdur_update, vnfr_update = {}, {}
+
+ self.check_vm_status_updates(
+ vdur_vim_info_update, vdur_update, server, vdur_path
+ )
+
+ self.check_vm_interface_updates(
+ server,
+ existing_vim_info,
+ ports,
+ vdur_vim_info_update,
+ vdur_update,
+ vdur_path,
+ vnfr_update,
+ vnfr_id,
+ )
+ # Update vnfr in MongoDB if there are differences
+ if existing_vim_info != vdur_vim_info_update:
+ self.logger.info(f"Reporting status updates of VM: {vm_to_monitor.vm_id}.")
+ self.backup_vdu_interfaces(vdur_vim_info_update)
+ all_updates = [
+ vdur_update,
+ {vim_info_path: vdur_vim_info_update},
+ vnfr_update,
+ ]
+ self.update_in_database(all_updates, vnfr_id)
+ self.logger.info(f"Updated vnfr for vm_id: {server.id}.")
+
+ def run(self) -> None:
+ """Perfoms the periodic updates of Openstack VMs by sending only two requests to Openstack APIs
+ for each VIM account (in order to get details of all servers, all ports).
+
+ Raises:
+ MonitorVmsException
+ """
+ try:
+ # If there is not any Openstack type VIM account in DB or VM status updates are disabled by config,
+ # Openstack VMs will not be monitored.
+ if not self.db_vims or self.refresh_config.active == -1:
+ return
+
+ ro_tasks_to_monitor = self.find_ro_tasks_to_monitor()
+ db_vims = [vim["_id"] for vim in self.db_vims]
+ vims_to_monitor = []
+
+ for ro_task in ro_tasks_to_monitor:
+ _, _, target_vim = ro_task["target_id"].partition(":")
+ if target_vim in db_vims:
+ self.prepare_vims_to_monitor(vims_to_monitor, ro_task, target_vim)
+
+ for vim in vims_to_monitor:
+ all_servers, all_ports = self.my_vims[vim.vim_id].get_monitoring_data()
+ self.update_vnfrs(all_servers, all_ports, vim.vms)
+ except (
+ DbException,
+ MonitorDbException,
+ MonitorVimException,
+ MonitorVmsException,
+ ValueError,
+ KeyError,
+ TypeError,
+ AttributeError,
+ vimconn.VimConnException,
+ ) as e:
+ raise MonitorVmsException(
+ f"Exception while monitoring Openstack VMs: {str(e)}"
+ )
+
+
+def start_monitoring(config: dict):
+ global monitoring_task
+ if not (config and config.get("period")):
+ raise MonitorVmsException("Wrong configuration format is provided.")
+ instance = MonitorVms(config)
+ period = instance.refresh_config.active
+ instance.run()
+ monitoring_task = threading.Timer(period, start_monitoring, args=(config,))
+ monitoring_task.start()
+
+
+def stop_monitoring():
+ global monitoring_task
+ if monitoring_task:
+ monitoring_task.cancel()
return extra_dict
- @staticmethod
- def _ip_profile_to_ro(
- ip_profile: Dict[str, Any],
- ) -> Dict[str, Any]:
- """[summary]
-
- Args:
- ip_profile (Dict[str, Any]): [description]
-
- Returns:
- Dict[str, Any]: [description]
- """
- if not ip_profile:
- return None
-
- ro_ip_profile = {
- "ip_version": "IPv4"
- if "v4" in ip_profile.get("ip-version", "ipv4")
- else "IPv6",
- "subnet_address": ip_profile.get("subnet-address"),
- "gateway_address": ip_profile.get("gateway-address"),
- "dhcp_enabled": ip_profile.get("dhcp-params", {}).get("enabled", False),
- "dhcp_start_address": ip_profile.get("dhcp-params", {}).get(
- "start-address", None
- ),
- "dhcp_count": ip_profile.get("dhcp-params", {}).get("count", None),
- }
-
- if ip_profile.get("dns-server"):
- ro_ip_profile["dns_address"] = ";".join(
- [v["address"] for v in ip_profile["dns-server"] if v.get("address")]
- )
-
- if ip_profile.get("security-group"):
- ro_ip_profile["security_group"] = ip_profile["security-group"]
-
- return ro_ip_profile
-
@staticmethod
def _process_net_params(
target_vld: Dict[str, Any],
"net_name": (
f"{indata.get('name')[:16]}-{target_vld.get('name', target_vld.get('id'))[:16]}"
),
- "ip_profile": Ns._ip_profile_to_ro(vim_info.get("ip_profile")),
+ "ip_profile": vim_info.get("ip_profile"),
"provider_network_profile": vim_info.get("provider_network"),
}
vnf_preffix = "vnfrs:{}".format(vnfr_id)
ns_preffix = "nsrs:{}".format(nsr_id)
image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
- flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
- extra_dict = {"depends_on": [image_text, flavor_text]}
+ extra_dict = {"depends_on": [image_text]}
net_list = []
persistent_root_disk = {}
persistent_ordinary_disk = {}
vdu_instantiation_volumes_list = []
+ vdu_instantiation_flavor_id = None
disk_list = []
vnfd_id = vnfr["vnfd-id"]
vnfd = db.get_one("vnfds", {"_id": vnfd_id})
if target_vdu.get("additionalParams"):
vdu_instantiation_volumes_list = (
- target_vdu.get("additionalParams").get("OSM").get("vdu_volumes")
+ target_vdu.get("additionalParams").get("OSM", {}).get("vdu_volumes")
+ )
+ vdu_instantiation_flavor_id = (
+ target_vdu.get("additionalParams").get("OSM", {}).get("vim_flavor_id")
)
+ # flavor id
+ if vdu_instantiation_flavor_id:
+ flavor_id = vdu_instantiation_flavor_id
+ else:
+ flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
+ flavor_id = "TASK-" + flavor_text
+ extra_dict["depends_on"].append(flavor_text)
+
if vdu_instantiation_volumes_list:
# Find the root volumes and add to the disk_list
persistent_root_disk = Ns.find_persistent_root_volumes(
"description": target_vdu["vdu-name"],
"start": True,
"image_id": "TASK-" + image_text,
- "flavor_id": "TASK-" + flavor_text,
+ "flavor_id": flavor_id,
"affinity_group_list": affinity_group_list,
"net_list": net_list,
"cloud_config": cloud_config or None,
# Check each VNF of the target
for target_vnf in target_list:
- # Find this VNF in the list from DB
- vnfr_id = target_vnf.get("vnfInstanceId", None)
- if vnfr_id:
- existing_vnf = db_vnfrs.get(vnfr_id)
- db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
- # vim_account_id = existing_vnf.get("vim-account-id", "")
+ # Find this VNF in the list from DB, raise exception if vnfInstanceId is not found
+ vnfr_id = target_vnf["vnfInstanceId"]
+ existing_vnf = db_vnfrs.get(vnfr_id)
+ db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
+ # vim_account_id = existing_vnf.get("vim-account-id", "")
+ target_vdus = target_vnf.get("additionalParams", {}).get("vdu", [])
# Check each VDU of this VNF
- for target_vdu in target_vnf["additionalParams"].get("vdu", None):
+ if not target_vdus:
+ # Create target_vdu_list from DB, if VDUs are not specified
+ target_vdus = []
+ for existing_vdu in existing_vnf.get("vdur"):
+ vdu_name = existing_vdu.get("vdu-name", None)
+ vdu_index = existing_vdu.get("count-index", 0)
+ vdu_to_be_healed = {"vdu-id": vdu_name, "count-index": vdu_index}
+ target_vdus.append(vdu_to_be_healed)
+ for target_vdu in target_vdus:
vdu_name = target_vdu.get("vdu-id", None)
# For multi instance VDU count-index is mandatory
# For single session VDU count-indes is 0
return ro_task_dependency, task_index
raise NsWorkerException("Cannot get depending task {}".format(task_id))
- def update_vm_refresh(self):
+ def update_vm_refresh(self, ro_task):
"""Enables the VM status updates if self.refresh_config.active parameter
- is not -1 and than updates the DB accordingly
+ is not -1 and then updates the DB accordingly
"""
try:
self.logger.debug("Checking if VM status update config")
next_refresh = time.time()
- if self.refresh_config.active == -1:
- next_refresh = -1
- else:
- next_refresh += self.refresh_config.active
+ next_refresh = self._get_next_refresh(ro_task, next_refresh)
if next_refresh != -1:
db_ro_task_update = {}
except Exception as e:
self.logger.error(f"Error updating tasks to enable VM status updates: {e}")
+ def _get_next_refresh(self, ro_task: dict, next_refresh: float):
+ """Decide the next_refresh according to vim type and refresh config period.
+ Args:
+ ro_task (dict): ro_task details
+ next_refresh (float): next refresh time as epoch format
+
+ Returns:
+ next_refresh (float) -1 if vm updates are disabled or vim type is openstack.
+ """
+ target_vim = ro_task["target_id"]
+ vim_type = self.db_vims[target_vim]["vim_type"]
+ if self.refresh_config.active == -1 or vim_type == "openstack":
+ next_refresh = -1
+ else:
+ next_refresh += self.refresh_config.active
+ return next_refresh
+
def _process_pending_tasks(self, ro_task):
ro_task_id = ro_task["_id"]
now = time.time()
elif new_status == "BUILD":
next_refresh += self.refresh_config.build
elif new_status == "DONE":
- if self.refresh_config.active == -1:
- next_refresh = -1
- else:
- next_refresh += self.refresh_config.active
+ next_refresh = self._get_next_refresh(ro_task, next_refresh)
else:
next_refresh += self.refresh_config.error
self._log_ro_task(ro_task, None, None, "TASK_WF", "GET_TASK")
"""
# Check if vim status refresh is enabled again
- self.update_vm_refresh()
+ self.update_vm_refresh(ro_task)
# 0: get task_status_create
lock_object = None
task_status_create = None
new_status, db_vim_info_update = self.item2class[
task["item"]
].new(ro_task, task_index, task_depends)
- # self._create_task(ro_task, task_index, task_depends, db_ro_task_update)
_update_refresh(new_status)
else:
refresh_at = ro_task["vim_info"]["refresh_at"]
from osm_common.msgbase import MsgException
from osm_ng_ro import version as ro_version, version_date as ro_version_date
import osm_ng_ro.html_out as html
+from osm_ng_ro.monitor import start_monitoring, stop_monitoring
from osm_ng_ro.ns import Ns, NsException
from osm_ng_ro.validation import ValidationError
from osm_ng_ro.vim_admin import VimAdminThread
# # start subscriptions thread:
vim_admin_thread = VimAdminThread(config=engine_config, engine=ro_server.ns)
vim_admin_thread.start()
+ start_monitoring(config=engine_config)
+
# # Do not capture except SubscriptionException
# backend = engine_config["authentication"]["backend"]
# terminate vim_admin_thread
if vim_admin_thread:
vim_admin_thread.terminate()
-
+ stop_monitoring()
vim_admin_thread = None
cherrypy.tree.apps["/ro"].root.ns.stop()
cherrypy.log.error("Stopping osm_ng_ro")
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+# The variables that are used in the monitoring tests
+db_vim_collection = "vim_accounts"
+vim_type = "openstack"
+ro_task_collection = "ro_tasks"
+plugin_name = "rovim_openstack"
+mac1_addr = "d0:94:66:ed:fc:e2"
+mac2_addr = "fa:16:3e:0b:84:08"
+ip1_addr = "192.168.22.13"
+vm1_id = "ebd39f37-e607-4bce-9f10-ea4c5635f726"
+vm2_id = "f4404a39-51d5-4cf8-9058-95001e69fdb3"
+vim1_id = target_id = "55b2219a-7bb9-4644-9612-980dada84e83"
+vim2_id = "77b2219a-8bb9-9644-9612-680dada84e83"
+vim3_id = "33b2219a-7bb9-4644-9612-280dada84e83"
+vim4_id = "f239ed93-756b-408e-89f8-fcbf47a9d8f7"
+file_name = "/app/osm_ro/certs/55b2219a-7bb9-4644-9612-980dada84e83:23242"
+net1_id = "21ea5d92-24f1-40ab-8d28-83230e277a49"
+vnfr_id = "35c034cc-8c5b-48c4-bfa2-17a71577ef19"
+db_vim_cacert = "/app/osm_ro/certs/55b2219a-7bb9-4644-9612-980dada84e83:23242/ca_cert"
+vdur_path = "vdur.0"
+vims_to_monitor = []
+vim_info_path = "vdur.0.vim_info.vim:f239ed93-756b-408e-89f8-fcbf47a9d8f7"
+server_other_info = {
+ "admin_state_up": "true",
+ "binding:host_id": "nfvisrv11",
+ "binding:profile": {},
+ "binding:vif_type": "ovs",
+ "binding:vnic_type": "normal",
+ "created_at": "2023-02-22T05:35:46Z",
+}
+deleted_externally = {
+ "vim_status": "DELETED",
+ "vim_message": "Deleted externally",
+ "vim_id": None,
+ "vim_name": None,
+ "interfaces": None,
+}
+interface_with_binding = {
+ "binding:profile": {
+ "physical_network": "physnet1",
+ "pci_slot": "0000:86:17.4",
+ },
+ "binding:vif_details": {
+ "vlan": 400,
+ },
+}
+target_record = "vnfrs:35c034cc-8c5b-48c4-bfa2-17a71577ef19:vdur.0.vim_info.vim:f239ed93-756b-408e-89f8-fcbf47a9d8f7"
+target_record2 = "vnfrs:41e16909-a519-4897-b481-f386e5022425:vdur.0.vim_info.vim:f239ed93-756b-408e-89f8-fcbf47a9d8f7"
+serialized_server_info = "{admin_state_up: true, binding:host_id: nfvisrv11, binding:profile: {}, binding:vif_type: ovs, binding:vnic_type: normal, created_at: 2023-02-22T05:35:46Z}"
+serialized_interface_info = "{fixed_ips: [{ip_address: 192.168.22.13}], mac_address: 'd0:94:66:ed:fc:e2', network_id: 21ea5d92-24f1-40ab-8d28-83230e277a49}"
+config = {
+ "period": {
+ "refresh_active": 60,
+ "refresh_build": 15,
+ "refresh_image": "3600 * 10",
+ "refresh_error": 600,
+ "queue_size": 100,
+ },
+ "database": {"driver": "mongo", "uri": "mongodb://mongo:27017", "name": "osm"},
+ "storage": {"driver": "None", "path": "/app/storage", "loglevel": "DEBUG"},
+}
+old_interface = {
+ "vim_info": "{admin_state_up: true, allowed_address_pairs: [], 'binding:host_id': nfvisrv12, 'binding:profile': {}, 'binding:vif_details': {bridge_name: br-int, connectivity: l2, datapath_type: system, ovs_hybrid_plug: true, port_filter: true}, 'binding:vif_type': ovs, 'binding:vnic_type': normal,\n created_at: '2023-02-18T21:28:52Z', description: '', device_id: ebd39f37-e607-4bce-9f10-ea4c5635f726, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 192.168.251.15, subnet_id: bf950890-8d50-40cc-81ba-afa35db69a19}], id: 4d081f50-e13a-4306-a67e-1edb28d76013,\n mac_address: 'fa:16:3e:85:6c:02', name: vdu-eth0, network_id: 327f5e8e-a383-47c9-80a3-ed45b71d24ca, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE,\n tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2023-02-18T21:28:59Z'}\n",
+ "mac_address": "fa:16:3e:85:6c:02",
+ "vim_net_id": "327f5e8e-a383-47c9-80a3-ed45b71d24ca",
+ "vim_interface_id": "4d081f50-e13a-4306-a67e-1edb28d76013",
+ "compute_node": "nfvisrv12",
+ "pci": None,
+ "vlan": None,
+ "ip_address": "192.168.251.15",
+ "mgmt_vnf_interface": True,
+ "mgmt_vdu_interface": True,
+}
+old_interface2 = {
+ "mgmt_vdu_interface": True,
+ "mgmt_vnf_interface": True,
+}
+interface_info2 = {
+ "fixed_ips": [{"ip_address": ip1_addr}],
+ "mac_address": mac2_addr,
+ "network_id": net1_id,
+}
+sample_vim_info = {
+ "interfaces": [
+ old_interface,
+ ],
+ "interfaces_backup": [
+ {
+ "vim_info": "{admin_state_up: true, allowed_address_pairs: [], 'binding:host_id': nfvisrv12, 'binding:profile': {}, 'binding:vif_details': {bridge_name: br-int, connectivity: l2, datapath_type: system, ovs_hybrid_plug: true, port_filter: true}, 'binding:vif_type': ovs, 'binding:vnic_type': normal,\n created_at: '2023-02-18T21:28:52Z', description: '', device_id: ebd39f37-e607-4bce-9f10-ea4c5635f726, device_owner: 'compute:nova', extra_dhcp_opts: [], fixed_ips: [{ip_address: 192.168.251.15, subnet_id: bf950890-8d50-40cc-81ba-afa35db69a19}], id: 4d081f50-e13a-4306-a67e-1edb28d76013,\n mac_address: 'fa:16:3e:85:6c:02', name: vdu-eth0, network_id: 327f5e8e-a383-47c9-80a3-ed45b71d24ca, port_security_enabled: true, project_id: 71c7971a7cab4b72bd5c10dbe6617f1e, revision_number: 4, security_groups: [1de4b2c2-e4be-4e91-985c-d887e2715949], status: ACTIVE,\n tags: [], tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated_at: '2023-02-18T21:28:59Z'}\n",
+ "mac_address": "fa:16:3e:85:6c:02",
+ "vim_net_id": "327f5e8e-a383-47c9-80a3-ed45b71d24ca",
+ "vim_interface_id": "4d081f50-e13a-4306-a67e-1edb28d76013",
+ "compute_node": "nfvisrv12",
+ "pci": None,
+ "vlan": None,
+ "ip_address": "192.168.251.15",
+ "mgmt_vnf_interface": True,
+ "mgmt_vdu_interface": True,
+ }
+ ],
+ "vim_details": "{'OS-DCF:diskConfig': MANUAL, 'OS-EXT-AZ:availability_zone': nova, 'OS-EXT-SRV-ATTR:host': nfvisrv12, 'OS-EXT-SRV-ATTR:hypervisor_hostname': nfvisrv12, 'OS-EXT-SRV-ATTR:instance_name': instance-000400a6, 'OS-EXT-STS:power_state': 1, 'OS-EXT-STS:task_state': null,\n 'OS-EXT-STS:vm_state': active, 'OS-SRV-USG:launched_at': '2023-02-18T21:28:59.000000', 'OS-SRV-USG:terminated_at': null, accessIPv4: '', accessIPv6: '', addresses: {mgmtnet: [{'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:85:6c:02', 'OS-EXT-IPS:type': fixed, addr: 192.168.251.15,\n version: 4}]}, config_drive: '', created: '2023-02-18T21:28:54Z', flavor: {id: 367fc1eb-bd22-40f8-a519-ed2fb4e5976b, links: [{href: 'http://172.21.247.1:8774/flavors/367fc1eb-bd22-40f8-a519-ed2fb4e5976b', rel: bookmark}]}, hostId: e72dec159231b67a5d4fa37fae67e97051ce9aee003516dadb6a25e4,\n id: ebd39f37-e607-4bce-9f10-ea4c5635f726, image: {id: 919fc71a-6acd-4ee3-8123-739a9abbc2e7, links: [{href: 'http://172.21.247.1:8774/images/919fc71a-6acd-4ee3-8123-739a9abbc2e7', rel: bookmark}]}, key_name: null, links: [{href: 'http://172.21.247.1:8774/v2.1/servers/ebd39f37-e607-4bce-9f10-ea4c5635f726',\n rel: self}, {href: 'http://172.21.247.1:8774/servers/ebd39f37-e607-4bce-9f10-ea4c5635f726', rel: bookmark}], metadata: {}, name: test7-vnf-hackfest_basic-VM-000000, 'os-extended-volumes:volumes_attached': [], progress: 0, security_groups: [{name: default}],\n status: ACTIVE, tenant_id: 71c7971a7cab4b72bd5c10dbe6617f1e, updated: '2023-02-19T21:09:09Z', user_id: f043c84f940b4fc8a01a98714ea97c80}\n",
+ "vim_id": "ebd39f37-e607-4bce-9f10-ea4c5635f726",
+ "vim_message": "Interface 4d081f50-e13a-4306-a67e-1edb28d76013 status: DOWN",
+ "vim_status": "ACTIVE",
+ "vim_name": "test7-vnf-hackfest_basic-VM-000000",
+}
+sample_vnfr = {
+ "_id": "35c034cc-8c5b-48c4-bfa2-17a71577ef19",
+ "id": "35c034cc-8c5b-48c4-bfa2-17a71577ef19",
+ "nsr-id-ref": "ee46620f-cba3-4245-b8be-183ff483bb7e",
+ "created-time": 1676755692.20987,
+ "vnfd-ref": "hackfest_basic-vnf",
+ "vnfd-id": "f1401992-83f4-43cc-ac37-1ad7c1370d03",
+ "vim-account-id": vim4_id,
+ "vca-id": None,
+ "vdur": [
+ {
+ "_id": "faa21fc1-7f27-4a95-93dd-87535ce6b59c",
+ "additionalParams": {
+ "OSM": {
+ "count_index": 0,
+ "member_vnf_index": "vnf",
+ "ns_id": "ee46620f-cba3-4245-b8be-183ff483bb7e",
+ "vdu": {
+ "hackfest_basic-VM-0": {
+ "count_index": 0,
+ "interfaces": {"vdu-eth0": {"name": "vdu-eth0"}},
+ "vdu_id": "hackfest_basic-VM",
+ }
+ },
+ "vdu_id": "hackfest_basic-VM",
+ "vim_account_id": vim4_id,
+ "vnf_id": "35c034cc-8c5b-48c4-bfa2-17a71577ef19",
+ "vnfd_id": "f1401992-83f4-43cc-ac37-1ad7c1370d03",
+ "vnfd_ref": "hackfest_basic-vnf",
+ }
+ },
+ "affinity-or-anti-affinity-group-id": [],
+ "count-index": 0,
+ "id": "faa21fc1-7f27-4a95-93dd-87535ce6b59c",
+ "interfaces": [
+ {
+ "external-connection-point-ref": "vnf-cp0-ext",
+ "internal-connection-point-ref": "vdu-eth0-int",
+ "mgmt-interface": True,
+ "mgmt-vnf": True,
+ "name": "vdu-eth0",
+ "ns-vld-id": "mgmtnet",
+ "type": "PARAVIRT",
+ "compute_node": "nfvisrv12",
+ "ip-address": "192.168.251.15",
+ "mac-address": "fa:16:3e:85:6c:02",
+ "pci": None,
+ "vlan": None,
+ }
+ ],
+ "internal-connection-point": [
+ {
+ "connection-point-id": "vdu-eth0-int",
+ "id": "vdu-eth0-int",
+ "name": "vdu-eth0-int",
+ }
+ ],
+ "ip-address": "192.168.251.15",
+ "ns-flavor-id": "0",
+ "ns-image-id": "0",
+ "vdu-id-ref": "hackfest_basic-VM",
+ "vdu-name": "hackfest_basic-VM",
+ "vim_info": {"vim:f239ed93-756b-408e-89f8-fcbf47a9d8f7": sample_vim_info},
+ "virtual-storages": [
+ {"id": "hackfest_basic-VM-storage", "size-of-storage": "10"}
+ ],
+ "status": "ACTIVE",
+ "vim-id": "ebd39f37-e607-4bce-9f10-ea4c5635f726",
+ "name": "test7-vnf-hackfest_basic-VM-000000",
+ }
+ ],
+ "connection-point": [
+ {
+ "name": "vnf-cp0-ext",
+ "connection-point-id": "vdu-eth0-int",
+ "connection-point-vdu-id": "hackfest_basic-VM",
+ "id": "vnf-cp0-ext",
+ }
+ ],
+ "ip-address": "192.168.251.15",
+ "revision": 1,
+ "_admin": {
+ "created": 1676755692.21059,
+ "modified": 1676755692.21059,
+ "projects_read": ["9a61dad6cbc744879344e5b84d842578"],
+ "projects_write": ["9a61dad6cbc744879344e5b84d842578"],
+ "nsState": "INSTANTIATED",
+ },
+}
+vims = [
+ {
+ "_id": vim1_id,
+ "name": "openstackETSI1",
+ "vim_type": "openstack",
+ },
+ {
+ "_id": vim2_id,
+ "name": "openstackETSI2",
+ "vim_type": "openstack",
+ },
+]
+sample_vim = {
+ "_id": vim1_id,
+ "name": "openstackETSI1",
+ "vim_type": "openstack",
+ "description": None,
+ "vim_url": "http://172.21.223.1:5000/v3",
+ "vim_user": "myuser",
+ "vim_password": "mypassword",
+ "vim_tenant_name": "mytenant",
+ "_admin": {
+ "created": 1675758291.0110583,
+ "modified": 1675758291.0110583,
+ "operationalState": "ENABLED",
+ "current_operation": None,
+ "detailed-status": "",
+ },
+ "schema_version": "1.11",
+ "admin": {"current_operation": 0},
+}
+ro_task1 = {
+ "_id": "6659675b-b6a4-4c0c-ad40-47dae476a961:3",
+ "target_id": f"vim:{vim1_id}",
+ "vim_info": {
+ "created": True,
+ "created_items": {"port:4d081f50-e13a-4306-a67e-1edb28d76013": True},
+ "vim_id": vm1_id,
+ "vim_name": "test7-vnf-hackfest_basic-VM-0",
+ "vim_status": "ACTIVE",
+ "refresh_at": -1,
+ "interfaces": [
+ {
+ "mac_address": "fa:16:3e:85:6c:02",
+ "vim_net_id": "327f5e8e-a383-47c9-80a3-ed45b71d24ca",
+ "vim_interface_id": "4d081f50-e13a-4306-a67e-1edb28d76013",
+ "compute_node": "nfvisrv12",
+ "pci": None,
+ "vlan": None,
+ "ip_address": "192.168.251.15",
+ "mgmt_vnf_interface": True,
+ "mgmt_vdu_interface": True,
+ }
+ ],
+ "interfaces_vim_ids": ["4d081f50-e13a-4306-a67e-1edb28d76013"],
+ },
+ "modified_at": 1676755752.49715,
+ "created_at": 1676755693.91547,
+ "to_check_at": -1,
+ "tasks": [
+ {
+ "action_id": "6659675b-b6a4-4c0c-ad40-47dae476a961",
+ "nsr_id": "ee46620f-cba3-4245-b8be-183ff483bb7e",
+ "task_id": "6659675b-b6a4-4c0c-ad40-47dae476a961:3",
+ "status": "DONE",
+ "action": "CREATE",
+ "item": "vdu",
+ "target_record": target_record,
+ "mgmt_vnf_interface": 0,
+ }
+ ],
+}
+ro_task2 = {
+ "_id": "7b05fd30-f128-4486-a1ba-56fcf7387967:3",
+ "target_id": f"vim:{vim2_id}",
+ "vim_info": {
+ "created": True,
+ "created_items": {
+ "port:4d2faa64-3f10-42ec-a5db-0291600d0692": True,
+ },
+ "vim_id": vm2_id,
+ "vim_name": "test7-vnf-hackfest_basic-VM-0",
+ "vim_status": "ACTIVE",
+ "refresh_at": -1,
+ "interfaces": [
+ {
+ "mac_address": "fa:16:3e:2c:2d:21",
+ "vim_net_id": "327f5e8e-a383-47c9-80a3-ed45b71d24ca",
+ "vim_interface_id": "4d2faa64-3f10-42ec-a5db-0291600d0692",
+ "compute_node": "nfvisrv12",
+ "pci": None,
+ "vlan": None,
+ "ip_address": "192.168.251.197",
+ "mgmt_vnf_interface": True,
+ "mgmt_vdu_interface": True,
+ }
+ ],
+ "interfaces_vim_ids": ["4d2faa64-3f10-42ec-a5db-0291600d0692"],
+ },
+ "modified_at": 1676839542.4801,
+ "created_at": 1676839494.78525,
+ "to_check_at": -1,
+ "tasks": [
+ {
+ "action_id": "7b05fd30-f128-4486-a1ba-56fcf7387967",
+ "nsr_id": "ddf8c820-4cfa-47fb-8de3-e0afbe039efb",
+ "task_id": "7b05fd30-f128-4486-a1ba-56fcf7387967:3",
+ "status": "FAILED",
+ "action": "CREATE",
+ "item": "vdu",
+ "target_record": "vnfrs:41e16909-a519-4897-b481-f386e5022425:vdur.0.vim_info.vim:f239ed93-756b-408e-89f8-fcbf47a9d8f7",
+ "mgmt_vnf_interface": 0,
+ }
+ ],
+}
+wrong_ro_task = {
+ "_id": "6659675b-b6a4-4c0c-ad40-47dae476a961:3",
+ "target_id": "vim:f239ed93-756b-408e-89f8-fcbf47a9d8f7",
+}
+port1 = {
+ "id": "4d081f50-e13a-4306-a67e-1edb28d76013",
+ "network_id": net1_id,
+ "tenant_id": "34a71bb7d82f4ec691d8cc11045ae83e",
+ "mac_address": mac2_addr,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "device_id": vm1_id,
+ "device_owner": "compute:nova",
+ "fixed_ips": [],
+}
+port2 = {
+ "id": "5d081f50-e13a-4306-a67e-1edb28d76013",
+ "network_id": net1_id,
+ "tenant_id": "34a71bb7d82f4ec691d8cc11045ae83e",
+ "mac_address": mac2_addr,
+ "admin_state_up": True,
+ "status": "ACTIVE",
+ "device_id": vm1_id,
+ "device_owner": "compute:nova",
+ "fixed_ips": [],
+}
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+from copy import deepcopy
+import logging
+import threading
+import unittest
+from unittest.mock import MagicMock, mock_open, patch
+
+from novaclient.v2.servers import Server as NovaServer
+from osm_common import dbmongo
+from osm_common.dbbase import DbException
+from osm_ng_ro.monitor import (
+ MonitorDbException,
+ MonitorVimException,
+ MonitorVms,
+ MonitorVmsException,
+ start_monitoring,
+ stop_monitoring,
+ VimToMonitor,
+ VmToMonitor,
+)
+from osm_ng_ro.tests.sample_data import (
+ config,
+ db_vim_cacert,
+ db_vim_collection,
+ deleted_externally,
+ file_name,
+ interface_info2,
+ interface_with_binding,
+ ip1_addr,
+ mac1_addr,
+ mac2_addr,
+ net1_id,
+ old_interface,
+ old_interface2,
+ plugin_name,
+ port1,
+ port2,
+ ro_task1,
+ ro_task2,
+ sample_vim,
+ sample_vim_info,
+ sample_vnfr,
+ serialized_interface_info,
+ serialized_server_info,
+ server_other_info,
+ target_id,
+ target_record,
+ target_record2,
+ vdur_path,
+ vim1_id,
+ vim2_id,
+ vim3_id,
+ vim4_id,
+ vim_info_path,
+ vims,
+ vims_to_monitor,
+ vm1_id,
+ vm2_id,
+ vnfr_id,
+ wrong_ro_task,
+)
+from osm_ro_plugin.vimconn import VimConnector
+import yaml
+
+
+def create_server(id: str, name: str, status: str = "ACTIVE", info: dict = {}):
+ instance = NovaServer(manager="manager", info=info)
+ instance.id = id
+ instance.name = name
+ instance.status = status
+ return instance
+
+
+# The preparation for the tests: module-level fixtures shared by all test cases.
+# A VimConnector built from the sample VIM record (no password/config needed here).
+sample_vim_connector_instance = VimConnector(
+    uuid=sample_vim["_id"],
+    name=sample_vim["name"],
+    tenant_id=sample_vim.get("vim_tenant_id"),
+    tenant_name=sample_vim.get("vim_tenant_name"),
+    url=sample_vim["vim_url"],
+)
+# VMs to monitor: two pointing at the same target record, one already deleted.
+sample_vm = VmToMonitor(vm1_id, target_record)
+sample_vm2 = VmToMonitor(vm2_id, target_record)
+sample_vm3 = VmToMonitor("deleted-vm-id", target_record)
+server1 = create_server(vm1_id, "server1")
+server2 = create_server(vm2_id, "server2")
+server3 = create_server("other-vm-id3", "other-vm3")
+server4 = create_server("other-vm-id4", "other-vm4")
+# server7 gets its own deepcopy of the info dict so tests can mutate it safely.
+all_server_info = deepcopy(server_other_info)
+server7 = create_server(vm1_id, "server7", info=all_server_info)
+
+
+class CopyingMock(MagicMock):
+    # MagicMock records references to call arguments; if the code under test
+    # mutates an argument after the call, the recorded call would change too.
+    # Deep-copying args/kwargs at call time snapshots them for later asserts.
+    def __call__(self, *args, **kwargs):
+        args = deepcopy(args)
+        kwargs = deepcopy(kwargs)
+        return super(CopyingMock, self).__call__(*args, **kwargs)
+
+
+def check_if_assert_not_called(mocks: list):
+    """Assert that none of the given mock objects has been called."""
+    for mocking in mocks:
+        mocking.assert_not_called()
+
+
+class TestMonitorVms(unittest.TestCase):
+    @patch("osm_ng_ro.monitor.MonitorVms.__init__")
+    @patch("osm_ng_ro.ns_thread.ConfigValidate")
+    @patch("osm_ng_ro.monitor.MonitorVms.get_db_vims")
+    @patch("osm_ng_ro.monitor.MonitorVms.load_vims")
+    @patch("logging.getLogger", autospec=True)
+    def setUp(
+        self,
+        mock_logger,
+        mock_load_vims,
+        mock_get_db_vims,
+        mock_config_validate,
+        mock_init,
+    ):
+        """Create a MonitorVms instance with all collaborators mocked out."""
+        # Disable logging so that expected exceptions are not printed to the console.
+        mock_logger = logging.getLogger()
+        mock_logger.disabled = True
+        # MonitorVms.__init__ is patched to a no-op; the attributes it would
+        # normally set are assigned manually below.
+        mock_init.return_value = None
+        self.monitor = MonitorVms(config=config)
+        self.monitor.db_vims = []
+        self.monitor.db = CopyingMock(dbmongo.DbMongo(), autospec=True)
+        self.monitor.config = config
+        self.monitor.logger = mock_logger
+        self.monitor.my_vims = {}
+        self.monitor.refresh_config = mock_config_validate
+
+    @patch("osm_ng_ro.ns_thread.ConfigValidate.__init__")
+    @patch("osm_ng_ro.monitor.MonitorVms.get_db_vims")
+    @patch("osm_ng_ro.monitor.MonitorVms.load_vims")
+    @patch("logging.getLogger", autospec=True)
+    @patch("osm_ng_ro.monitor.MonitorVms.connect_db")
+    def test_init(
+        self,
+        mock_connect_db,
+        mock_logger,
+        mock_load_vims,
+        mock_get_db_vims,
+        mock_config_validate_init,
+    ):
+        """__init__ should wire config, refresh config, db connection, VIMs and logger."""
+        mock_config_validate_init.return_value = None
+        mock_get_db_vims.return_value = vims
+        instance = MonitorVms(config)
+        mock_config_validate_init.assert_called_once_with(config)
+        self.assertDictEqual(instance.config, config)
+        mock_load_vims.assert_called_once()
+        self.assertEqual(instance.db_vims, vims)
+        mock_connect_db.assert_called_once()
+        # connect_db is mocked, so db stays None while db_vims is populated.
+        self.assertIsNone(instance.db)
+        self.assertIsNotNone(instance.db_vims)
+        mock_logger.assert_called_once_with("ro.monitor")
+
+ @patch("osm_ng_ro.monitor.MonitorVms._load_vim")
+ def test_load_vims_empty_db_vims(self, mock_load_vim):
+ self.monitor.load_vims()
+ mock_load_vim.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms._load_vim")
+ def test_load_vims_vim_id_not_in_my_vims(self, mock_load_vim):
+ self.monitor.db_vims = vims
+ self.monitor.my_vims = {vim3_id: "vim-obj3"}
+ self.monitor.load_vims()
+ _call_mock_load_vim = mock_load_vim.call_args_list
+ self.assertEqual(mock_load_vim.call_count, 2)
+ self.assertEqual(
+ _call_mock_load_vim[0][0],
+ (vim1_id,),
+ )
+ self.assertEqual(
+ _call_mock_load_vim[1][0],
+ (vim2_id,),
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._load_vim")
+ def test_load_vims_vim_id_in_my_vims(self, mock_load_vim):
+ self.monitor.db_vims = vims
+ self.monitor.my_vims = {vim1_id: "vim-obj1", vim2_id: "vim-obj2"}
+ self.monitor.load_vims()
+ mock_load_vim.assert_not_called()
+
+ @patch("osm_common.dbmongo.DbMongo.db_connect")
+ @patch("osm_common.dbmongo.DbMongo.__init__")
+ @patch("osm_common.dbmemory.DbMemory.db_connect")
+ @patch("osm_common.dbmemory.DbMemory.__init__")
+ def test_connect_db_type_mongo(
+ self,
+ mock_dbmemory_init,
+ mock_dbmemory_connect,
+ mock_dbmongo_init,
+ mock_dbmongo_connect,
+ ):
+ self.monitor.db = None
+ self.monitor.config["database"]["driver"] = "mongo"
+ mock_dbmongo_init.return_value = None
+ self.monitor.connect_db()
+ mock_dbmongo_init.assert_called_once()
+ mock_dbmongo_connect.assert_called_once()
+ self.monitor.db.db_connect.assert_called_once_with(
+ self.monitor.config["database"]
+ )
+ check_if_assert_not_called([mock_dbmemory_init, mock_dbmemory_connect])
+
+ @patch("osm_common.dbmongo.DbMongo.db_connect")
+ @patch("osm_common.dbmongo.DbMongo.__init__")
+ @patch("osm_common.dbmemory.DbMemory.db_connect")
+ @patch("osm_common.dbmemory.DbMemory.__init__")
+ def test_connect_db_type_mongo_initialize_exception(
+ self,
+ mock_dbmemory_init,
+ mock_dbmemory_connect,
+ mock_dbmongo_init,
+ mock_dbmongo_connect,
+ ):
+ self.monitor.db = None
+ self.monitor.config["database"]["driver"] = "mongo"
+ mock_dbmongo_init.side_effect = ValueError("Db object could not be created.")
+ with self.assertRaises(MonitorDbException) as err:
+ self.monitor.connect_db()
+ self.assertEqual(str(err.exception), "Db object could not be created.")
+ mock_dbmongo_init.assert_called_once()
+ check_if_assert_not_called(
+ [mock_dbmongo_connect, mock_dbmemory_init, mock_dbmemory_connect]
+ )
+
+ @patch("osm_common.dbmongo.DbMongo.db_connect")
+ @patch("osm_common.dbmongo.DbMongo.__init__")
+ @patch("osm_common.dbmemory.DbMemory.db_connect")
+ @patch("osm_common.dbmemory.DbMemory.__init__")
+ def test_connect_db_type_mongo_connection_exception(
+ self,
+ mock_dbmemory_init,
+ mock_dbmemory_connect,
+ mock_dbmongo_init,
+ mock_dbmongo_connect,
+ ):
+ self.monitor.db = None
+ self.monitor.config["database"]["driver"] = "mongo"
+ mock_dbmongo_init.return_value = None
+ mock_dbmongo_connect.side_effect = DbException("Connection failed")
+ with self.assertRaises(MonitorDbException) as err:
+ self.monitor.connect_db()
+ self.assertEqual(str(err.exception), "database exception Connection failed")
+ mock_dbmongo_init.assert_called_once()
+ mock_dbmongo_connect.assert_called_once_with(self.monitor.config["database"])
+ check_if_assert_not_called([mock_dbmemory_init, mock_dbmemory_connect])
+
+ @patch("osm_common.dbmongo.DbMongo.db_connect")
+ @patch("osm_common.dbmongo.DbMongo.__init__")
+ @patch("osm_common.dbmemory.DbMemory.db_connect")
+ @patch("osm_common.dbmemory.DbMemory.__init__")
+ def test_connect_db_type_memory(
+ self,
+ mock_dbmemory_init,
+ mock_dbmemory_connect,
+ mock_dbmongo_init,
+ mock_dbmongo_connect,
+ ):
+ self.monitor.db = None
+ self.monitor.config["database"]["driver"] = "memory"
+ mock_dbmemory_init.return_value = None
+ self.monitor.connect_db()
+ mock_dbmemory_init.assert_called_once()
+ mock_dbmemory_connect.assert_called_once_with(self.monitor.config["database"])
+ check_if_assert_not_called([mock_dbmongo_init, mock_dbmongo_connect])
+
+ @patch("osm_common.dbmongo.DbMongo.db_connect")
+ @patch("osm_common.dbmongo.DbMongo.__init__")
+ @patch("osm_common.dbmemory.DbMemory.db_connect")
+ @patch("osm_common.dbmemory.DbMemory.__init__")
+ def test_connect_db_existing_db(
+ self,
+ mock_dbmemory_init,
+ mock_dbmemory_connect,
+ mock_dbmongo_init,
+ mock_dbmongo_connect,
+ ):
+ self.monitor.connect_db()
+ check_if_assert_not_called(
+ [
+ mock_dbmemory_init,
+ mock_dbmongo_init,
+ mock_dbmemory_connect,
+ mock_dbmongo_connect,
+ ]
+ )
+
+ @patch("osm_common.dbmongo.DbMongo.db_connect")
+ @patch("osm_common.dbmongo.DbMongo.__init__")
+ @patch("osm_common.dbmemory.DbMemory.db_connect")
+ @patch("osm_common.dbmemory.DbMemory.__init__")
+ def test_connect_db_wrong_driver_type(
+ self,
+ mock_dbmemory_init,
+ mock_dbmemory_connect,
+ mock_dbmongo_init,
+ mock_dbmongo_connect,
+ ):
+ self.monitor.db = None
+ self.monitor.config["database"]["driver"] = "posgresql"
+ with self.assertRaises(MonitorDbException) as err:
+ self.monitor.connect_db()
+ self.assertEqual(
+ str(err.exception),
+ "Invalid configuration param 'posgresql' at '[database]':'driver'",
+ )
+ check_if_assert_not_called(
+ [
+ mock_dbmemory_init,
+ mock_dbmongo_init,
+ mock_dbmemory_connect,
+ mock_dbmongo_connect,
+ ]
+ )
+
+    def test_get_db_vims(self):
+        """get_db_vims should list only openstack-type VIM accounts."""
+        self.monitor.db.get_list.return_value = vims
+        result = self.monitor.get_db_vims()
+        self.assertEqual(result, vims)
+        self.monitor.db.get_list.assert_called_once_with(
+            "vim_accounts", {"vim_type": "openstack"}
+        )
+
+ def test_get_db_vims_db_raises(self):
+ self.monitor.db.get_list.side_effect = DbException("Connection failed.")
+ with self.assertRaises(DbException) as err:
+ result = self.monitor.get_db_vims()
+ self.assertEqual(result, None)
+ self.assertEqual(str(err.exception), "database exception Connection failed.")
+ self.monitor.db.get_list.assert_called_once_with(
+ "vim_accounts", {"vim_type": "openstack"}
+ )
+
+ def test_find_ro_tasks_to_monitor(self):
+ self.monitor.db.get_list.return_value = [ro_task1]
+ result = self.monitor.find_ro_tasks_to_monitor()
+ self.assertEqual(result, [ro_task1])
+ self.monitor.db.get_list.assert_called_once_with(
+ "ro_tasks",
+ q_filter={
+ "tasks.status": ["DONE"],
+ "tasks.item": ["vdu"],
+ },
+ )
+
+ def test_find_ro_tasks_to_monitor_db_exception(self):
+ self.monitor.db.get_list.side_effect = DbException("Wrong database status")
+ with self.assertRaises(DbException) as err:
+ result = self.monitor.find_ro_tasks_to_monitor()
+ self.assertEqual(result, None)
+ self.assertEqual(str(err.exception), "database exception Wrong database status")
+ self.monitor.db.get_list.assert_called_once_with(
+ "ro_tasks",
+ q_filter={
+ "tasks.status": ["DONE"],
+ "tasks.item": ["vdu"],
+ },
+ )
+
+    def test_initialize_target_vim(self):
+        """_initialize_target_vim should build a connector from the VIM record."""
+        vim_module_conn = VimConnector
+        vim_connector_instance = self.monitor._initialize_target_vim(
+            vim_module_conn, sample_vim
+        )
+        self.assertIsInstance(vim_connector_instance, VimConnector)
+        self.assertListEqual(
+            [vim_connector_instance.id, vim_connector_instance.name],
+            [target_id, "openstackETSI1"],
+        )
+
+ def test_initialize_target_vim_invalid_vim_connector_input(self):
+ vim_module_conn = "openstack_vim_connector"
+ with self.assertRaises(TypeError) as err:
+ self.monitor._initialize_target_vim(vim_module_conn, sample_vim)
+ self.assertEqual(str(err.exception), "'str' object is not callable")
+
+ def test_initialize_target_vim_missing_vim_keys(self):
+ vim_module_conn = VimConnector
+ sample_vim = {
+ "_id": target_id,
+ "name": "openstackETSI1",
+ "vim_type": "openstack",
+ }
+ with self.assertRaises(KeyError) as err:
+ self.monitor._initialize_target_vim(vim_module_conn, sample_vim)
+ self.assertEqual(str(err.exception.args[0]), "vim_url")
+
+ def test_initialize_target_vim_invalid_vim_input_type(self):
+ vim_module_conn = VimConnector
+ sample_vim = [target_id, "openstackETSI1"]
+ with self.assertRaises(TypeError) as err:
+ self.monitor._initialize_target_vim(vim_module_conn, sample_vim)
+ self.assertEqual(
+ str(err.exception), "list indices must be integers or slices, not str"
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._process_vim_config")
+ @patch("osm_ng_ro.monitor.MonitorVms._load_plugin")
+ @patch("osm_ng_ro.monitor.MonitorVms._initialize_target_vim")
+ def test_load_vim(self, mock_target_vim, mock_load_plugin, mock_vim_config):
+ self.monitor.my_vims = {}
+ sample_vim["schema_version"] = "1.11"
+ self.monitor.db.get_one.return_value = sample_vim
+ mock_load_plugin.return_value = VimConnector
+ mock_target_vim.return_value = sample_vim_connector_instance
+ self.monitor._load_vim(target_id)
+ self.assertEqual(self.monitor.my_vims[target_id], sample_vim_connector_instance)
+ mock_vim_config.assert_called_once()
+ self.monitor.db.get_one.assert_called_once_with(
+ db_vim_collection, {"_id": target_id}
+ )
+ self.monitor.db.encrypt_decrypt_fields.assert_called_once_with(
+ sample_vim,
+ "decrypt",
+ fields=("password", "secret"),
+ schema_version="1.11",
+ salt=target_id,
+ )
+ mock_vim_config.assert_called_once_with(target_id, sample_vim)
+ mock_load_plugin.assert_called_once_with(plugin_name)
+ mock_target_vim.assert_called_once_with(VimConnector, sample_vim)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._process_vim_config")
+ @patch("osm_ng_ro.monitor.MonitorVms._load_plugin")
+ @patch("osm_ng_ro.monitor.MonitorVms._initialize_target_vim")
+ def test_load_vim_target_vim_not_found(
+ self, mock_target_vim, mock_load_plugin, mock_vim_config
+ ):
+ self.monitor.my_vims = {}
+ self.monitor.db.get_one.return_value = None
+ with self.assertRaises(MonitorVimException) as err:
+ self.monitor._load_vim(target_id)
+ self.assertEqual(
+ str(err.exception),
+ "Cannot load 55b2219a-7bb9-4644-9612-980dada84e83 plugin=rovim_openstack: "
+ "'NoneType' object has no attribute 'get'",
+ )
+ self.monitor.db.get_one.assert_called_once_with(
+ db_vim_collection, {"_id": target_id}
+ )
+ check_if_assert_not_called(
+ [
+ self.monitor.db.encrypt_decrypt_fields,
+ mock_vim_config,
+ mock_load_plugin,
+ mock_target_vim,
+ ]
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._process_vim_config")
+ @patch("osm_ng_ro.monitor.MonitorVms._load_plugin")
+ @patch("osm_ng_ro.monitor.MonitorVms._initialize_target_vim")
+ def test_load_vim_decrypt_fields_raises(
+ self, mock_target_vim, mock_load_plugin, mock_vim_config
+ ):
+ self.monitor.my_vims = {}
+ sample_vim["schema_version"] = "1.11"
+ self.monitor.db.get_one.return_value = sample_vim
+ self.monitor.db.encrypt_decrypt_fields.side_effect = DbException(
+ "Value could not decrypted."
+ )
+ with self.assertRaises(MonitorVimException) as err:
+ self.monitor._load_vim(target_id)
+ self.assertEqual(
+ str(err.exception),
+ "Cannot load 55b2219a-7bb9-4644-9612-980dada84e83 plugin=rovim_openstack: "
+ "database exception Value could not decrypted.",
+ )
+ self.monitor.db.get_one.assert_called_once_with(
+ db_vim_collection, {"_id": target_id}
+ )
+ self.monitor.db.encrypt_decrypt_fields.assert_called_once_with(
+ sample_vim,
+ "decrypt",
+ fields=("password", "secret"),
+ schema_version="1.11",
+ salt=target_id,
+ )
+ check_if_assert_not_called([mock_vim_config, mock_load_plugin, mock_target_vim])
+
+ @patch("osm_ng_ro.monitor.MonitorVms._process_vim_config")
+ @patch("osm_ng_ro.monitor.MonitorVms._load_plugin")
+ @patch("osm_ng_ro.monitor.MonitorVms._initialize_target_vim")
+ def test_load_vim_process_vim_config_raises(
+ self, mock_target_vim, mock_load_plugin, mock_vim_config
+ ):
+ self.monitor.my_vims = {}
+ sample_vim["schema_version"] = "1.11"
+ self.monitor.db.get_one.return_value = sample_vim
+ mock_vim_config.side_effect = MonitorVimException(
+ "Error writing file config_1234"
+ )
+ with self.assertRaises(MonitorVimException) as err:
+ self.monitor._load_vim(target_id)
+ self.assertEqual(
+ str(err.exception),
+ "Cannot load 55b2219a-7bb9-4644-9612-980dada84e83 plugin=rovim_openstack: "
+ "Error writing file config_1234",
+ )
+ self.monitor.db.get_one.assert_called_once_with(
+ db_vim_collection, {"_id": target_id}
+ )
+ self.monitor.db.encrypt_decrypt_fields.assert_called_once_with(
+ sample_vim,
+ "decrypt",
+ fields=("password", "secret"),
+ schema_version="1.11",
+ salt=target_id,
+ )
+ mock_vim_config.assert_called_once_with(target_id, sample_vim)
+ check_if_assert_not_called([mock_load_plugin, mock_target_vim])
+
+ @patch("osm_ng_ro.monitor.MonitorVms._process_vim_config")
+ @patch("osm_ng_ro.monitor.MonitorVms._load_plugin")
+ @patch("osm_ng_ro.monitor.MonitorVms._initialize_target_vim")
+ def test_load_vim_load_plugin_raises(
+ self, mock_target_vim, mock_load_plugin, mock_vim_config
+ ):
+ self.monitor.my_vims = {}
+ sample_vim["schema_version"] = "1.11"
+ self.monitor.db.get_one.return_value = sample_vim
+ mock_load_plugin.side_effect = MonitorVimException(
+ "Cannot load plugin osm_rovim_openstack"
+ )
+ with self.assertRaises(MonitorVimException) as err:
+ self.monitor._load_vim(target_id)
+ self.assertEqual(
+ str(err.exception),
+ "Cannot load 55b2219a-7bb9-4644-9612-980dada84e83 plugin=rovim_openstack: "
+ "Cannot load plugin osm_rovim_openstack",
+ )
+ self.monitor.db.get_one.assert_called_once_with(
+ db_vim_collection, {"_id": target_id}
+ )
+ self.monitor.db.encrypt_decrypt_fields.assert_called_once_with(
+ sample_vim,
+ "decrypt",
+ fields=("password", "secret"),
+ schema_version="1.11",
+ salt=target_id,
+ )
+ mock_vim_config.assert_called_once_with(target_id, sample_vim)
+ mock_load_plugin.assert_called_once_with(plugin_name)
+ mock_target_vim.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms._process_vim_config")
+ @patch("osm_ng_ro.monitor.MonitorVms._load_plugin")
+ @patch("osm_ng_ro.monitor.MonitorVms._initialize_target_vim")
+ def test_load_vim_initialize_target_vim_raises(
+ self, mock_target_vim, mock_load_plugin, mock_vim_config
+ ):
+ self.monitor.my_vims = {}
+ self.monitor.db.get_one.return_value = sample_vim
+ sample_vim["schema_version"] = "1.0"
+ mock_load_plugin.return_value = VimConnector
+ mock_target_vim.side_effect = TypeError("'module' object is not callable")
+ with self.assertRaises(MonitorVimException) as err:
+ self.monitor._load_vim(target_id)
+ self.assertEqual(
+ str(err.exception),
+ "Cannot load 55b2219a-7bb9-4644-9612-980dada84e83 plugin=rovim_openstack: "
+ "'module' object is not callable",
+ )
+ self.monitor.db.get_one.assert_called_once_with(
+ db_vim_collection, {"_id": target_id}
+ )
+ self.monitor.db.encrypt_decrypt_fields.assert_called_once_with(
+ sample_vim,
+ "decrypt",
+ fields=("password", "secret"),
+ schema_version="1.0",
+ salt=target_id,
+ )
+ mock_vim_config.assert_called_once_with(target_id, sample_vim)
+ mock_load_plugin.assert_called_once_with(plugin_name)
+ mock_target_vim.assert_called_once_with(VimConnector, sample_vim)
+
+ @patch("osm_ng_ro.monitor.makedirs")
+ @patch("osm_ng_ro.monitor.path")
+ @patch("builtins.open", new_callable=mock_open())
+ def test_process_vim_config_vim_without_config(
+ self, mock_open, mock_path, mock_makedirs
+ ):
+ db_vim = {}
+ self.monitor._process_vim_config(target_id, db_vim)
+ check_if_assert_not_called([mock_open, mock_path.isdir, mock_makedirs])
+
+ @patch("osm_ng_ro.monitor.random")
+ @patch("osm_ng_ro.monitor.makedirs")
+ @patch("osm_ng_ro.monitor.path")
+ @patch("builtins.open", new_callable=mock_open())
+ def test_process_vim_config_vim_with_ca_cert(
+ self, mock_open, mock_path, mock_makedirs, mock_random
+ ):
+ db_vim = {"config": {"ca_cert_content": "my_vim_cert"}}
+ mock_path.isdir.return_value = False
+ mock_random.randint.return_value = 23242
+ self.monitor._process_vim_config(target_id, db_vim)
+ self.assertEqual(db_vim["config"].get("ca_cert_content"), None)
+ self.assertEqual(
+ db_vim["config"].get("ca_cert"),
+ db_vim_cacert,
+ )
+ mock_path.isdir.asssert_called_once_with(file_name)
+ mock_makedirs.assert_called_once_with(file_name)
+ mock_random.randint.assert_called_once()
+ mock_open.assert_called_once_with(file_name + "/ca_cert", "w")
+
+ @patch("osm_ng_ro.monitor.random")
+ @patch("osm_ng_ro.monitor.makedirs")
+ @patch("osm_ng_ro.monitor.path")
+ @patch("builtins.open", new_callable=mock_open())
+ def test_process_vim_config_vim_with_cacert_path_is_dir(
+ self, mock_open, mock_path, mock_makedirs, mock_random
+ ):
+ db_vim = {"config": {"ca_cert_content": "my_vim_cert"}}
+ mock_path.isdir.return_value = True
+ mock_random.randint.return_value = 23242
+ self.monitor._process_vim_config(target_id, db_vim)
+ self.assertEqual(db_vim["config"].get("ca_cert_content"), None)
+ self.assertEqual(
+ db_vim["config"].get("ca_cert"),
+ db_vim_cacert,
+ )
+ mock_path.isdir.asssert_called_once_with(file_name)
+ mock_makedirs.assert_not_called()
+ mock_random.randint.assert_called_once()
+ mock_open.assert_called_once_with(file_name + "/ca_cert", "w")
+
+ @patch("osm_ng_ro.monitor.random")
+ @patch("osm_ng_ro.monitor.makedirs")
+ @patch("osm_ng_ro.monitor.path")
+ @patch("builtins.open", new_callable=mock_open())
+ def test_process_vim_config_vim_with_cacert_makedir_raises(
+ self, mock_open, mock_path, mock_makedirs, mock_random
+ ):
+ db_vim = {"config": {"ca_cert_content": "my_vim_cert"}}
+ mock_path.isdir.return_value = False
+ mock_random.randint.return_value = 23242
+ mock_makedirs.side_effect = OSError("Can not create directory")
+ with self.assertRaises(MonitorVimException) as err:
+ self.monitor._process_vim_config(target_id, db_vim)
+ self.assertEqual(
+ str(err.exception),
+ "Error writing to file '/app/osm_ro/certs/55b2219a-7bb9-4644-9612-980dada84e83:23242': "
+ "Can not create directory",
+ )
+ self.assertEqual(db_vim["config"].get("ca_cert_content"), "my_vim_cert")
+ self.assertEqual(db_vim["config"].get("ca_cert"), None)
+ mock_path.isdir.asssert_called_once_with(file_name)
+ mock_makedirs.assert_called_once_with(file_name)
+ mock_random.randint.assert_called_once()
+ mock_open.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.random")
+ @patch("osm_ng_ro.monitor.makedirs")
+ @patch("osm_ng_ro.monitor.path")
+ @patch("builtins.open", new_callable=mock_open())
+ def test_process_vim_config_vim_with_cacert_mock_open_raises(
+ self, mock_open, mock_path, mock_makedirs, mock_random
+ ):
+ db_vim = {"config": {"ca_cert_content": "my_vim_cert"}}
+ mock_path.isdir.return_value = False
+ mock_random.randint.return_value = 23242
+ mock_open.side_effect = FileNotFoundError("File is not found.")
+ with self.assertRaises(MonitorVimException) as err:
+ self.monitor._process_vim_config(target_id, db_vim)
+ self.assertEqual(
+ str(err.exception),
+ "Error writing to file '/app/osm_ro/certs/55b2219a-7bb9-4644-9612-980dada84e83:23242/ca_cert': "
+ "File is not found.",
+ )
+ self.assertEqual(db_vim["config"].get("ca_cert_content"), "my_vim_cert")
+ self.assertEqual(db_vim["config"].get("ca_cert"), None)
+ mock_path.isdir.asssert_called_once_with(file_name)
+ mock_makedirs.assert_called_once_with(file_name)
+ mock_random.randint.assert_called_once()
+ mock_open.assert_called_once_with(file_name + "/ca_cert", "w")
+
+ @patch("osm_ng_ro.monitor.random")
+ @patch("osm_ng_ro.monitor.makedirs")
+ @patch("osm_ng_ro.monitor.path")
+ @patch("builtins.open", new_callable=mock_open())
+ def test_process_vim_config_vim_without_cacert(
+ self, mock_open, mock_path, mock_makedirs, mock_random
+ ):
+ db_vim = {"config": {}}
+ self.monitor._process_vim_config(target_id, db_vim)
+ self.assertEqual(db_vim["config"].get("ca_cert"), None)
+ check_if_assert_not_called(
+ [mock_path.isdir, mock_makedirs, mock_random.randint, mock_open]
+ )
+
+ @patch("osm_ng_ro.monitor.entry_points")
+ def test_load_plugin_name_exists(self, mock_entry_points):
+ self.monitor.plugins = {plugin_name: VimConnector}
+ result = self.monitor._load_plugin(plugin_name)
+ mock_entry_points.assert_not_called()
+ self.assertEqual(self.monitor.plugins, {plugin_name: VimConnector})
+ self.assertEqual(result, VimConnector)
+
+ @patch("osm_ng_ro.monitor.entry_points")
+ def test_load_plugin_name_does_not_exist(self, mock_entry_points):
+ self.monitor.plugins = {}
+ mock_ep = MagicMock()
+ mock_ep.load.return_value = sample_vim_connector_instance
+ mock_entry_points.return_value = [mock_ep]
+ result = self.monitor._load_plugin(plugin_name)
+ self.assertEqual(mock_entry_points.call_count, 1)
+ mock_entry_points.assert_called_once_with(
+ group="osm_rovim.plugins", name=plugin_name
+ )
+ self.assertEqual(
+ self.monitor.plugins, {plugin_name: sample_vim_connector_instance}
+ )
+ self.assertEqual(result, sample_vim_connector_instance)
+
+ @patch("osm_ng_ro.monitor.entry_points")
+ def test_load_plugin_load_raises(self, mock_entry_points):
+ self.monitor.plugins = {}
+ mock_entry_points.return_value = None
+ with self.assertRaises(MonitorVimException) as err:
+ self.monitor._load_plugin(plugin_name)
+ self.assertEqual(
+ str(err.exception),
+ "Cannot load plugin osm_rovim_openstack: 'NoneType' object is not iterable",
+ )
+ self.assertEqual(mock_entry_points.call_count, 1)
+ mock_entry_points.assert_called_once_with(
+ group="osm_rovim.plugins", name=plugin_name
+ )
+ self.assertEqual(self.monitor.plugins, {})
+
+ @patch("osm_ng_ro.monitor.VmToMonitor")
+ def test_create_vm_to_monitor_empty_ro_task(self, mock_vm_to_monitor):
+ # An empty ro_task yields None and no VmToMonitor is constructed.
+ ro_task = {}
+ result = self.monitor.create_vm_to_monitor(ro_task)
+ self.assertEqual(result, None)
+ mock_vm_to_monitor.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.VmToMonitor")
+ def test_create_vm_to_monitor(self, mock_vm_to_monitor):
+ sample_vm = VmToMonitor("sample_id", "sample_target_record")
+ mock_vm_to_monitor.return_value = sample_vm
+ result = self.monitor.create_vm_to_monitor(ro_task1)
+ self.assertEqual(result, sample_vm)
+ mock_vm_to_monitor.assert_called_once_with(
+ "ebd39f37-e607-4bce-9f10-ea4c5635f726", target_record
+ )
+
+ @patch("osm_ng_ro.monitor.VmToMonitor")
+ def test_create_vm_to_monitor_wrong_ro_task_format(self, mock_vm_to_monitor):
+ mock_vm_to_monitor.return_value = "VmtoMonitor"
+ with self.assertRaises(KeyError) as err:
+ self.monitor.create_vm_to_monitor(wrong_ro_task)
+ self.assertEqual(str(err.exception.args[0]), "vim_info")
+ mock_vm_to_monitor.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms.create_vm_to_monitor")
+ def test_add_vm_to_existing_vim(self, mock_create_vm_to_monitor):
+ sample_vim1 = VimToMonitor(vim1_id, [vm1_id])
+ vims_to_monitor = [sample_vim1]
+ result = self.monitor.add_vm_to_existing_vim(vims_to_monitor, ro_task2, vim1_id)
+ self.assertEqual(result, True)
+ mock_create_vm_to_monitor.assert_called_once_with(ro_task2)
+ self.assertEqual(2, len(sample_vim1.vms))
+
+ @patch("osm_ng_ro.monitor.MonitorVms.create_vm_to_monitor")
+ def test_add_vm_to_existing_vim_empty_vims_list(self, mock_create_vm_to_monitor):
+ vims_to_monitor = []
+ result = self.monitor.add_vm_to_existing_vim(vims_to_monitor, ro_task1, vim1_id)
+ self.assertEqual(result, False)
+ mock_create_vm_to_monitor.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms.create_vm_to_monitor")
+ def test_add_vm_to_existing_vim_different_target_vim_id(
+ self, mock_create_vm_to_monitor
+ ):
+ sample_vim1 = VimToMonitor(vim1_id, [vm1_id])
+ vims_to_monitor = [sample_vim1]
+ result = self.monitor.add_vm_to_existing_vim(vims_to_monitor, ro_task2, vim2_id)
+ self.assertEqual(result, False)
+ mock_create_vm_to_monitor.assert_not_called()
+ self.assertEqual(1, len(sample_vim1.vms))
+
+ @patch("osm_ng_ro.monitor.MonitorVms.create_vm_to_monitor")
+ def test_add_vm_to_existing_vim_create_vm_to_monitor_raises(
+ self, mock_create_vm_to_monitor
+ ):
+ sample_vim1 = VimToMonitor(vim1_id, [vm1_id])
+ vims_to_monitor = [sample_vim1]
+ mock_create_vm_to_monitor.side_effect = KeyError(
+ "target_record does not exist."
+ )
+ with self.assertRaises(KeyError) as err:
+ self.monitor.add_vm_to_existing_vim(vims_to_monitor, ro_task2, vim1_id)
+ self.assertEqual(str(err.exception.args[0]), "target_record does not exist.")
+ mock_create_vm_to_monitor.assert_called_once_with(ro_task2)
+ self.assertEqual(1, len(sample_vim1.vms))
+
+ @patch("osm_ng_ro.monitor.MonitorVms.create_vm_to_monitor")
+ @patch("osm_ng_ro.monitor.VimToMonitor")
+ def test_add_new_vim_for_monitoring(
+ self, mock_vim_to_monitor, mock_create_vm_to_monitor
+ ):
+ sample_vim = VimToMonitor(vim1_id, [])
+ mock_vim_to_monitor.return_value = sample_vim
+ self.monitor.add_new_vim_for_monitoring(vims_to_monitor, ro_task1, vim1_id)
+ mock_vim_to_monitor.assert_called_once_with(vim1_id, [])
+ mock_create_vm_to_monitor.assert_called_once_with(ro_task1)
+ self.assertEqual(len(sample_vim.vms), 1)
+ self.assertEqual(len(vims_to_monitor), 1)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.create_vm_to_monitor")
+ @patch("osm_ng_ro.monitor.VimToMonitor")
+ def test_add_new_vim_for_monitoring_vim_to_monitor_raises(
+ self, mock_vim_to_monitor, mock_create_vm_to_monitor
+ ):
+ vims_to_monitor = []
+ mock_vim_to_monitor.side_effect = TypeError(
+ "Missing required positional arguments"
+ )
+ with self.assertRaises(TypeError) as err:
+ self.monitor.add_new_vim_for_monitoring(vims_to_monitor, ro_task1, None)
+ self.assertEqual(
+ str(err.exception.args[0]), "Missing required positional arguments"
+ )
+ mock_vim_to_monitor.assert_called_once_with(None, [])
+ mock_create_vm_to_monitor.assert_not_called()
+ self.assertEqual(len(vims_to_monitor), 0)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.create_vm_to_monitor")
+ @patch("osm_ng_ro.monitor.VimToMonitor")
+ def test_add_new_vim_for_monitoring_create_vm_to_monitor_raises(
+ self, mock_vim_to_monitor, mock_create_vm_to_monitor
+ ):
+ vims_to_monitor = []
+ mock_create_vm_to_monitor.side_effect = KeyError("target_record is not found.")
+ with self.assertRaises(KeyError) as err:
+ self.monitor.add_new_vim_for_monitoring(vims_to_monitor, ro_task1, vim1_id)
+ self.assertEqual(str(err.exception.args[0]), "target_record is not found.")
+ mock_vim_to_monitor.assert_called_once_with(vim1_id, [])
+ mock_create_vm_to_monitor.assert_called_once_with(ro_task1)
+ self.assertEqual(len(vims_to_monitor), 0)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.add_vm_to_existing_vim")
+ @patch("osm_ng_ro.monitor.MonitorVms.add_new_vim_for_monitoring")
+ def test_prepare_vims_to_monitor_no_proper_existing_vim(
+ self, mock_add_new_vim_for_monitoring, mock_add_vm_to_existing_vim
+ ):
+ mock_add_vm_to_existing_vim.return_value = False
+ self.monitor.prepare_vims_to_monitor(vims_to_monitor, ro_task1, vim1_id)
+ mock_add_vm_to_existing_vim.assert_called_once_with(
+ vims_to_monitor, ro_task1, vim1_id
+ )
+ mock_add_new_vim_for_monitoring.assert_called_once_with(
+ vims_to_monitor, ro_task1, vim1_id
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.add_vm_to_existing_vim")
+ @patch("osm_ng_ro.monitor.MonitorVms.add_new_vim_for_monitoring")
+ def test_prepare_vims_to_monitor_proper_existing_vim(
+ self, mock_add_new_vim_for_monitoring, mock_add_vm_to_existing_vim
+ ):
+ mock_add_vm_to_existing_vim.return_value = True
+ self.monitor.prepare_vims_to_monitor(vims_to_monitor, ro_task1, vim1_id)
+ mock_add_vm_to_existing_vim.assert_called_once_with(
+ vims_to_monitor, ro_task1, vim1_id
+ )
+ mock_add_new_vim_for_monitoring.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms.add_vm_to_existing_vim")
+ @patch("osm_ng_ro.monitor.MonitorVms.add_new_vim_for_monitoring")
+ def test_prepare_vims_to_monitor_add_vm_to_existing_vim_raises(
+ self, mock_add_new_vim_for_monitoring, mock_add_vm_to_existing_vim
+ ):
+ mock_add_vm_to_existing_vim.side_effect = KeyError(
+ "target_record is not found."
+ )
+ with self.assertRaises(KeyError) as err:
+ self.monitor.prepare_vims_to_monitor(vims_to_monitor, ro_task1, vim1_id)
+ self.assertEqual(str(err.exception.args[0]), "target_record is not found.")
+ mock_add_vm_to_existing_vim.assert_called_once_with(
+ vims_to_monitor, ro_task1, vim1_id
+ )
+ mock_add_new_vim_for_monitoring.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms.add_vm_to_existing_vim")
+ @patch("osm_ng_ro.monitor.MonitorVms.add_new_vim_for_monitoring")
+ def test_prepare_vims_to_monitor_add_new_vim_for_monitoring_raises(
+ self, mock_add_new_vim_for_monitoring, mock_add_vm_to_existing_vim
+ ):
+ mock_add_vm_to_existing_vim.return_value = False
+ mock_add_new_vim_for_monitoring.side_effect = KeyError(
+ "target_record is not found."
+ )
+ with self.assertRaises(KeyError) as err:
+ self.monitor.prepare_vims_to_monitor(vims_to_monitor, ro_task1, vim1_id)
+ self.assertEqual(str(err.exception.args[0]), "target_record is not found.")
+ mock_add_vm_to_existing_vim.assert_called_once_with(
+ vims_to_monitor, ro_task1, vim1_id
+ )
+ mock_add_new_vim_for_monitoring.assert_called_once_with(
+ vims_to_monitor, ro_task1, vim1_id
+ )
+
+ def test_get_db_paths(self):
+ self.monitor.db.get_one.return_value = sample_vnfr
+ (
+ vim_info_path,
+ vim_id,
+ vnfr_id,
+ vdur_path,
+ vdur_index,
+ db_vnfr,
+ ) = self.monitor._get_db_paths(target_record)
+ self.assertEqual(
+ vim_info_path, "vdur.0.vim_info.vim:f239ed93-756b-408e-89f8-fcbf47a9d8f7"
+ )
+ self.assertEqual(vim_id, vim4_id)
+ self.assertEqual(vdur_path, "vdur.0")
+ self.assertEqual(vdur_index, 0)
+ self.assertEqual(vnfr_id, vnfr_id)
+ self.assertDictEqual(db_vnfr, sample_vnfr)
+ self.monitor.db.get_one.assert_called_once_with(
+ "vnfrs",
+ {"_id": vnfr_id},
+ fail_on_empty=False,
+ )
+
+ def test_get_db_paths_empty_vnfr(self):
+ self.monitor.db.get_one.return_value = None
+ (
+ vim_info_path,
+ vim_id,
+ vnfr_id,
+ vdur_path,
+ vdur_index,
+ db_vnfr,
+ ) = self.monitor._get_db_paths(target_record)
+ self.assertEqual(
+ vim_info_path, "vdur.0.vim_info.vim:f239ed93-756b-408e-89f8-fcbf47a9d8f7"
+ )
+ self.assertEqual(vim_id, vim4_id)
+ self.assertEqual(vdur_path, "vdur.0")
+ self.assertEqual(vdur_index, 0)
+ self.assertEqual(vnfr_id, vnfr_id)
+ self.assertEqual(db_vnfr, None)
+ self.monitor.db.get_one.assert_called_once_with(
+ "vnfrs",
+ {"_id": vnfr_id},
+ fail_on_empty=False,
+ )
+
+ def test_get_db_paths_invalid_target_record(self):
+ invalid_target_record = "vnfrs:35c034cc-8c5b-48c4-bfa2-17a71577ef19:f239ed93-756b-408e-89f8-fcbf47a9d8f7"
+ with self.assertRaises(MonitorVmsException) as err:
+ self.monitor._get_db_paths(invalid_target_record)
+ self.assertEqual(
+ str(err.exception.args[0]),
+ "not enough values to unpack (expected 4, got 3)",
+ )
+ self.monitor.db.get_one.assert_not_called()
+
+ def test_get_db_paths_db_raises(self):
+ self.monitor.db.get_one.side_effect = DbException("Connection Failed.")
+ with self.assertRaises(MonitorVmsException) as err:
+ self.monitor._get_db_paths(target_record)
+ self.assertEqual(
+ str(err.exception.args[0]), "database exception Connection Failed."
+ )
+ self.monitor.db.get_one.assert_called_once_with(
+ "vnfrs",
+ {"_id": vnfr_id},
+ fail_on_empty=False,
+ )
+
+ def test_check_if_vdur_vim_info_exists(self):
+ # vdur index 0 exists in sample_vnfr with vim_info -> True.
+ vdur_index = 0
+ result = self.monitor._check_if_vdur_vim_info_exists(sample_vnfr, vdur_index)
+ self.assertEqual(result, True)
+
+ def test_check_if_vdur_vim_info_exists_wrong_vdur_index(self):
+ # An out-of-range vdur index yields None (no exception).
+ vdur_index = 3
+ result = self.monitor._check_if_vdur_vim_info_exists(sample_vnfr, vdur_index)
+ self.assertEqual(result, None)
+
+ def test_check_if_vdur_vim_info_exists_empty_vnfr(self):
+ # An empty vnfr dict yields None.
+ vdur_index = 2
+ result = self.monitor._check_if_vdur_vim_info_exists({}, vdur_index)
+ self.assertEqual(result, None)
+
+ def test_check_if_vdur_vim_info_exists_str_vdur_index(self):
+ # A string index (invalid type) yields None rather than raising.
+ vdur_index = "2"
+ result = self.monitor._check_if_vdur_vim_info_exists({}, vdur_index)
+ self.assertEqual(result, None)
+
+ def test_check_if_vdur_vim_info_exists_none_vnfr(self):
+ # A None vnfr yields None rather than raising AttributeError.
+ vdur_index = 2
+ result = self.monitor._check_if_vdur_vim_info_exists(None, vdur_index)
+ self.assertEqual(result, None)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_db_paths")
+ @patch("osm_ng_ro.monitor.MonitorVms._check_if_vdur_vim_info_exists")
+ @patch("osm_ng_ro.monitor.deepcopy")
+ def test_get_vm_data_from_db(
+ self, mock_deepcopy, mock_vim_info_exists, mock_get_db_paths
+ ):
+ vim_id = vim4_id
+ vdur_index = 0
+ db_vnfr = sample_vnfr
+ mock_get_db_paths.return_value = (
+ vim_info_path,
+ vim_id,
+ vnfr_id,
+ vdur_path,
+ vdur_index,
+ db_vnfr,
+ )
+ mock_vim_info_exists.return_value = True
+ mock_deepcopy.return_value = sample_vim_info
+ (
+ vdur_path_result,
+ vdur_vim_info_update_result,
+ db_vnfr_result,
+ existing_vim_info_result,
+ vnfr_id_result,
+ vim_info_path_result,
+ ) = self.monitor._get_vm_data_from_db(sample_vm)
+ self.assertEqual(vdur_path_result, vdur_path)
+ self.assertEqual(vdur_vim_info_update_result, sample_vim_info)
+ self.assertEqual(db_vnfr_result, db_vnfr)
+ self.assertEqual(existing_vim_info_result, sample_vim_info)
+ self.assertEqual(vnfr_id_result, vnfr_id)
+ self.assertEqual(vim_info_path_result, vim_info_path)
+ mock_deepcopy.assert_called_once_with(sample_vim_info)
+ mock_get_db_paths.assert_called_once_with(target_record)
+ mock_vim_info_exists.assert_called_once_with(db_vnfr, vdur_index)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_db_paths")
+ @patch("osm_ng_ro.monitor.MonitorVms._check_if_vdur_vim_info_exists")
+ @patch("osm_ng_ro.monitor.deepcopy")
+ def test_get_vm_data_from_db_no_vim_info(
+ self, mock_deepcopy, mock_vim_info_exists, mock_get_db_paths
+ ):
+ vim_id = vim4_id
+ vdur_index = 0
+ db_vnfr = sample_vnfr
+ mock_get_db_paths.return_value = (
+ vim_info_path,
+ vim_id,
+ vnfr_id,
+ vdur_path,
+ vdur_index,
+ db_vnfr,
+ )
+ mock_vim_info_exists.return_value = False
+ result = self.monitor._get_vm_data_from_db(sample_vm)
+ self.assertEqual(result, None)
+ mock_deepcopy.assert_not_called()
+ mock_get_db_paths.assert_called_once_with(target_record)
+ mock_vim_info_exists.assert_called_once_with(db_vnfr, vdur_index)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_db_paths")
+ @patch("osm_ng_ro.monitor.MonitorVms._check_if_vdur_vim_info_exists")
+ @patch("osm_ng_ro.monitor.deepcopy")
+ def test_get_vm_data_from_db_get_db_path_raises(
+ self, mock_deepcopy, mock_vim_info_exists, mock_get_db_paths
+ ):
+ mock_get_db_paths.side_effect = DbException("Connection failed.")
+ with self.assertRaises(DbException) as err:
+ self.monitor._get_vm_data_from_db(sample_vm)
+ self.assertEqual(
+ str(err.exception.args[0]), "database exception Connection failed."
+ )
+ mock_get_db_paths.assert_called_once_with(target_record)
+ check_if_assert_not_called([mock_deepcopy, mock_vim_info_exists])
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_db_paths")
+ @patch("osm_ng_ro.monitor.MonitorVms._check_if_vdur_vim_info_exists")
+ @patch("osm_ng_ro.monitor.deepcopy")
+ def test_get_vm_data_from_db_vnfr_without_correct_vdur_index(
+ self, mock_deepcopy, mock_vim_info_exists, mock_get_db_paths
+ ):
+ vim_id = vim4_id
+ vdur_index = 2
+ db_vnfr = sample_vnfr
+ mock_get_db_paths.return_value = (
+ vim_info_path,
+ vim_id,
+ vnfr_id,
+ vdur_path,
+ vdur_index,
+ db_vnfr,
+ )
+ mock_vim_info_exists.return_value = True
+ with self.assertRaises(IndexError) as err:
+ self.monitor._get_vm_data_from_db(sample_vm)
+ self.assertEqual(str(err.exception.args[0]), "list index out of range")
+ mock_deepcopy.assert_not_called()
+ mock_get_db_paths.assert_called_once_with(target_record)
+ mock_vim_info_exists.assert_called_once_with(db_vnfr, vdur_index)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_db_paths")
+ @patch("osm_ng_ro.monitor.MonitorVms._check_if_vdur_vim_info_exists")
+ @patch("osm_ng_ro.monitor.deepcopy")
+ def test_get_vm_data_from_db_vnfr_without_proper_vim_id(
+ self, mock_deepcopy, mock_vim_info_exists, mock_get_db_paths
+ ):
+ vim_id = "5239ed93-756b-408e-89f8-fcbf47a9d8f7"
+ vdur_index = 0
+ db_vnfr = sample_vnfr
+ mock_get_db_paths.return_value = (
+ vim_info_path,
+ vim_id,
+ vnfr_id,
+ vdur_path,
+ vdur_index,
+ db_vnfr,
+ )
+ mock_vim_info_exists.return_value = True
+ self.monitor._get_vm_data_from_db(sample_vm)
+ mock_deepcopy.assert_not_called()
+ mock_get_db_paths.assert_called_once_with(target_record)
+ mock_vim_info_exists.assert_called_once_with(db_vnfr, vdur_index)
+
+ def test_update_vim_info_for_deleted_vm_empty_input_dict(self):
+ vdur_vim_info_update = {}
+ self.monitor.update_vim_info_for_deleted_vm(vdur_vim_info_update)
+ self.assertEqual(
+ vdur_vim_info_update,
+ deleted_externally,
+ )
+
+ def test_update_vim_info_for_deleted_vm_update_existing_info(self):
+ vdur_vim_info_update = {
+ "vim_status": "ACTIVE",
+ "vim_message": None,
+ "vim_id": vm1_id,
+ "vim_name": "test7-vnf-hackfest_basic-VM-000000",
+ }
+ self.monitor.update_vim_info_for_deleted_vm(vdur_vim_info_update)
+ self.assertEqual(
+ vdur_vim_info_update,
+ deleted_externally,
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vim_info_for_deleted_vm")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_deleted_vdur_no_vm_data_in_db(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_update_vim_info_for_deleted_vm,
+ mock_get_vm_data_from_db,
+ ):
+ mock_get_vm_data_from_db.return_value = None
+ self.monitor.report_deleted_vdur(sample_vm)
+ self.assertEqual(mock_get_vm_data_from_db.call_count, 1)
+ check_if_assert_not_called(
+ [
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_update_vim_info_for_deleted_vm,
+ ]
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vim_info_for_deleted_vm")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_deleted_vdur(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_update_vim_info_for_deleted_vm,
+ mock_get_vm_data_from_db,
+ ):
+ existing_vim_info = sample_vim_info
+ vdur_vim_info_update = deleted_externally
+ mock_get_vm_data_from_db.return_value = (
+ vdur_path,
+ vdur_vim_info_update,
+ None,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ )
+ vdur_update = {
+ vdur_path + ".status": "DELETED",
+ }
+ self.monitor.report_deleted_vdur(sample_vm)
+ self.assertEqual(mock_get_vm_data_from_db.call_count, 1)
+ mock_get_vm_data_from_db.assert_called_with(sample_vm)
+ mock_update_vim_info_for_deleted_vm.assert_called_once_with(
+ vdur_vim_info_update
+ )
+ mock_backup_vdu_interfaces.assert_called_once_with(vdur_vim_info_update)
+ mock_update_in_database.assert_called_once_with(
+ [vdur_update, {vim_info_path: vdur_vim_info_update}], vnfr_id
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vim_info_for_deleted_vm")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_deleted_vdur_vm_db_already_updated(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_update_vim_info_for_deleted_vm,
+ mock_get_vm_data_from_db,
+ ):
+ vdur_vim_info_update = existing_vim_info = deleted_externally
+ mock_get_vm_data_from_db.return_value = (
+ vdur_path,
+ vdur_vim_info_update,
+ None,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ )
+ self.monitor.report_deleted_vdur(sample_vm)
+ self.assertEqual(mock_get_vm_data_from_db.call_count, 1)
+ mock_get_vm_data_from_db.assert_called_with(sample_vm)
+ mock_update_vim_info_for_deleted_vm.assert_called_once_with(
+ vdur_vim_info_update
+ )
+ check_if_assert_not_called(
+ [mock_backup_vdu_interfaces, mock_update_in_database]
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vim_info_for_deleted_vm")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_deleted_vdur_get_vm_data_raises(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_update_vim_info_for_deleted_vm,
+ mock_get_vm_data_from_db,
+ ):
+ mock_get_vm_data_from_db.side_effect = IndexError("list index out of range.")
+ with self.assertRaises(IndexError) as err:
+ self.monitor.report_deleted_vdur(sample_vm)
+ self.assertEqual(str(err.exception.args[0]), "list index out of range.")
+ self.assertEqual(mock_get_vm_data_from_db.call_count, 1)
+ mock_get_vm_data_from_db.assert_called_with(sample_vm)
+ check_if_assert_not_called(
+ [
+ mock_update_vim_info_for_deleted_vm,
+ mock_backup_vdu_interfaces,
+ mock_update_in_database,
+ ]
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vim_info_for_deleted_vm")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_deleted_vdur_update_in_database_raises(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_update_vim_info_for_deleted_vm,
+ mock_get_vm_data_from_db,
+ ):
+ existing_vim_info = sample_vim_info
+ vdur_vim_info_update = deleted_externally
+ mock_update_in_database.side_effect = MonitorDbException(
+ "Error while updating differences in VNFR."
+ )
+ mock_get_vm_data_from_db.return_value = (
+ vdur_path,
+ vdur_vim_info_update,
+ None,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ )
+ vdur_update = {
+ vdur_path + ".status": "DELETED",
+ }
+ with self.assertRaises(MonitorDbException) as err:
+ self.monitor.report_deleted_vdur(sample_vm)
+ self.assertEqual(
+ str(err.exception.args[0]), "Error while updating differences in VNFR."
+ )
+ self.assertEqual(mock_get_vm_data_from_db.call_count, 1)
+ mock_get_vm_data_from_db.assert_called_with(sample_vm)
+ mock_update_vim_info_for_deleted_vm.assert_called_once_with(
+ vdur_vim_info_update
+ )
+ mock_backup_vdu_interfaces.assert_called_once_with(vdur_vim_info_update)
+ mock_update_in_database.assert_called_once_with(
+ [vdur_update, {vim_info_path: vdur_vim_info_update}], vnfr_id
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.report_vdur_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.report_deleted_vdur")
+ def test_update_vnfrs(self, mock_report_deleted_vdur, mock_report_vdur_updates):
+ vms_to_monitor = [sample_vm, sample_vm2, sample_vm3]
+ servers = [server1, server2, server3, server4]
+ ports = {"ports": [port1, port2]}
+ self.monitor.update_vnfrs(servers, ports, vms_to_monitor)
+ self.assertEqual(mock_report_vdur_updates.call_count, 2)
+ mock_report_deleted_vdur.assert_called_once_with(sample_vm3)
+ _call_mock_report_vdur_updates = mock_report_vdur_updates.call_args_list
+ self.assertEqual(
+ _call_mock_report_vdur_updates[0].args,
+ (server1, sample_vm, ports),
+ )
+ self.assertEqual(
+ _call_mock_report_vdur_updates[1].args,
+ (server2, sample_vm2, ports),
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.report_vdur_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.report_deleted_vdur")
+ def test_update_vnfrs_empty_vms_to_monitor(
+ self, mock_report_deleted_vdur, mock_report_vdur_updates
+ ):
+ vms_to_monitor = []
+ servers = [server1, server2, server3, server4]
+ ports = {"ports": [port1, port2]}
+ self.monitor.update_vnfrs(servers, ports, vms_to_monitor)
+ check_if_assert_not_called([mock_report_deleted_vdur, mock_report_vdur_updates])
+
+ @patch("osm_ng_ro.monitor.MonitorVms.report_vdur_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.report_deleted_vdur")
+ def test_update_vnfrs_empty_servers(
+ self, mock_report_deleted_vdur, mock_report_vdur_updates
+ ):
+ vms_to_monitor = [sample_vm, sample_vm2, sample_vm3]
+ servers = []
+ ports = {"ports": [port1, port2]}
+ self.monitor.update_vnfrs(servers, ports, vms_to_monitor)
+ mock_report_vdur_updates.assert_not_called()
+ self.assertEqual(mock_report_deleted_vdur.call_count, 3)
+ _call_mock_report_deleted_vdur = mock_report_deleted_vdur.call_args_list
+ self.assertEqual(
+ _call_mock_report_deleted_vdur[0].args[0],
+ (sample_vm),
+ )
+ self.assertEqual(
+ _call_mock_report_deleted_vdur[1].args[0],
+ (sample_vm2),
+ )
+ self.assertEqual(
+ _call_mock_report_deleted_vdur[2].args[0],
+ (sample_vm3),
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.report_vdur_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.report_deleted_vdur")
+ def test_update_vnfrs_report_vdur_updates_raises(
+ self, mock_report_deleted_vdur, mock_report_vdur_updates
+ ):
+ vms_to_monitor = [sample_vm, sample_vm2, sample_vm3]
+ servers = [server1, server2, server3, server4]
+ ports = {"ports": [port1, port2]}
+ mock_report_vdur_updates.side_effect = IndexError("list index out of range.")
+ with self.assertRaises(IndexError) as err:
+ self.monitor.update_vnfrs(servers, ports, vms_to_monitor)
+ self.assertEqual(str(err.exception.args[0]), "list index out of range.")
+ self.assertEqual(mock_report_vdur_updates.call_count, 1)
+ mock_report_deleted_vdur.assert_not_called()
+ _call_mock_report_vdur_updates = mock_report_vdur_updates.call_args_list
+ self.assertEqual(
+ _call_mock_report_vdur_updates[0].args,
+ (server1, sample_vm, ports),
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.report_vdur_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.report_deleted_vdur")
+ def test_update_vnfrs_report_deleted_vdur_raises(
+ self, mock_report_deleted_vdur, mock_report_vdur_updates
+ ):
+ vms_to_monitor = [sample_vm, sample_vm2, sample_vm3]
+ servers = [server1, server2, server3, server4]
+ ports = {"ports": [port1, port2]}
+ mock_report_deleted_vdur.side_effect = DbException("DB is not in active state.")
+ with self.assertRaises(DbException) as err:
+ self.monitor.update_vnfrs(servers, ports, vms_to_monitor)
+ self.assertEqual(
+ str(err.exception.args[0]), "database exception DB is not in active state."
+ )
+ self.assertEqual(mock_report_vdur_updates.call_count, 2)
+ mock_report_deleted_vdur.assert_called_once_with(sample_vm3)
+ _call_mock_report_vdur_updates = mock_report_vdur_updates.call_args_list
+ self.assertEqual(
+ _call_mock_report_vdur_updates[0].args,
+ (server1, sample_vm, ports),
+ )
+ self.assertEqual(
+ _call_mock_report_vdur_updates[1].args,
+ (server2, sample_vm2, ports),
+ )
+
+ @patch("osm_ng_ro.monitor.yaml")
+ def test_serialize_string_value(self, mock_yaml):
+ value = "some string"
+ result = self.monitor.serialize(value)
+ mock_yaml.dump.assert_not_called()
+ self.assertEqual(result, value)
+
+ @patch("osm_ng_ro.monitor.yaml")
+ def test_serialize_list_value(self, mock_yaml):
+ value = [
+ {"version": 3.4},
+ ["image", "ghcr.io/foo/mysvc"],
+ {"MYSVC_ENV": "to_nice_yaml"},
+ ]
+ output = [
+ {"version": 3.4},
+ ["image", "ghcr.io/foo/mysvc"],
+ {"MYSVC_ENV": "to_nice_yaml"},
+ ]
+ mock_yaml.dump.return_value = output
+ result = self.monitor.serialize(value)
+ mock_yaml.dump.assert_called_once()
+ self.assertEqual(result, output)
+
+ @patch("osm_ng_ro.monitor.yaml")
+ def test_serialize_dict_value(self, mock_yaml):
+ value = {
+ "version": 3.4,
+ "MYSVC_ENV": "to_nice_yaml_to_nice_yaml_to_nice_yaml_to_nice_yaml_to_nice_yaml",
+ }
+ output = {
+ "MYSVC_ENV": "to_nice_yaml_to_nice_yaml_to_nice_yaml_to_nice_yaml_to_nice_yaml",
+ "version": 3.4,
+ }
+ mock_yaml.dump.return_value = output
+ result = self.monitor.serialize(value)
+ mock_yaml.dump.assert_called_once()
+ self.assertEqual(result, output)
+
+ @patch("osm_ng_ro.monitor.yaml")
+ def test_serialize_raise_representer_error(self, mock_yaml):
+ value = {
+ "name": {"firstname": str, "lastname": str},
+ "age": int,
+ }
+ mock_yaml.dump.side_effect = yaml.representer.RepresenterError(
+ "cannot represent an object"
+ )
+ result = self.monitor.serialize(value)
+ mock_yaml.dump.assert_called_once()
+ self.assertEqual(result, str(value))
+
+ @patch("osm_ng_ro.monitor.yaml")
+ def test_serialize_raise_yaml_error(self, mock_yaml):
+ value = {
+ "name": {"firstname": str, "lastname": str},
+ "age": int,
+ }
+
+ mock_yaml.dump.side_effect = yaml.YAMLError("cannot represent an object.")
+ with self.assertRaises(yaml.YAMLError) as err:
+            self.monitor.serialize(value)
+        # serialize() raised before returning, so there is no result to inspect
+ self.assertEqual(str(err.exception.args[0]), "cannot represent an object.")
+ mock_yaml.dump.assert_called_once()
+
+ @patch("osm_ng_ro.monitor.MonitorVms.serialize")
+ def test_get_server_info_with_user_data(self, mock_serialize):
+ all_server_info = deepcopy(server_other_info)
+ user_data = {
+ "OS-EXT-SRV-ATTR:user_data": "EXT-USER-DATA",
+ "user_data": "some-data",
+ }
+ mock_serialize.return_value = serialized_server_info
+ all_server_info.update(user_data)
+ server5 = create_server(vm1_id, "server5", info=all_server_info)
+ result = self.monitor._get_server_info(server5)
+ self.assertEqual(result, serialized_server_info)
+ mock_serialize.assert_called_once_with(server_other_info)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.serialize")
+ def test_get_server_info_without_user_data(self, mock_serialize):
+ mock_serialize.return_value = serialized_server_info
+ server5 = create_server(vm1_id, "server5", info=server_other_info)
+ result = self.monitor._get_server_info(server5)
+ self.assertEqual(result, serialized_server_info)
+ mock_serialize.assert_called_once_with(server_other_info)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.serialize")
+ def test_get_server_info_empty_server_info(self, mock_serialize):
+ server_other_info = {}
+ expected_result = {}
+ mock_serialize.return_value = expected_result
+ server5 = create_server(vm1_id, "server5", info=server_other_info)
+ result = self.monitor._get_server_info(server5)
+ self.assertEqual(result, expected_result)
+ mock_serialize.assert_called_once_with(server_other_info)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.serialize")
+ def test_get_server_info_serialize_raises(self, mock_serialize):
+ server_other_info = {
+ "admin_state_up": "true",
+ "binding:host_id": int,
+ "binding:profile": {},
+ "binding:vif_type": str,
+ "binding:vnic_type": "normal",
+ "created_at": "2023-02-22T05:35:46Z",
+ }
+ mock_serialize.side_effect = yaml.YAMLError("cannot represent an object.")
+ server5 = create_server(vm1_id, "server5", info=server_other_info)
+ with self.assertRaises(yaml.YAMLError) as err:
+ self.monitor._get_server_info(server5)
+ self.assertEqual(str(err.exception.args[0]), "cannot represent an object.")
+ mock_serialize.assert_called_once_with(server_other_info)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_server_info")
+ def test_check_vm_status_updates_server_status_ok(self, mock_server_info):
+ server6 = create_server("server6-id", "server6", status="PAUSED")
+ mock_server_info.return_value = serialized_server_info
+ vdur_vim_info_update = {}
+ vdur_update = {}
+ expected_vdur_vim_info_update = {
+ "vim_status": "PAUSED",
+ "vim_details": serialized_server_info,
+ "vim_id": server6.id,
+ "vim_name": server6.name,
+ }
+ expected_vdur_update = {
+ "vdur.0.status": "PAUSED",
+ "vdur.0.name": server6.name,
+ }
+ self.monitor.check_vm_status_updates(
+ vdur_vim_info_update, vdur_update, server6, vdur_path
+ )
+ self.assertDictEqual(vdur_vim_info_update, expected_vdur_vim_info_update)
+ self.assertDictEqual(vdur_update, expected_vdur_update)
+ mock_server_info.assert_called_once_with(server6)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_server_info")
+ def test_check_vm_status_updates_server_status_nok(self, mock_server_info):
+ server8 = create_server("server8-id", "server8", status="FAILED")
+ mock_server_info.return_value = serialized_server_info
+ vdur_vim_info_update = {}
+ vdur_update = {}
+ expected_vdur_vim_info_update = {
+ "vim_status": "FAILED",
+ "vim_details": serialized_server_info,
+ "vim_id": server8.id,
+ "vim_name": server8.name,
+ "vim_message": "VIM status reported FAILED",
+ }
+ expected_vdur_update = {
+ "vdur.0.status": "FAILED",
+ "vdur.0.name": server8.name,
+ }
+ self.monitor.check_vm_status_updates(
+ vdur_vim_info_update, vdur_update, server8, vdur_path
+ )
+ self.assertDictEqual(vdur_vim_info_update, expected_vdur_vim_info_update)
+ self.assertDictEqual(vdur_update, expected_vdur_update)
+ mock_server_info.assert_called_once_with(server8)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_server_info")
+ def test_check_vm_status_updates_get_server_info_raises(self, mock_server_info):
+ server8 = create_server("server8-id", "server8", status="FAILED")
+ mock_server_info.side_effect = yaml.YAMLError("Cannot represent an object.")
+ vdur_vim_info_update = {}
+ vdur_update = {}
+ expected_vdur_vim_info_update = {
+ "vim_status": "FAILED",
+ "vim_message": "VIM status reported FAILED",
+ }
+ expected_vdur_update = {
+ "vdur.0.status": "FAILED",
+ }
+ with self.assertRaises(yaml.YAMLError) as err:
+ self.monitor.check_vm_status_updates(
+ vdur_vim_info_update, vdur_update, server8, vdur_path
+ )
+ self.assertEqual(str(err.exception.args[0]), "Cannot represent an object.")
+ self.assertDictEqual(vdur_vim_info_update, expected_vdur_vim_info_update)
+ self.assertDictEqual(vdur_update, expected_vdur_update)
+ mock_server_info.assert_called_once_with(server8)
+
+ def test_get_interface_info(self):
+ interface = {"vim_interface_id": "4d081f50-e13a-4306-a67e-1edb28d76013"}
+ ports = {"ports": [port1, port2]}
+ result = self.monitor.get_interface_info(ports, interface, server1)
+ self.assertEqual(result, port1)
+
+ def test_get_interface_info_port_id_mismatch(self):
+ interface = {"vim_interface_id": "4d081f50-e13a-4306-a67e-1edb28d76013"}
+ ports = {"ports": [port2]}
+ result = self.monitor.get_interface_info(ports, interface, server1)
+ self.assertEqual(result, None)
+
+ def test_get_interface_info_device_id_mismatch(self):
+ interface = {"vim_interface_id": "4d081f50-e13a-4306-a67e-1edb28d76013"}
+ ports = {"ports": [port1, port2]}
+ result = self.monitor.get_interface_info(ports, interface, server2)
+ self.assertEqual(result, None)
+
+ def test_get_interface_info_empty_ports(self):
+ interface = {"vim_interface_id": "4d081f50-e13a-4306-a67e-1edb28d76013"}
+ ports = {"ports": []}
+ result = self.monitor.get_interface_info(ports, interface, server2)
+ self.assertEqual(result, None)
+
+ def test_check_vlan_pci_update(self):
+ interface_info = interface_with_binding
+ index = 1
+ vdur_vim_info_update = {"interfaces": [{}, {}]}
+ expected_vdur_vim_info_update = {
+ "interfaces": [{}, {"pci": "0000:86:17.4", "vlan": 400}]
+ }
+ self.monitor.check_vlan_pci_updates(interface_info, index, vdur_vim_info_update)
+ self.assertDictEqual(vdur_vim_info_update, expected_vdur_vim_info_update)
+
+ def test_check_vlan_pci_update_empty_interface_info(self):
+ interface_info = {}
+ index = 1
+ vdur_vim_info_update = {"interfaces": [{}, {}]}
+ expected_vdur_vim_info_update = {"interfaces": [{}, {}]}
+ self.monitor.check_vlan_pci_updates(interface_info, index, vdur_vim_info_update)
+ self.assertDictEqual(vdur_vim_info_update, expected_vdur_vim_info_update)
+
+ def test_check_vlan_pci_update_index_out_of_range(self):
+ interface_info = interface_with_binding
+ index = 3
+ vdur_vim_info_update = {"interfaces": [{}]}
+ expected_vdur_vim_info_update = {"interfaces": [{}]}
+ with self.assertRaises(IndexError) as err:
+ self.monitor.check_vlan_pci_updates(
+ interface_info, index, vdur_vim_info_update
+ )
+ self.assertEqual(str(err.exception.args[0]), "list index out of range")
+ self.assertEqual(vdur_vim_info_update, expected_vdur_vim_info_update)
+
+ def test_check_vlan_pci_update_empty_vdur_vim_info_update(self):
+ interface_info = interface_with_binding
+ index = 0
+ vdur_vim_info_update = {}
+ expected_vdur_vim_info_update = {}
+ with self.assertRaises(KeyError) as err:
+ self.monitor.check_vlan_pci_updates(
+ interface_info, index, vdur_vim_info_update
+ )
+ self.assertEqual(str(err.exception.args[0]), "interfaces")
+ self.assertEqual(vdur_vim_info_update, expected_vdur_vim_info_update)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_current_ip_address")
+ def test_check_vdur_interface_updates(self, mock_get_current_ip_address):
+ vdur_update, vnfr_update = {}, {}
+ index = 0
+ interface_info = {
+ "fixed_ips": [{"ip_address": ip1_addr}],
+ "mac_address": mac1_addr,
+ }
+ mock_get_current_ip_address.return_value = ip1_addr
+ expected_vdur_update = {
+ "vdur.0.interfaces.0.ip-address": ip1_addr,
+ "vdur.0.ip-address": ip1_addr,
+ "vdur.0.interfaces.0.mac-address": mac1_addr,
+ }
+ expected_vnfr_update = {
+ "35c034cc-8c5b-48c4-bfa2-17a71577ef19.ip-address": ip1_addr
+ }
+ self.monitor.check_vdur_interface_updates(
+ vdur_update,
+ vdur_path,
+ index,
+ interface_info,
+ old_interface2,
+ vnfr_update,
+ vnfr_id,
+ )
+ self.assertEqual(vnfr_update, expected_vnfr_update)
+ self.assertEqual(vdur_update, expected_vdur_update)
+ mock_get_current_ip_address.assert_called_once_with(interface_info)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_current_ip_address")
+ def test_check_vdur_interface_updates_not_mgmt_interface(
+ self, mock_get_current_ip_address
+ ):
+ vdur_update, vnfr_update = {}, {}
+ index = 0
+ interface_info = {
+ "fixed_ips": [{"ip_address": ip1_addr}],
+ "mac_address": mac1_addr,
+ }
+ mock_get_current_ip_address.return_value = ip1_addr
+ old_interface = {}
+ expected_vdur_update = {
+ "vdur.0.interfaces.0.ip-address": ip1_addr,
+ "vdur.0.interfaces.0.mac-address": mac1_addr,
+ }
+ self.monitor.check_vdur_interface_updates(
+ vdur_update,
+ vdur_path,
+ index,
+ interface_info,
+ old_interface,
+ vnfr_update,
+ vnfr_id,
+ )
+ self.assertEqual(vnfr_update, {})
+ self.assertEqual(vdur_update, expected_vdur_update)
+ mock_get_current_ip_address.assert_called_once_with(interface_info)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_current_ip_address")
+ def test_check_vdur_interface_updates_without_mac_address(
+ self, mock_get_current_ip_address
+ ):
+ vdur_update, vnfr_update = {}, {}
+ index = 0
+ interface_info = {"fixed_ips": [{"ip_address": ip1_addr}]}
+ mock_get_current_ip_address.return_value = ip1_addr
+ expected_vdur_update = {
+ "vdur.0.interfaces.0.ip-address": ip1_addr,
+ "vdur.0.ip-address": ip1_addr,
+ "vdur.0.interfaces.0.mac-address": None,
+ }
+ expected_vnfr_update = {
+ "35c034cc-8c5b-48c4-bfa2-17a71577ef19.ip-address": ip1_addr
+ }
+ self.monitor.check_vdur_interface_updates(
+ vdur_update,
+ vdur_path,
+ index,
+ interface_info,
+ old_interface2,
+ vnfr_update,
+ vnfr_id,
+ )
+ self.assertEqual(vnfr_update, expected_vnfr_update)
+ self.assertEqual(vdur_update, expected_vdur_update)
+ mock_get_current_ip_address.assert_called_once_with(interface_info)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_current_ip_address")
+ def test_check_vdur_interface_updates_without_ip_address(
+ self, mock_get_current_ip_address
+ ):
+ vdur_update, vnfr_update = {}, {}
+ index = 0
+ interface_info = {"fixed_ips": [], "mac_address": mac1_addr}
+ mock_get_current_ip_address.return_value = None
+ expected_vdur_update = {
+ "vdur.0.interfaces.0.mac-address": mac1_addr,
+ }
+ expected_vnfr_update = {}
+ self.monitor.check_vdur_interface_updates(
+ vdur_update,
+ vdur_path,
+ index,
+ interface_info,
+ old_interface2,
+ vnfr_update,
+ vnfr_id,
+ )
+ self.assertEqual(vnfr_update, expected_vnfr_update)
+ self.assertEqual(vdur_update, expected_vdur_update)
+ mock_get_current_ip_address.assert_called_once_with(interface_info)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_current_ip_address")
+ def test_check_vdur_interface_updates_wrong_interface_info_format(
+ self, mock_get_current_ip_address
+ ):
+ vdur_update, vnfr_update = {}, {}
+ index = 0
+ interface_info = {"fixed_ips": ip1_addr, "mac_address": mac1_addr}
+ mock_get_current_ip_address.side_effect = TypeError(
+ "str is not list like object."
+ )
+ old_interface = {}
+ with self.assertRaises(TypeError) as err:
+ self.monitor.check_vdur_interface_updates(
+ vdur_update,
+ vdur_path,
+ index,
+ interface_info,
+ old_interface,
+ vnfr_update,
+ vnfr_id,
+ )
+ self.assertEqual(str(err.exception), "str is not list like object.")
+ self.assertEqual(vnfr_update, {})
+ self.assertEqual(vdur_update, {})
+ mock_get_current_ip_address.assert_called_once_with(interface_info)
+
+ def test_get_current_ip_address(self):
+ interface_info = {
+ "fixed_ips": [{"ip_address": ip1_addr}],
+ "mac_address": mac1_addr,
+ }
+ result = self.monitor._get_current_ip_address(interface_info)
+ self.assertEqual(result, ip1_addr)
+
+ def test_get_current_ip_address_no_ip(self):
+ interface_info = {"fixed_ips": [{}], "mac_address": mac1_addr}
+ result = self.monitor._get_current_ip_address(interface_info)
+ self.assertEqual(result, None)
+
+ def test_backup_vdu_interfaces_without_vim_message(self):
+ vdur_vim_info_update = {
+ "interfaces": {"mac_address": mac1_addr},
+ }
+ expected_vdur_vim_info_update = {
+ "interfaces": {"mac_address": mac1_addr},
+ "interfaces_backup": {"mac_address": mac1_addr},
+ }
+ self.monitor.backup_vdu_interfaces(vdur_vim_info_update)
+ self.assertDictEqual(expected_vdur_vim_info_update, vdur_vim_info_update)
+
+ def test_backup_vdu_interfaces_with_vim_message(self):
+ vdur_vim_info_update = {
+ "interfaces": {"mac_address": mac1_addr},
+ "vim_message": "Deleted Externally",
+ }
+ expected_vdur_vim_info_update = {
+ "interfaces": {"mac_address": mac1_addr},
+ "vim_message": "Deleted Externally",
+ }
+ self.monitor.backup_vdu_interfaces(vdur_vim_info_update)
+ self.assertDictEqual(expected_vdur_vim_info_update, vdur_vim_info_update)
+
+ def test_backup_vdu_interfaces_with_empty_interfaces(self):
+ vdur_vim_info_update = {
+ "interfaces": {},
+ }
+ expected_vdur_vim_info_update = {
+ "interfaces": {},
+ }
+ self.monitor.backup_vdu_interfaces(vdur_vim_info_update)
+ self.assertDictEqual(expected_vdur_vim_info_update, vdur_vim_info_update)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.serialize")
+ def test_update_vdur_vim_info_interfaces(self, mock_serialize):
+ index = 1
+ vdur_vim_info_update = {
+ "interfaces": [{}, {"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ all_server_info = deepcopy(server_other_info)
+ host_data = {"OS-EXT-SRV-ATTR:host": "nova"}
+ mock_serialize.return_value = serialized_interface_info
+ all_server_info.update(host_data)
+ server7 = create_server(vm1_id, "server7", info=all_server_info)
+ expected_vdur_vim_info_update = {
+ "interfaces": [
+ {},
+ {
+ "mac_address": mac2_addr,
+ "compute_node": "nova",
+ "vim_info": serialized_interface_info,
+ "vim_net_id": net1_id,
+ "ip_address": ip1_addr,
+ },
+ ]
+ }
+ self.monitor.update_vdur_vim_info_interfaces(
+ vdur_vim_info_update, index, interface_info2, server7
+ )
+ self.assertDictEqual(vdur_vim_info_update, expected_vdur_vim_info_update)
+ mock_serialize.assert_called_once_with(interface_info2)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.serialize")
+ def test_update_vdur_vim_info_interfaces_serialize_raises(self, mock_serialize):
+ index = 1
+ vdur_vim_info_update = {
+ "interfaces": [{}, {"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ all_server_info = deepcopy(server_other_info)
+ host_data = {"OS-EXT-SRV-ATTR:host": "nova"}
+ mock_serialize.side_effect = yaml.YAMLError("Cannot represent an object.")
+ all_server_info.update(host_data)
+ server7 = create_server(vm1_id, "server7", info=all_server_info)
+ expected_vdur_vim_info = deepcopy(vdur_vim_info_update)
+ with self.assertRaises(yaml.YAMLError) as err:
+ self.monitor.update_vdur_vim_info_interfaces(
+ vdur_vim_info_update, index, interface_info2, server7
+ )
+ self.assertEqual(str(err.exception), "Cannot represent an object.")
+ self.assertDictEqual(vdur_vim_info_update, expected_vdur_vim_info)
+ mock_serialize.assert_called_once_with(interface_info2)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.serialize")
+ def test_update_vdur_vim_info_interfaces_empty_interface_info(self, mock_serialize):
+ index = 1
+ vdur_vim_info_update = {
+ "interfaces": [{}, {"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ interface_info = {}
+ all_server_info = deepcopy(server_other_info)
+ host_data = {"OS-EXT-SRV-ATTR:host": "nova"}
+ all_server_info.update(host_data)
+ server7 = create_server(vm1_id, "server7", info=all_server_info)
+ expected_vdur_vim_info = deepcopy(vdur_vim_info_update)
+ with self.assertRaises(KeyError) as err:
+ self.monitor.update_vdur_vim_info_interfaces(
+ vdur_vim_info_update, index, interface_info, server7
+ )
+ self.assertEqual(str(err.exception.args[0]), "mac_address")
+ self.assertDictEqual(vdur_vim_info_update, expected_vdur_vim_info)
+ mock_serialize.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms.serialize")
+ def test_update_vdur_vim_info_interfaces_invalid_vdur_vim_info(
+ self, mock_serialize
+ ):
+ index = 1
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}, {}]
+ }
+ expected_vdur_vim_info = deepcopy(vdur_vim_info_update)
+ with self.assertRaises(MonitorVmsException) as err:
+ self.monitor.update_vdur_vim_info_interfaces(
+ vdur_vim_info_update, index, interface_info2, server7
+ )
+ self.assertEqual(
+ str(err.exception.args[0]), "Existing interfaces info could not found."
+ )
+ self.assertDictEqual(vdur_vim_info_update, expected_vdur_vim_info)
+ mock_serialize.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vdur_vim_info_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vlan_pci_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vdur_interface_updates")
+ def test_prepare_interface_updates(
+ self,
+ mock_check_vdur_interface_updates,
+ mock_check_vlan_pci_updates,
+ mock_update_vdur_vim_info_interfaces,
+ ):
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ interface_info = {
+ "fixed_ips": [{"ip_address": ip1_addr}],
+ "mac_address": mac2_addr,
+ "network_id": net1_id,
+ }
+ old_interface = {
+ "mgmt_vdu_interface": True,
+ "mgmt_vnf_interface": True,
+ }
+ index = 0
+ vnfr_update, vdur_update = {}, {}
+ self.monitor.prepare_interface_updates(
+ vdur_vim_info_update,
+ index,
+ interface_info,
+ server7,
+ vdur_path,
+ vnfr_update,
+ old_interface2,
+ vdur_update,
+ vnfr_id,
+ )
+ mock_update_vdur_vim_info_interfaces.assert_called_once_with(
+ vdur_vim_info_update, index, interface_info, server7
+ )
+ mock_check_vlan_pci_updates.assert_called_once_with(
+ interface_info, index, vdur_vim_info_update
+ )
+ mock_check_vdur_interface_updates.assert_called_once_with(
+ vdur_update,
+ vdur_path,
+ index,
+ interface_info,
+            old_interface2,
+ vnfr_update,
+ vnfr_id,
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vdur_vim_info_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vlan_pci_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vdur_interface_updates")
+ def test_prepare_interface_updates_update_vdur_vim_info_interfaces_raises(
+ self,
+ mock_check_vdur_interface_updates,
+ mock_check_vlan_pci_updates,
+ mock_update_vdur_vim_info_interfaces,
+ ):
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ index = 0
+ vnfr_update, vdur_update = {}, {}
+ mock_update_vdur_vim_info_interfaces.side_effect = MonitorVmsException(
+ "Existing interfaces info could not found."
+ )
+ with self.assertRaises(MonitorVmsException) as err:
+ self.monitor.prepare_interface_updates(
+ vdur_vim_info_update,
+ index,
+ interface_info2,
+ server7,
+ vdur_path,
+ vnfr_update,
+ old_interface2,
+ vdur_update,
+ vnfr_id,
+ )
+ self.assertEqual(
+ str(err.exception.args[0]), "Existing interfaces info could not found."
+ )
+ mock_update_vdur_vim_info_interfaces.assert_called_once_with(
+ vdur_vim_info_update, index, interface_info2, server7
+ )
+ check_if_assert_not_called(
+ [mock_check_vlan_pci_updates, mock_check_vdur_interface_updates]
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vdur_vim_info_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vlan_pci_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vdur_interface_updates")
+ def test_prepare_interface_updates_check_vlan_pci_updates_raises(
+ self,
+ mock_check_vdur_interface_updates,
+ mock_check_vlan_pci_updates,
+ mock_update_vdur_vim_info_interfaces,
+ ):
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ index = 0
+ vnfr_update, vdur_update = {}, {}
+ mock_check_vlan_pci_updates.side_effect = KeyError("vlan is not found.")
+ with self.assertRaises(KeyError) as err:
+ self.monitor.prepare_interface_updates(
+ vdur_vim_info_update,
+ index,
+ interface_info2,
+ server7,
+ vdur_path,
+ vnfr_update,
+ old_interface2,
+ vdur_update,
+ vnfr_id,
+ )
+ self.assertEqual(str(err.exception.args[0]), "vlan is not found.")
+ mock_update_vdur_vim_info_interfaces.assert_called_once_with(
+ vdur_vim_info_update, index, interface_info2, server7
+ )
+ mock_check_vlan_pci_updates.assert_called_once_with(
+ interface_info2, index, vdur_vim_info_update
+ )
+ mock_check_vdur_interface_updates.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms.get_interface_info")
+ @patch("osm_ng_ro.monitor.MonitorVms.prepare_interface_updates")
+ def test_check_vm_interface_updates(
+ self, mock_prepare_interface_updates, mock_get_interface_info
+ ):
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ index = 0
+ interface_info = {
+ "fixed_ips": [{"ip_address": ip1_addr}],
+ "mac_address": mac2_addr,
+ "network_id": net1_id,
+ "status": "ACTIVE",
+ }
+ vnfr_update, vdur_update = {}, {}
+ ports = {"ports": [port1, port2]}
+ existing_vim_info = sample_vim_info
+ mock_get_interface_info.return_value = interface_info
+ self.monitor.check_vm_interface_updates(
+ server7,
+ existing_vim_info,
+ ports,
+ vdur_vim_info_update,
+ vdur_update,
+ vdur_path,
+ vnfr_update,
+ vnfr_id,
+ )
+ mock_get_interface_info.assert_called_once_with(ports, old_interface, server7)
+ mock_prepare_interface_updates.assert_called_once_with(
+ vdur_vim_info_update,
+ index,
+ interface_info,
+ server7,
+ vdur_path,
+ vnfr_update,
+ old_interface,
+ vdur_update,
+ vnfr_id,
+ )
+ self.assertNotIn("vim_message", vdur_vim_info_update)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.get_interface_info")
+ @patch("osm_ng_ro.monitor.MonitorVms.prepare_interface_updates")
+ def test_check_vm_interface_updates_interface_new_status_is_nok(
+ self, mock_prepare_interface_updates, mock_get_interface_info
+ ):
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ interface_info = {
+ "fixed_ips": [{"ip_address": ip1_addr}],
+ "mac_address": mac2_addr,
+ "network_id": net1_id,
+ "status": "DOWN",
+ }
+ vnfr_update, vdur_update = {}, {}
+ ports = {"ports": [port1, port2]}
+ existing_vim_info = sample_vim_info
+ mock_get_interface_info.return_value = interface_info
+ self.monitor.check_vm_interface_updates(
+ server7,
+ existing_vim_info,
+ ports,
+ vdur_vim_info_update,
+ vdur_update,
+ vdur_path,
+ vnfr_update,
+ vnfr_id,
+ )
+ mock_get_interface_info.assert_called_once_with(ports, old_interface, server7)
+ mock_prepare_interface_updates.assert_not_called()
+ self.assertEqual(
+ vdur_vim_info_update["vim_message"],
+ "Interface 4d081f50-e13a-4306-a67e-1edb28d76013 status: DOWN",
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.get_interface_info")
+ @patch("osm_ng_ro.monitor.MonitorVms.prepare_interface_updates")
+ def test_check_vm_interface_updates_no_new_interface_info(
+ self, mock_prepare_interface_updates, mock_get_interface_info
+ ):
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ vnfr_update, vdur_update = {}, {}
+ ports = {"ports": [port1, port2]}
+ existing_vim_info = sample_vim_info
+ mock_get_interface_info.return_value = None
+ self.monitor.check_vm_interface_updates(
+ server7,
+ existing_vim_info,
+ ports,
+ vdur_vim_info_update,
+ vdur_update,
+ vdur_path,
+ vnfr_update,
+ vnfr_id,
+ )
+ mock_get_interface_info.assert_called_once_with(ports, old_interface, server7)
+ mock_prepare_interface_updates.assert_not_called()
+ self.assertEqual(
+ vdur_vim_info_update["vim_message"],
+ "Interface 4d081f50-e13a-4306-a67e-1edb28d76013 deleted externally.",
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.get_interface_info")
+ @patch("osm_ng_ro.monitor.MonitorVms.prepare_interface_updates")
+ def test_check_vm_interface_updates_no_existing_interface(
+ self, mock_prepare_interface_updates, mock_get_interface_info
+ ):
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ interface_info = {
+ "fixed_ips": [{"ip_address": ip1_addr}],
+ "mac_address": mac2_addr,
+ "network_id": net1_id,
+ "status": "ACTIVE",
+ }
+ vnfr_update, vdur_update = {}, {}
+ ports = {"ports": [port1, port2]}
+ updated_sample_vim_info = deepcopy(sample_vim_info)
+ updated_sample_vim_info["interfaces"] = []
+ existing_vim_info = updated_sample_vim_info
+ mock_get_interface_info.return_value = interface_info
+ self.monitor.check_vm_interface_updates(
+ server7,
+ existing_vim_info,
+ ports,
+ vdur_vim_info_update,
+ vdur_update,
+ vdur_path,
+ vnfr_update,
+ vnfr_id,
+ )
+ check_if_assert_not_called(
+ [mock_get_interface_info, mock_prepare_interface_updates]
+ )
+ self.assertNotIn("vim_message", vdur_vim_info_update)
+
+ def test_update_in_database(self):
+ all_updates = [{"some-key": "some-value"}, {"other-key": "other-value"}]
+ self.monitor.update_in_database(all_updates, vnfr_id)
+ self.assertEqual(self.monitor.db.set_list.call_count, 2)
+ _call_mock_set_list = self.monitor.db.set_list.call_args_list
+ self.assertEqual(
+ _call_mock_set_list[0][0],
+ ("vnfrs",),
+ )
+ self.assertEqual(
+ _call_mock_set_list[0][1],
+ (
+ {
+ "q_filter": {"_id": vnfr_id},
+ "update_dict": {"some-key": "some-value"},
+ }
+ ),
+ )
+ self.assertEqual(
+ _call_mock_set_list[1][0],
+ ("vnfrs",),
+ )
+ self.assertEqual(
+ _call_mock_set_list[1][1],
+ (
+ {
+ "q_filter": {"_id": vnfr_id},
+ "update_dict": {"other-key": "other-value"},
+ }
+ ),
+ )
+
+ def test_update_in_database_set_list_raises(self):
+ all_updates = [{"some-key": "some-value"}, {"other-key": "other-value"}]
+ self.monitor.db.set_list.side_effect = DbException("Connection failed.")
+ with self.assertRaises(MonitorDbException) as err:
+ self.monitor.update_in_database(all_updates, vnfr_id)
+ self.assertEqual(
+ str(err.exception.args[0]),
+ "Error while updating differences in VNFR database exception Connection failed.",
+ )
+ self.assertEqual(self.monitor.db.set_list.call_count, 1)
+ _call_mock_set_list = self.monitor.db.set_list.call_args_list
+ self.assertEqual(
+ _call_mock_set_list[0][0],
+ ("vnfrs",),
+ )
+ self.assertEqual(
+ _call_mock_set_list[0][1],
+ (
+ {
+ "q_filter": {"_id": vnfr_id},
+ "update_dict": {"some-key": "some-value"},
+ }
+ ),
+ )
+
+ def test_update_in_database_empty_all_updates(self):
+ all_updates = []
+ self.monitor.update_in_database(all_updates, vnfr_id)
+ self.monitor.db.set_list.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_status_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_interface_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_vdur_updates_no_change_in_vdur(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_check_vm_interface_updates,
+ mock_check_vm_status_updates,
+ mock_get_vm_data_from_db,
+ ):
+ existing_vim_info = sample_vim_info
+ vdur_vim_info_update = deepcopy(existing_vim_info)
+ mock_get_vm_data_from_db.return_value = (
+ vdur_path,
+ vdur_vim_info_update,
+ None,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ )
+ ports = {"ports": [port1, port2]}
+ self.monitor.report_vdur_updates(server7, sample_vm, ports)
+ check_if_assert_not_called(
+ [mock_update_in_database, mock_backup_vdu_interfaces]
+ )
+ mock_get_vm_data_from_db.assert_called_with(sample_vm)
+ self.assertEqual(mock_get_vm_data_from_db.call_count, 1)
+ mock_check_vm_status_updates.assert_called_once_with(
+ vdur_vim_info_update, {}, server7, vdur_path
+ )
+ mock_check_vm_interface_updates.assert_called_once_with(
+ server7,
+ existing_vim_info,
+ ports,
+ vdur_vim_info_update,
+ {},
+ vdur_path,
+ {},
+ vnfr_id,
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_status_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_interface_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_vdur_updates_vdur_changed(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_check_vm_interface_updates,
+ mock_check_vm_status_updates,
+ mock_get_vm_data_from_db,
+ ):
+ existing_vim_info = sample_vim_info
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ mock_get_vm_data_from_db.return_value = (
+ vdur_path,
+ vdur_vim_info_update,
+ None,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ )
+ all_updates = [{}, {vim_info_path: vdur_vim_info_update}, {}]
+ ports = {"ports": [port1, port2]}
+ self.monitor.report_vdur_updates(server7, sample_vm, ports)
+ mock_get_vm_data_from_db.assert_called_with(sample_vm)
+ self.assertEqual(mock_get_vm_data_from_db.call_count, 1)
+ mock_check_vm_status_updates.assert_called_once_with(
+ vdur_vim_info_update, {}, server7, vdur_path
+ )
+ mock_check_vm_interface_updates.assert_called_once_with(
+ server7,
+ existing_vim_info,
+ ports,
+ vdur_vim_info_update,
+ {},
+ vdur_path,
+ {},
+ vnfr_id,
+ )
+ mock_backup_vdu_interfaces.assert_called_once_with(vdur_vim_info_update)
+ mock_update_in_database.assert_called_once_with(all_updates, vnfr_id)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_status_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_interface_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_vdur_updates_check_vm_status_updates_raises(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_check_vm_interface_updates,
+ mock_check_vm_status_updates,
+ mock_get_vm_data_from_db,
+ ):
+ existing_vim_info = sample_vim_info
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ mock_get_vm_data_from_db.return_value = (
+ vdur_path,
+ vdur_vim_info_update,
+ None,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ )
+ ports = {"ports": [port1, port2]}
+ mock_check_vm_status_updates.side_effect = yaml.YAMLError(
+ "Cannot represent an object."
+ )
+ with self.assertRaises(yaml.YAMLError) as err:
+ self.monitor.report_vdur_updates(server7, sample_vm, ports)
+ self.assertEqual(str(err.exception), "Cannot represent an object.")
+ mock_get_vm_data_from_db.assert_called_with(sample_vm)
+ self.assertEqual(mock_get_vm_data_from_db.call_count, 1)
+ mock_check_vm_status_updates.assert_called_once_with(
+ vdur_vim_info_update, {}, server7, vdur_path
+ )
+ check_if_assert_not_called(
+ [
+ mock_check_vm_interface_updates,
+ mock_backup_vdu_interfaces,
+ mock_update_in_database,
+ ]
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_status_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_interface_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_vdur_updates_database_update_raises(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_check_vm_interface_updates,
+ mock_check_vm_status_updates,
+ mock_get_vm_data_from_db,
+ ):
+ existing_vim_info = sample_vim_info
+ vdur_vim_info_update = {
+ "interfaces": [{"mac_address": mac1_addr, "compute_node": "host1"}]
+ }
+ mock_get_vm_data_from_db.return_value = (
+ vdur_path,
+ vdur_vim_info_update,
+ None,
+ existing_vim_info,
+ vnfr_id,
+ vim_info_path,
+ )
+ all_updates = [{}, {vim_info_path: vdur_vim_info_update}, {}]
+ ports = {"ports": [port1, port2]}
+ mock_update_in_database.side_effect = MonitorDbException(
+ f"Error while updating differences in VNFR {vnfr_id}."
+ )
+ with self.assertRaises(MonitorDbException) as err:
+ self.monitor.report_vdur_updates(server7, sample_vm, ports)
+ self.assertEqual(
+ str(err.exception), f"Error while updating differences in VNFR {vnfr_id}."
+ )
+ mock_get_vm_data_from_db.assert_called_with(sample_vm)
+ self.assertEqual(mock_get_vm_data_from_db.call_count, 1)
+ mock_check_vm_status_updates.assert_called_once_with(
+ vdur_vim_info_update, {}, server7, vdur_path
+ )
+ mock_check_vm_interface_updates.assert_called_once_with(
+ server7,
+ existing_vim_info,
+ ports,
+ vdur_vim_info_update,
+ {},
+ vdur_path,
+ {},
+ vnfr_id,
+ )
+ mock_backup_vdu_interfaces.assert_called_once_with(vdur_vim_info_update)
+ mock_update_in_database.assert_called_once_with(all_updates, vnfr_id)
+
+ @patch("osm_ng_ro.monitor.MonitorVms._get_vm_data_from_db")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_status_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.check_vm_interface_updates")
+ @patch("osm_ng_ro.monitor.MonitorVms.backup_vdu_interfaces")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_in_database")
+ def test_report_vdur_updates_no_vm_data(
+ self,
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_check_vm_interface_updates,
+ mock_check_vm_status_updates,
+ mock_get_vm_data_from_db,
+ ):
+ mock_get_vm_data_from_db.return_value = None
+ ports = {"ports": [port1, port2]}
+ self.monitor.report_vdur_updates(server7, sample_vm, ports)
+ check_if_assert_not_called(
+ [
+ mock_update_in_database,
+ mock_backup_vdu_interfaces,
+ mock_check_vm_interface_updates,
+ mock_check_vm_status_updates,
+ ]
+ )
+ mock_get_vm_data_from_db.assert_called_once_with(sample_vm)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.find_ro_tasks_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.prepare_vims_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vnfrs")
+ def test_run_no_db_vims(
+ self,
+ mock_update_vnfrs,
+ mock_prepare_vims_to_monitor,
+ mock_find_ro_tasks_to_monitor,
+ ):
+ self.monitor.db_vims = None
+ self.monitor.run()
+ check_if_assert_not_called(
+ [
+ mock_prepare_vims_to_monitor,
+ mock_find_ro_tasks_to_monitor,
+ mock_update_vnfrs,
+ ]
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.find_ro_tasks_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.prepare_vims_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vnfrs")
+ def test_run_refresh_disabled(
+ self,
+ mock_update_vnfrs,
+ mock_prepare_vims_to_monitor,
+ mock_find_ro_tasks_to_monitor,
+ ):
+ self.monitor.db_vims = vims
+ self.monitor.refresh_config.active = -1
+ self.monitor.run()
+ check_if_assert_not_called(
+ [
+ mock_prepare_vims_to_monitor,
+ mock_find_ro_tasks_to_monitor,
+ mock_update_vnfrs,
+ ]
+ )
+
+ @patch("osm_ng_ro.monitor.MonitorVms.find_ro_tasks_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.prepare_vims_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vnfrs")
+ def test_run_no_proper_ro_task(
+ self,
+ mock_update_vnfrs,
+ mock_prepare_vims_to_monitor,
+ mock_find_ro_tasks_to_monitor,
+ ):
+ self.monitor.db_vims = vims
+ self.monitor.refresh_config.active = 60
+ mock_find_ro_tasks_to_monitor.return_value = []
+ self.monitor.run()
+ check_if_assert_not_called([mock_prepare_vims_to_monitor, mock_update_vnfrs])
+ mock_find_ro_tasks_to_monitor.assert_called_once()
+
+ @patch("osm_ng_ro.monitor.MonitorVms.find_ro_tasks_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vnfrs")
+ def test_run_with_proper_ro_task(
+ self, mock_update_vnfrs, mock_find_ro_tasks_to_monitor
+ ):
+ self.monitor.db_vims = vims
+ all_servers = [server1, server2]
+ vim1_vms = [
+ VmToMonitor(
+ vm_id=vm1_id,
+ target_record=target_record,
+ )
+ ]
+ vim2_vms = [
+ VmToMonitor(
+ vm_id=vm2_id,
+ target_record=target_record2,
+ )
+ ]
+ all_ports = {"ports": [port1, port2]}
+ mock_vim_connector = MagicMock()
+ mock_vim_connector.get_monitoring_data.return_value = all_servers, all_ports
+ self.monitor.my_vims = {
+ vim1_id: mock_vim_connector,
+ vim2_id: mock_vim_connector,
+ vim3_id: mock_vim_connector,
+ }
+ self.monitor.refresh_config.active = 60
+ mock_find_ro_tasks_to_monitor.return_value = [ro_task1, ro_task2]
+ self.monitor.run()
+ mock_find_ro_tasks_to_monitor.assert_called_once()
+ _call_mock_update_vnfrs = mock_update_vnfrs.call_args_list
+ self.assertEqual(mock_update_vnfrs.call_count, 2)
+ self.assertEqual(
+ _call_mock_update_vnfrs[0][0],
+ (all_servers, all_ports, vim1_vms),
+ )
+ self.assertEqual(
+ _call_mock_update_vnfrs[1][0],
+ (all_servers, all_ports, vim2_vms),
+ )
+ self.assertEqual(mock_vim_connector.get_monitoring_data.call_count, 2)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.find_ro_tasks_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vnfrs")
+ def test_run_update_vnfrs_raises(
+ self, mock_update_vnfrs, mock_find_ro_tasks_to_monitor
+ ):
+ self.monitor.db_vims = vims
+ all_servers = [server1, server2]
+ vim1_vms = [
+ VmToMonitor(
+ vm_id=vm1_id,
+ target_record=target_record,
+ )
+ ]
+ all_ports = {"ports": [port1, port2]}
+ mock_vim_connector = MagicMock()
+ mock_vim_connector.get_monitoring_data.return_value = all_servers, all_ports
+ self.monitor.my_vims = {
+ vim1_id: mock_vim_connector,
+ vim2_id: mock_vim_connector,
+ vim3_id: mock_vim_connector,
+ }
+ self.monitor.refresh_config.active = 60
+ mock_find_ro_tasks_to_monitor.return_value = [ro_task1, ro_task2]
+ mock_update_vnfrs.side_effect = DbException("DB is not active state.")
+ with self.assertRaises(MonitorVmsException) as err:
+ self.monitor.run()
+ self.assertEqual(
+ str(err.exception),
+ "Exception while monitoring Openstack VMs: database exception DB is not active state.",
+ )
+ mock_find_ro_tasks_to_monitor.assert_called_once()
+ _call_mock_update_vnfrs = mock_update_vnfrs.call_args_list
+ self.assertEqual(mock_update_vnfrs.call_count, 1)
+ self.assertEqual(
+ _call_mock_update_vnfrs[0][0],
+ (all_servers, all_ports, vim1_vms),
+ )
+ self.assertEqual(mock_vim_connector.get_monitoring_data.call_count, 1)
+
+ @patch("osm_ng_ro.monitor.MonitorVms.prepare_vims_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.find_ro_tasks_to_monitor")
+ @patch("osm_ng_ro.monitor.MonitorVms.update_vnfrs")
+ def test_run_prepare_vims_to_monitor_raises(
+ self,
+ mock_update_vnfrs,
+ mock_find_ro_tasks_to_monitor,
+ mock_prepare_vims_to_monitor,
+ ):
+ self.monitor.db_vims = vims
+ mock_vim_connector = MagicMock()
+ self.monitor.my_vims = {
+ vim1_id: mock_vim_connector,
+ vim2_id: mock_vim_connector,
+ vim3_id: mock_vim_connector,
+ }
+ self.monitor.refresh_config.active = 60
+ mock_find_ro_tasks_to_monitor.return_value = [ro_task1, ro_task2]
+ mock_prepare_vims_to_monitor.side_effect = KeyError("vim_id")
+ with self.assertRaises(MonitorVmsException) as err:
+ self.monitor.run()
+ self.assertEqual(
+ str(err.exception), "Exception while monitoring Openstack VMs: 'vim_id'"
+ )
+ mock_find_ro_tasks_to_monitor.assert_called_once()
+ check_if_assert_not_called(
+ [mock_update_vnfrs, mock_vim_connector.get_monitoring_data]
+ )
+
+ @patch("osm_ng_ro.monitor.monitoring_task")
+ @patch("osm_ng_ro.monitor.threading.Timer")
+ @patch("osm_ng_ro.monitor.MonitorVms")
+ def test_start_monitoring(
+ self, mock_monitor_vms, mock_threading_timer, mock_monitoring_task
+ ):
+ mock_monitor_vms.return_value.refresh_config.active = 20
+ mock_threading_timer.return_value = mock_monitoring_task
+ start_monitoring(config)
+ mock_threading_timer.assert_called_once_with(
+ 20, start_monitoring, args=(config,)
+ )
+ mock_threading_timer.return_value = CopyingMock(threading.Timer)
+ self.assertEqual(mock_threading_timer.call_count, 1)
+ mock_monitor_vms.return_value.run.assert_called_once()
+ mock_monitor_vms.assert_called_once_with(config)
+ mock_monitoring_task.start.assert_called_once()
+
+ @patch("osm_ng_ro.monitor.monitoring_task")
+ @patch("osm_ng_ro.monitor.threading.Timer")
+ @patch("osm_ng_ro.monitor.MonitorVms")
+ def test_start_monitoring_empty_config(
+ self, mock_monitor_vms, mock_threading_timer, mock_monitoring_task
+ ):
+ with self.assertRaises(MonitorVmsException) as err:
+ start_monitoring(config={})
+ self.assertEqual(
+ str(err.exception),
+ "Wrong configuration format is provided.",
+ )
+ check_if_assert_not_called(
+ [mock_threading_timer, mock_monitor_vms, mock_monitoring_task]
+ )
+
+ @patch("osm_ng_ro.monitor.monitoring_task")
+ @patch("osm_ng_ro.monitor.threading.Timer")
+ @patch("osm_ng_ro.monitor.MonitorVms")
+ def test_start_monitoring_monitor_vms_raises(
+ self, mock_monitor_vms, mock_threading_timer, mock_monitoring_task
+ ):
+ mock_monitor_vms.side_effect = MonitorDbException("Can not connect to DB.")
+ with self.assertRaises(MonitorDbException) as err:
+ start_monitoring(config)
+ self.assertEqual(str(err.exception), "Can not connect to DB.")
+ mock_monitor_vms.assert_called_once_with(config)
+ check_if_assert_not_called([mock_threading_timer, mock_monitoring_task])
+
+ @patch("osm_ng_ro.monitor.monitoring_task")
+ @patch("osm_ng_ro.monitor.threading.Timer")
+ @patch("osm_ng_ro.monitor.MonitorVms")
+ def test_start_monitoring_timer_thread_raises(
+ self, mock_monitor_vms, mock_threading_timer, mock_monitoring_task
+ ):
+ mock_threading_timer.side_effect = RuntimeError(
+ "cannot release un-acquired lock"
+ )
+ mock_monitor_vms.return_value.refresh_config.active = 2
+ with self.assertRaises(RuntimeError) as err:
+ start_monitoring(config)
+ self.assertEqual(str(err.exception), "cannot release un-acquired lock")
+ mock_monitor_vms.assert_called_once_with(config)
+ mock_monitor_vms.return_value.run.assert_called_once()
+ mock_threading_timer.assert_called_once_with(
+ 2, start_monitoring, args=(config,)
+ )
+ mock_monitoring_task.start.assert_not_called()
+
+ @patch("osm_ng_ro.monitor.monitoring_task")
+ def test_stop_monitoring(self, mock_monitoring_task):
+ mock_monitoring_task.return_value = CopyingMock(threading.Timer)
+ stop_monitoring()
+ self.assertIsNotNone(mock_monitoring_task)
+ mock_monitoring_task.cancel.assert_called_once()
+
+    @patch("osm_ng_ro.monitor.monitoring_task", None)
+    def test_stop_monitoring_no_task(self):
+        # monitoring_task is patched to None; stop_monitoring must detect the
+        # missing task and return without calling cancel() (i.e. not raise).
+        # NOTE(review): the previous version rebound the mock parameter to a
+        # fresh local object, so its assert_not_called() passed vacuously.
+        self.assertIsNone(stop_monitoring())
+
+
+if __name__ == "__main__":
+ unittest.main()
"start": True,
},
}
+
+expected_extra_dict3 = {
+ "depends_on": [
+ f"{ns_preffix}:image.0",
+ ],
+ "params": {
+ "affinity_group_list": [],
+ "availability_zone_index": None,
+ "availability_zone_list": None,
+ "cloud_config": None,
+ "description": "without_volumes-VM",
+ "disk_list": [],
+ "flavor_id": "flavor_test",
+ "image_id": f"TASK-{ns_preffix}:image.0",
+ "name": "sample_name-vnf-several-volu-without_volumes-VM-0",
+ "net_list": [],
+ "start": True,
+ },
+}
tasks_by_target_record_id = {
"nsrs:th47f48-9870-4169-b758-9732e1ff40f3": {
"extra_dict": {
self.assertTrue(epa_params.called)
self.assertDictEqual(result, expected_result)
- def test__ip_profile_to_ro_with_none(self):
- ip_profile = None
-
- result = Ns._ip_profile_to_ro(
- ip_profile=ip_profile,
- )
-
- self.assertIsNone(result)
-
- def test__ip_profile_to_ro_with_empty_profile(self):
- ip_profile = {}
-
- result = Ns._ip_profile_to_ro(
- ip_profile=ip_profile,
- )
-
- self.assertIsNone(result)
-
- def test__ip_profile_to_ro_with_wrong_profile(self):
- ip_profile = {
- "no-profile": "here",
- }
- expected_result = {
- "ip_version": "IPv4",
- "subnet_address": None,
- "gateway_address": None,
- "dhcp_enabled": False,
- "dhcp_start_address": None,
- "dhcp_count": None,
- }
-
- result = Ns._ip_profile_to_ro(
- ip_profile=ip_profile,
- )
-
- self.assertDictEqual(expected_result, result)
-
- def test__ip_profile_to_ro_with_ipv4_profile(self):
- ip_profile = {
- "ip-version": "ipv4",
- "subnet-address": "192.168.0.0/24",
- "gateway-address": "192.168.0.254",
- "dhcp-params": {
- "enabled": True,
- "start-address": "192.168.0.10",
- "count": 25,
- },
- }
- expected_result = {
- "ip_version": "IPv4",
- "subnet_address": "192.168.0.0/24",
- "gateway_address": "192.168.0.254",
- "dhcp_enabled": True,
- "dhcp_start_address": "192.168.0.10",
- "dhcp_count": 25,
- }
-
- result = Ns._ip_profile_to_ro(
- ip_profile=ip_profile,
- )
-
- self.assertDictEqual(expected_result, result)
-
- def test__ip_profile_to_ro_with_ipv6_profile(self):
- ip_profile = {
- "ip-version": "ipv6",
- "subnet-address": "2001:0200:0001::/48",
- "gateway-address": "2001:0200:0001:ffff:ffff:ffff:ffff:fffe",
- "dhcp-params": {
- "enabled": True,
- "start-address": "2001:0200:0001::0010",
- "count": 25,
- },
- }
- expected_result = {
- "ip_version": "IPv6",
- "subnet_address": "2001:0200:0001::/48",
- "gateway_address": "2001:0200:0001:ffff:ffff:ffff:ffff:fffe",
- "dhcp_enabled": True,
- "dhcp_start_address": "2001:0200:0001::0010",
- "dhcp_count": 25,
- }
-
- result = Ns._ip_profile_to_ro(
- ip_profile=ip_profile,
- )
-
- self.assertDictEqual(expected_result, result)
-
- def test__ip_profile_to_ro_with_dns_server(self):
- ip_profile = {
- "ip-version": "ipv4",
- "subnet-address": "192.168.0.0/24",
- "gateway-address": "192.168.0.254",
- "dhcp-params": {
- "enabled": True,
- "start-address": "192.168.0.10",
- "count": 25,
- },
- "dns-server": [
- {
- "address": "8.8.8.8",
- },
- {
- "address": "1.1.1.1",
- },
- {
- "address": "1.0.0.1",
- },
- ],
- }
- expected_result = {
- "ip_version": "IPv4",
- "subnet_address": "192.168.0.0/24",
- "gateway_address": "192.168.0.254",
- "dhcp_enabled": True,
- "dhcp_start_address": "192.168.0.10",
- "dhcp_count": 25,
- "dns_address": "8.8.8.8;1.1.1.1;1.0.0.1",
- }
-
- result = Ns._ip_profile_to_ro(
- ip_profile=ip_profile,
- )
-
- self.assertDictEqual(expected_result, result)
-
- def test__ip_profile_to_ro_with_security_group(self):
- ip_profile = {
- "ip-version": "ipv4",
- "subnet-address": "192.168.0.0/24",
- "gateway-address": "192.168.0.254",
- "dhcp-params": {
- "enabled": True,
- "start-address": "192.168.0.10",
- "count": 25,
- },
- "security-group": {
- "some-security-group": "here",
- },
- }
- expected_result = {
- "ip_version": "IPv4",
- "subnet_address": "192.168.0.0/24",
- "gateway_address": "192.168.0.254",
- "dhcp_enabled": True,
- "dhcp_start_address": "192.168.0.10",
- "dhcp_count": 25,
- "security_group": {
- "some-security-group": "here",
- },
- }
-
- result = Ns._ip_profile_to_ro(
- ip_profile=ip_profile,
- )
-
- self.assertDictEqual(expected_result, result)
-
- def test__ip_profile_to_ro(self):
- ip_profile = {
- "ip-version": "ipv4",
- "subnet-address": "192.168.0.0/24",
- "gateway-address": "192.168.0.254",
- "dhcp-params": {
- "enabled": True,
- "start-address": "192.168.0.10",
- "count": 25,
- },
- "dns-server": [
- {
- "address": "8.8.8.8",
- },
- {
- "address": "1.1.1.1",
- },
- {
- "address": "1.0.0.1",
- },
- ],
- "security-group": {
- "some-security-group": "here",
- },
- }
- expected_result = {
- "ip_version": "IPv4",
- "subnet_address": "192.168.0.0/24",
- "gateway_address": "192.168.0.254",
- "dhcp_enabled": True,
- "dhcp_start_address": "192.168.0.10",
- "dhcp_count": 25,
- "dns_address": "8.8.8.8;1.1.1.1;1.0.0.1",
- "security_group": {
- "some-security-group": "here",
- },
- }
-
- result = Ns._ip_profile_to_ro(
- ip_profile=ip_profile,
- )
-
- self.assertDictEqual(expected_result, result)
-
- @patch("osm_ng_ro.ns.Ns._ip_profile_to_ro")
def test__process_net_params_with_empty_params(
self,
- ip_profile_to_ro,
):
target_vld = {
"name": "vld-name",
}
vim_info = {
"provider_network": "some-profile-here",
+ "ip_profile": {
+ "some_ip_profile": "here",
+ },
}
target_record_id = ""
expected_result = {
}
}
- ip_profile_to_ro.return_value = {
- "some_ip_profile": "here",
- }
-
result = Ns._process_net_params(
target_vld=target_vld,
indata=indata,
)
self.assertDictEqual(expected_result, result)
- self.assertTrue(ip_profile_to_ro.called)
- @patch("osm_ng_ro.ns.Ns._ip_profile_to_ro")
def test__process_net_params_with_vim_info_sdn(
self,
- ip_profile_to_ro,
):
target_vld = {
"name": "vld-name",
)
self.assertDictEqual(expected_result, result)
- self.assertFalse(ip_profile_to_ro.called)
- @patch("osm_ng_ro.ns.Ns._ip_profile_to_ro")
def test__process_net_params_with_vim_info_sdn_target_vim(
self,
- ip_profile_to_ro,
):
target_vld = {
"name": "vld-name",
)
self.assertDictEqual(expected_result, result)
- self.assertFalse(ip_profile_to_ro.called)
- @patch("osm_ng_ro.ns.Ns._ip_profile_to_ro")
def test__process_net_params_with_vim_network_name(
self,
- ip_profile_to_ro,
):
target_vld = {
"name": "vld-name",
)
self.assertDictEqual(expected_result, result)
- self.assertFalse(ip_profile_to_ro.called)
- @patch("osm_ng_ro.ns.Ns._ip_profile_to_ro")
def test__process_net_params_with_vim_network_id(
self,
- ip_profile_to_ro,
):
target_vld = {
"name": "vld-name",
)
self.assertDictEqual(expected_result, result)
- self.assertFalse(ip_profile_to_ro.called)
- @patch("osm_ng_ro.ns.Ns._ip_profile_to_ro")
def test__process_net_params_with_mgmt_network(
self,
- ip_profile_to_ro,
):
target_vld = {
"id": "vld-id",
)
self.assertDictEqual(expected_result, result)
- self.assertFalse(ip_profile_to_ro.called)
- @patch("osm_ng_ro.ns.Ns._ip_profile_to_ro")
def test__process_net_params_with_underlay_eline(
self,
- ip_profile_to_ro,
):
target_vld = {
"name": "vld-name",
}
vim_info = {
"provider_network": "some-profile-here",
+ "ip_profile": {
+ "some_ip_profile": "here",
+ },
}
target_record_id = ""
expected_result = {
}
}
- ip_profile_to_ro.return_value = {
- "some_ip_profile": "here",
- }
-
result = Ns._process_net_params(
target_vld=target_vld,
indata=indata,
)
self.assertDictEqual(expected_result, result)
- self.assertTrue(ip_profile_to_ro.called)
- @patch("osm_ng_ro.ns.Ns._ip_profile_to_ro")
def test__process_net_params_with_underlay_elan(
self,
- ip_profile_to_ro,
):
target_vld = {
"name": "vld-name",
}
vim_info = {
"provider_network": "some-profile-here",
+ "ip_profile": {
+ "some_ip_profile": "here",
+ },
}
target_record_id = ""
expected_result = {
}
}
- ip_profile_to_ro.return_value = {
- "some_ip_profile": "here",
- }
-
result = Ns._process_net_params(
target_vld=target_vld,
indata=indata,
)
self.assertDictEqual(expected_result, result)
- self.assertTrue(ip_profile_to_ro.called)
def test__get_cloud_init_exception(self):
db_mock = MagicMock(name="database mock")
persistent_root_disk, target_vdu, vdu_instantiation_vol_list, []
)
+ @patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
+ @patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
+ @patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
+ @patch("osm_ng_ro.ns.Ns._prepare_vdu_cloud_init")
+ @patch("osm_ng_ro.ns.Ns._prepare_vdu_ssh_keys")
+ @patch("osm_ng_ro.ns.Ns.find_persistent_root_volumes")
+ @patch("osm_ng_ro.ns.Ns.find_persistent_volumes")
+ @patch("osm_ng_ro.ns.Ns._add_persistent_root_disk_to_disk_list")
+ @patch("osm_ng_ro.ns.Ns._add_persistent_ordinary_disks_to_disk_list")
+ @patch("osm_ng_ro.ns.Ns._prepare_vdu_affinity_group_list")
+ def test_process_vdu_params_with_inst_flavor_id(
+ self,
+ mock_prepare_vdu_affinity_group_list,
+ mock_add_persistent_ordinary_disks_to_disk_list,
+ mock_add_persistent_root_disk_to_disk_list,
+ mock_find_persistent_volumes,
+ mock_find_persistent_root_volumes,
+ mock_prepare_vdu_ssh_keys,
+ mock_prepare_vdu_cloud_init,
+ mock_prepare_vdu_interfaces,
+ mock_locate_vdu_interfaces,
+ mock_sort_vdu_interfaces,
+ ):
+ """Instantiation volume list is empty."""
+ target_vdu = deepcopy(target_vdu_wthout_persistent_storage)
+
+ target_vdu["interfaces"] = interfaces_wth_all_positions
+
+ vdu_instantiation_flavor_id = "flavor_test"
+
+ target_vdu["additionalParams"] = {
+ "OSM": {"vim_flavor_id": vdu_instantiation_flavor_id}
+ }
+ mock_prepare_vdu_cloud_init.return_value = {}
+ mock_prepare_vdu_affinity_group_list.return_value = []
+
+ new_kwargs = deepcopy(kwargs)
+ new_kwargs.update(
+ {
+ "vnfr_id": vnfr_id,
+ "nsr_id": nsr_id,
+ "tasks_by_target_record_id": {},
+ "logger": "logger",
+ }
+ )
+ expected_extra_dict_copy = deepcopy(expected_extra_dict3)
+ vnfd = deepcopy(vnfd_wth_persistent_storage)
+ db.get_one.return_value = vnfd
+ result = Ns._process_vdu_params(
+ target_vdu, indata, vim_info=None, target_record_id=None, **new_kwargs
+ )
+ mock_sort_vdu_interfaces.assert_called_once_with(target_vdu)
+ mock_locate_vdu_interfaces.assert_not_called()
+ mock_prepare_vdu_cloud_init.assert_called_once()
+ mock_add_persistent_root_disk_to_disk_list.assert_called_once()
+ mock_add_persistent_ordinary_disks_to_disk_list.assert_called_once()
+ mock_prepare_vdu_interfaces.assert_called_once_with(
+ target_vdu,
+ expected_extra_dict_copy,
+ ns_preffix,
+ vnf_preffix,
+ "logger",
+ {},
+ [],
+ )
+ self.assertDictEqual(result, expected_extra_dict_copy)
+ mock_prepare_vdu_ssh_keys.assert_called_once_with(target_vdu, None, {})
+ mock_prepare_vdu_affinity_group_list.assert_called_once()
+ mock_find_persistent_volumes.assert_not_called()
+
@patch("osm_ng_ro.ns.Ns._sort_vdu_interfaces")
@patch("osm_ng_ro.ns.Ns._partially_locate_vdu_interfaces")
@patch("osm_ng_ro.ns.Ns._prepare_vdu_interfaces")
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
-
import logging
import unittest
from unittest.mock import MagicMock, Mock, mock_open, patch
)
from osm_ro_plugin.vimconn import VimConnConnectionException, VimConnException
+# Variables used in tests
+db_vims_openstack = {
+ "my_target_vim": {"vim_type": "openstack"},
+}
+db_vims_aws = {
+ "my_target_vim": {"vim_type": "aws"},
+}
+
class TestConfigValidate(unittest.TestCase):
def setUp(self):
class TestNsWorker(unittest.TestCase):
- def setUp(self):
+ @patch("logging.getLogger", autospec=True)
+ def setUp(self, mock_logger):
+ mock_logger = logging.getLogger()
+ mock_logger.disabled = True
self.task_depends = None
self.plugins = {}
+ self.db_vims = db_vims_openstack
+ self.db = Mock(DbMemory())
self.worker_index = "worker-3"
self.config = {
"period": {
"_id": "122436:1",
"locked_by": None,
"locked_at": 0.0,
- "target_id": "vim_openstack_1",
+ "target_id": "my_target_vim",
"vim_info": {
"created": False,
"created_items": None,
},
],
}
+ self.instance = NsWorker(self.worker_index, self.config, self.plugins, self.db)
+ self.instance.db_vims = db_vims_openstack
+ self.instance.refresh_config = Mock()
def get_disabled_tasks(self, db, status):
db_disabled_tasks = db.get_list(
)
return db_disabled_tasks
- def test__update_vm_refresh(self):
- with self.subTest(
- i=1,
- t="1 disabled task with status BUILD in DB, refresh_active parameter is not equal to -1",
- ):
- # Disabled task with status build will not enabled again
- db = DbMemory()
- self.ro_task["tasks"][0]["status"] = "BUILD"
- self.ro_task["to_check_at"] = -1
- db.create("ro_tasks", self.ro_task)
- disabled_tasks_count = len(self.get_disabled_tasks(db, "BUILD"))
- instance = NsWorker(self.worker_index, self.config, self.plugins, db)
- with patch.object(instance, "logger", logging):
- instance.update_vm_refresh()
- self.assertEqual(
- len(self.get_disabled_tasks(db, "BUILD")), disabled_tasks_count
- )
-
- with self.subTest(
- i=2,
- t="1 disabled task with status DONE in DB, refresh_active parameter is equal to -1",
- ):
- # As refresh_active parameter is equal to -1, task will not be enabled to process again
- db = DbMemory()
- self.config["period"]["refresh_active"] = -1
- self.ro_task["tasks"][0]["status"] = "DONE"
- self.ro_task["to_check_at"] = -1
- db.create("ro_tasks", self.ro_task)
- disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
- instance = NsWorker(self.worker_index, self.config, self.plugins, db)
- with patch.object(instance, "logger", logging):
- instance.update_vm_refresh()
- self.assertEqual(
- len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count
- )
-
- with self.subTest(
- i=3,
- t="2 disabled task with status DONE in DB, refresh_active parameter is not equal to -1",
- ):
- # Disabled tasks should be enabled to process again
- db = DbMemory()
- self.config["period"]["refresh_active"] = 66
- self.ro_task["tasks"][0]["status"] = "DONE"
- self.ro_task["to_check_at"] = -1
- db.create("ro_tasks", self.ro_task)
- self.ro_task2 = self.ro_task
- self.ro_task2["_id"] = "122437:1"
- db.create("ro_tasks", self.ro_task2)
- disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
- instance = NsWorker(self.worker_index, self.config, self.plugins, db)
- with patch.object(instance, "logger", logging):
- instance.update_vm_refresh()
- self.assertEqual(
- len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count - 2
- )
+ def test_update_vm_refresh_disabled_task_with_status_build_vim_openstack_with_refresh(
+ self,
+ ):
+ """1 disabled task with status BUILD in DB, refresh_active parameter is not equal to -1."""
+ # Disabled task with status build is not enabled again
+ db = DbMemory()
+ self.ro_task["tasks"][0]["status"] = "BUILD"
+ self.config["period"]["refresh_active"] = 70
+ self.ro_task["to_check_at"] = -1
+ db.create("ro_tasks", self.ro_task)
+ disabled_tasks_count = len(self.get_disabled_tasks(db, "BUILD"))
+ instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+ instance.update_vm_refresh(self.ro_task)
+ self.assertEqual(
+ len(self.get_disabled_tasks(db, "BUILD")), disabled_tasks_count
+ )
- with self.subTest(
- i=4,
- t="No disabled task with status DONE in DB, refresh_active parameter is not equal to -1",
- ):
- # If there is not any disabled task, method will not change anything
- db = DbMemory()
- self.config["period"]["refresh_active"] = 66
- self.ro_task["tasks"][0]["status"] = "DONE"
- self.ro_task["to_check_at"] = 16373242400.994312
- db.create("ro_tasks", self.ro_task)
- self.ro_task2 = self.ro_task
- self.ro_task2["_id"] = "122437:1"
- db.create("ro_tasks", self.ro_task2)
- disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
- instance = NsWorker(self.worker_index, self.config, self.plugins, db)
- with patch.object(instance, "logger", logging):
- instance.update_vm_refresh()
- self.assertEqual(
- len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count
- )
+ def test_update_vm_refresh_disabled_task_with_status_done_vim_openstack_no_refresh(
+ self,
+ ):
+ """1 disabled task with status DONE in DB, refresh_active parameter is equal to -1."""
+        # As the refresh_active parameter is equal to -1, the task is not enabled to be processed again
+ db = DbMemory()
+ self.config["period"]["refresh_active"] = -1
+ self.ro_task["tasks"][0]["status"] = "DONE"
+ self.ro_task["to_check_at"] = -1
+ db.create("ro_tasks", self.ro_task)
+ disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+ instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+ instance.update_vm_refresh(self.ro_task)
+ self.assertEqual(len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count)
+
+ def test_update_vm_refresh_disabled_task_with_status_done_vim_aws_with_refresh(
+ self,
+ ):
+        """2 disabled tasks with status DONE in DB, refresh_active parameter is not equal to -1."""
+        # Disabled tasks should be enabled to be processed again as the vim type is aws
+ db = DbMemory()
+ self.config["period"]["refresh_active"] = 66
+ self.ro_task["tasks"][0]["status"] = "DONE"
+ self.ro_task["to_check_at"] = -1
+ db.create("ro_tasks", self.ro_task)
+ self.ro_task2 = self.ro_task
+ self.ro_task2["_id"] = "122437:1"
+ db.create("ro_tasks", self.ro_task2)
+ disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+ instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+ with patch.object(instance, "db_vims", db_vims_aws):
+ instance.update_vm_refresh(self.ro_task)
+ self.assertEqual(
+ len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count - 2
+ )
- def test__process_pending_tasks(self):
- with self.subTest(
- i=1,
- t="refresh_active parameter is equal to -1, task status is DONE",
- ):
- # Task should be disabled to process again
- db = DbMemory()
- self.config["period"]["refresh_active"] = -1
- self.ro_task["tasks"][0]["status"] = "DONE"
- self.ro_task["to_check_at"] = 16373242400.994312
- db.create("ro_tasks", self.ro_task)
- # Number of disabled tasks in DB
- disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
- instance = NsWorker(self.worker_index, self.config, self.plugins, db)
- with patch.object(instance, "logger", logging):
- instance._process_pending_tasks(self.ro_task)
- self.assertEqual(
- len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count + 1
- )
+ def test_update_vm_refresh_no_disabled_task_with_status_done_vim_openstack_with_refresh(
+ self,
+ ):
+ """No disabled task with status DONE in DB, refresh_active parameter is not equal to -1."""
+ # There is not any disabled task, method does not change anything
+ db = DbMemory()
+ self.config["period"]["refresh_active"] = 66
+ self.ro_task["tasks"][0]["status"] = "DONE"
+ self.ro_task["to_check_at"] = 16373242400.994312
+ db.create("ro_tasks", self.ro_task)
+ self.ro_task2 = self.ro_task
+ self.ro_task2["_id"] = "122437:1"
+ db.create("ro_tasks", self.ro_task2)
+ disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+ instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+ instance.update_vm_refresh(self.ro_task)
+ self.assertEqual(len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count)
+
+ def test_update_vm_refresh_disabled_task_with_status_done_vim_openstack_with_refresh(
+ self,
+ ):
+ """1 disabled task with status DONE in DB, refresh_active parameter is equal to -1, vim type is Openstack."""
+ # Disabled task with status done is not enabled again as vim type is openstack
+ db = DbMemory()
+ self.ro_task["tasks"][0]["status"] = "DONE"
+ self.ro_task["to_check_at"] = -1
+ db.create("ro_tasks", self.ro_task)
+ disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+ instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+ instance.update_vm_refresh(self.ro_task)
+ self.assertEqual(len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count)
+
+ def test_process_pending_tasks_status_done_vim_aws_no_refresh(self):
+ """Refresh_active parameter is equal to -1, task status is DONE."""
+ # Task should be disabled to process again
+ db = DbMemory()
+ self.config["period"]["refresh_active"] = -1
+ self.ro_task["tasks"][0]["status"] = "DONE"
+ self.ro_task["to_check_at"] = 16373242400.994312
+ db.create("ro_tasks", self.ro_task)
+ # Number of disabled tasks in DB
+ disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+ instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+ with patch.object(instance, "db_vims", db_vims_aws):
+ instance._process_pending_tasks(self.ro_task)
+ self.assertEqual(
+ len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count + 1
+ )
- with self.subTest(
- i=2, t="refresh_active parameter is equal to -1, task status is FAILED"
- ):
- # Task will not be disabled to process as task status is not DONE
- db = DbMemory()
- self.config["period"]["refresh_active"] = -1
- self.ro_task["tasks"][0]["status"] = "FAILED"
- self.ro_task["to_check_at"] = 16373242400.994312
- db.create("ro_tasks", self.ro_task)
- disabled_tasks_count = len(self.get_disabled_tasks(db, "FAILED"))
- instance = NsWorker(self.worker_index, self.config, self.plugins, db)
- with patch.object(instance, "logger", logging):
- instance._process_pending_tasks(self.ro_task)
- self.assertEqual(
- len(self.get_disabled_tasks(db, "FAILED")), disabled_tasks_count
- )
+ def test_process_pending_tasks_status_failed_vim_aws_no_refresh(self):
+ """Refresh_active parameter is equal to -1, task status is FAILED."""
+ # Task is not disabled to process as task status is not DONE
+ db = DbMemory()
+ self.config["period"]["refresh_active"] = -1
+ self.ro_task["tasks"][0]["status"] = "FAILED"
+ self.ro_task["to_check_at"] = 16373242400.994312
+ db.create("ro_tasks", self.ro_task)
+ disabled_tasks_count = len(self.get_disabled_tasks(db, "FAILED"))
+ instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+ with patch.object(instance, "db_vims", db_vims_aws):
+ instance._process_pending_tasks(self.ro_task)
+ self.assertEqual(
+ len(self.get_disabled_tasks(db, "FAILED")), disabled_tasks_count
+ )
- with self.subTest(
- i=3, t="refresh_active parameter is not equal to -1, task status is DONE"
- ):
- # Task will not be disabled to process as refresh_active parameter is not -1
- db = DbMemory()
- self.config["period"]["refresh_active"] = 70
- self.ro_task["tasks"][0]["status"] = "DONE"
- self.ro_task["to_check_at"] = 16373242400.994312
- db.create("ro_tasks", self.ro_task)
- disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
- instance = NsWorker(self.worker_index, self.config, self.plugins, db)
- with patch.object(instance, "logger", logging):
- instance._process_pending_tasks(self.ro_task)
- self.assertEqual(
- len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count
- )
+ def test_process_pending_tasks_status_done_vim_aws_with_refresh(self):
+ """Refresh_active parameter is not equal to -1, task status is DONE."""
+ # Task is not disabled to process as refresh_active parameter is not -1
+ db = DbMemory()
+ self.config["period"]["refresh_active"] = 70
+ self.ro_task["tasks"][0]["status"] = "DONE"
+ self.ro_task["to_check_at"] = 16373242400.994312
+ db.create("ro_tasks", self.ro_task)
+ disabled_tasks_count = len(self.get_disabled_tasks(db, "DONE"))
+ instance = NsWorker(self.worker_index, self.config, self.plugins, db)
+ with patch.object(instance, "db_vims", db_vims_aws):
+ instance._process_pending_tasks(self.ro_task)
+ self.assertEqual(
+ len(self.get_disabled_tasks(db, "DONE")), disabled_tasks_count
+ )
@patch("osm_ng_ro.ns_thread.makedirs", return_value="")
def test_create_file_cert(self, mock_makedirs):
)
assert vim_config["config"]["ca_cert_content"] == "test"
+ def test_get_next_refresh_vim_type_openstack(self):
+ next_refresh = 163535353434.3434
+ result = self.instance._get_next_refresh(self.ro_task, next_refresh)
+ self.assertEqual(result, -1)
+
+ def test_get_next_refresh_vim_type_openstack_refresh_disabled(self):
+ next_refresh = 163535353434.3434
+ self.instance.refresh_config.active = -1
+ result = self.instance._get_next_refresh(self.ro_task, next_refresh)
+ self.assertEqual(result, -1)
+
+ def test_get_next_refresh_vim_type_aws_refresh_disabled(self):
+ self.instance.db_vims = db_vims_aws
+ next_refresh = 163535353434.3434
+ self.instance.refresh_config.active = -1
+ result = self.instance._get_next_refresh(self.ro_task, next_refresh)
+ self.assertEqual(result, -1)
+
+ def test_get_next_refresh_vim_type_aws(self):
+ self.instance.db_vims = db_vims_aws
+ next_refresh = 163535353434.3434
+ self.instance.refresh_config.active = 140
+ result = self.instance._get_next_refresh(self.ro_task, next_refresh)
+ self.assertEqual(result, next_refresh + 140)
+
class TestVimInteractionNet(unittest.TestCase):
def setUp(self):
db_vims = {
"vim_openstack_1": {
"config": {},
+ "vim_type": "openstack",
},
}
instance = VimInteractionNet(db, logger, my_vims, db_vims)
db_vims = {
"vim_openstack_1": {
"config": {},
+ "vim_type": "openstack",
},
}
instance = VimInteractionNet(db, logger, my_vims, db_vims)
db_vims = {
"vim_openstack_1": {
"config": {},
+ "vim_type": "openstack",
},
}
instance = VimInteractionNet(db, logger, my_vims, db_vims)
db_vims = {
"vim_openstack_1": {
"config": {},
+ "vim_type": "openstack",
},
}
instance = VimInteractionNet(db, logger, my_vims, db_vims)
db_vims = {
"vim_openstack_1": {
"config": {},
+ "vim_type": "openstack",
},
}
instance = VimInteractionNet(db, logger, my_vims, db_vims)
self.config = config
self.logger = logger
self.to_terminate = False
- self.loop = None
self.db = None
self.task_locked_time = config["global"]["task_locked_time"]
self.task_relock_time = config["global"]["task_relock_time"]
self.task_max_locked_time = config["global"]["task_max_locked_time"]
- def start(self, db, loop):
+ def start(self, db):
self.db = db
- self.loop = loop
@staticmethod
def add_lock_object(database_table, database_object, thread_object):
async def renew_locks(self):
while not self.to_terminate:
if not self.renew_list:
- await asyncio.sleep(
- self.task_locked_time - self.task_relock_time, loop=self.loop
- )
+ await asyncio.sleep(self.task_locked_time - self.task_relock_time)
continue
lock_object = self.renew_list[0]
)
else:
# wait until it is time to re-lock it
- await asyncio.sleep(time_to_relock, loop=self.loop)
+ await asyncio.sleep(time_to_relock)
def stop(self):
# unlock all locked items
self.next_check_unused_vim = now + self.TIME_CHECK_UNUSED_VIM
self.engine.unload_unused_vims()
- await asyncio.sleep(self.MAX_TIME_UNATTENDED, loop=self.loop)
+ await asyncio.sleep(self.MAX_TIME_UNATTENDED)
async def aiomain(self):
kafka_working = True
while not self.to_terminate:
try:
if not self.aiomain_task_kafka:
- # await self.msg.aiowrite("admin", "echo", "dummy message", loop=self.loop)
for kafka_topic in self.kafka_topics:
- await self.msg.aiowrite(
- kafka_topic, "echo", "dummy message", loop=self.loop
- )
+ await self.msg.aiowrite(kafka_topic, "echo", "dummy message")
kafka_working = True
self.logger.debug("Starting vim_account subscription task")
self.aiomain_task_kafka = asyncio.ensure_future(
self.msg.aioread(
self.kafka_topics,
- loop=self.loop,
group_id=False,
aiocallback=self._msg_callback,
),
- loop=self.loop,
)
if not self.aiomain_task_vim:
- self.aiomain_task_vim = asyncio.ensure_future(
- self.vim_watcher(), loop=self.loop
- )
+ self.aiomain_task_vim = asyncio.ensure_future(self.vim_watcher())
if not self.aiomain_task_renew_lock:
self.aiomain_task_renew_lock = asyncio.ensure_future(
- self.lock_renew.renew_locks(), loop=self.loop
+ self.lock_renew.renew_locks()
)
done, _ = await asyncio.wait(
self.aiomain_task_renew_lock,
],
timeout=None,
- loop=self.loop,
return_when=asyncio.FIRST_COMPLETED,
)
)
kafka_working = False
- await asyncio.sleep(10, loop=self.loop)
+ await asyncio.sleep(10)
def run(self):
"""
)
)
- self.lock_renew.start(self.db, self.loop)
+ self.lock_renew.start(self.db)
if not self.msg:
config_msg = self.config["message"].copy()
- config_msg["loop"] = self.loop
if config_msg["driver"] == "local":
self.msg = msglocal.MsgLocal()
self.logger.info("Starting")
while not self.to_terminate:
try:
- self.loop.run_until_complete(
- asyncio.ensure_future(self.aiomain(), loop=self.loop)
- )
- # except asyncio.CancelledError:
- # break # if cancelled it should end, breaking loop
+ asyncio.run(self.main_task())
except Exception as e:
if not self.to_terminate:
self.logger.exception(
self._stop()
self.loop.close()
+ async def main_task(self):
+ task = asyncio.ensure_future(self.aiomain())
+ await task
+
async def _msg_callback(self, topic, command, params):
"""
Callback to process a received message from kafka
self.lock_renew.to_terminate = True
if self.aiomain_task_kafka:
- self.loop.call_soon_threadsafe(self.aiomain_task_kafka.cancel)
+ self.loop.call_soon_threadsafe(self.aiomain_task_kafka.cancel)
if self.aiomain_task_vim:
- self.loop.call_soon_threadsafe(self.aiomain_task_vim.cancel)
+ self.loop.call_soon_threadsafe(self.aiomain_task_vim.cancel)
if self.aiomain_task_renew_lock:
- self.loop.call_soon_threadsafe(self.aiomain_task_renew_lock.cancel)
+ self.loop.call_soon_threadsafe(self.aiomain_task_renew_lock.cancel)
self.lock_renew.stop()
logutils
importlib-metadata
Jinja2
+python-novaclient
}
+def check_if_assert_not_called(mocks: list):
+ for mocking in mocks:
+ mocking.assert_not_called()
+
+
class Status:
def __init__(self, s):
self.status = s
self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
self.assertDictEqual(port_dict, result_dict)
- def test_prepare_port_dict_mac_ip_addr_no_mac_and_ip(self):
+ def test_prepare_port_dict_mac_ip_addr_empty_net(self):
"""mac address and ip address does not exist."""
net = {}
port_dict = {}
self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
self.assertDictEqual(port_dict, result_dict)
+ def test_prepare_port_dict_mac_ip_addr_dual(self):
+ """mac address, ipv4 and ipv6 addresses exist."""
+ net = {
+ "mac_address": mac_address,
+ "ip_address": ["10.0.1.5", "2345:0425:2CA1:0000:0000:0567:5673:23b5"],
+ }
+ port_dict = {}
+ result_dict = {
+ "mac_address": mac_address,
+ "fixed_ips": [
+ {"ip_address": "10.0.1.5"},
+ {"ip_address": "2345:0425:2CA1:0000:0000:0567:5673:23b5"},
+ ],
+ }
+ self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+ self.assertDictEqual(port_dict, result_dict)
+
+ def test_prepare_port_dict_mac_ip_addr_dual_ip_addr_is_not_list(self):
+ """mac address, ipv4 and ipv6 addresses exist."""
+ net = {
+ "mac_address": mac_address,
+ "ip_address": "10.0.1.5",
+ }
+ port_dict = {}
+ result_dict = {
+ "mac_address": mac_address,
+ "fixed_ips": [
+ {"ip_address": "10.0.1.5"},
+ ],
+ }
+ self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+ self.assertDictEqual(port_dict, result_dict)
+
+ def test_prepare_port_dict_mac_ip_addr_dual_net_without_ip_addr(self):
+ """mac address, ipv4 and ipv6 addresses exist."""
+ net = {
+ "mac_address": mac_address,
+ "ip_address": [],
+ }
+ port_dict = {}
+ result_dict = {
+ "mac_address": mac_address,
+ }
+ self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+ self.assertDictEqual(port_dict, result_dict)
+
+ def test_prepare_port_dict_mac_ip_addr_dual_net_without_mac_addr(self):
+ """mac address, ipv4 and ipv6 addresses exist."""
+ net = {
+ "ip_address": ["10.0.1.5", "2345:0425:2CA1:0000:0000:0567:5673:23b5"],
+ }
+ port_dict = {}
+ result_dict = {
+ "fixed_ips": [
+ {"ip_address": "10.0.1.5"},
+ {"ip_address": "2345:0425:2CA1:0000:0000:0567:5673:23b5"},
+ ],
+ }
+ self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+ self.assertDictEqual(port_dict, result_dict)
+
def test_create_new_port(self):
"""new port has id and mac address."""
new_port = {
net, port_dict, created_items = {}, {}, {}
expected_result = new_port
expected_net = {
- "mac_adress": mac_address,
+ "mac_address": mac_address,
"vim_id": port_id,
}
expected_created_items = {f"port:{port_id}": True}
self.assertDictEqual(result, created_items)
def test_update_block_device_mapping_empty_volume(self):
- """"""
volume = ""
block_device_mapping = {}
base_disk_index = 100
self.assertEqual(created_items, {})
def test_update_block_device_mapping_invalid_volume(self):
- """"""
volume = "Volume-A"
block_device_mapping = {}
base_disk_index = 100
self.assertEqual(created_items, {})
def test_update_block_device_mapping(self):
- """"""
volume = MagicMock(autospec=True)
volume.id = volume_id
block_device_mapping = {}
)
def test_update_block_device_mapping_with_keep_flag(self):
- """"""
volume = MagicMock(autospec=True)
volume.id = volume_id
block_device_mapping = {}
with self.assertRaises(AttributeError):
self.vimconn._extract_items_wth_keep_flag_from_created_items(created_items)
+ @patch.object(vimconnector, "_reload_connection", new_callable=CopyingMock())
+ def test_get_monitoring_data(self, mock_reload_conection):
+ servers = ["server1", "server2"]
+ ports = {"ports": ["port1", "port2"]}
+ self.vimconn.nova.servers.list.return_value = servers
+ self.vimconn.neutron.list_ports.return_value = ports
+ result = self.vimconn.get_monitoring_data()
+ self.assertTupleEqual(result, (servers, ports))
+ mock_reload_conection.assert_called_once()
+ self.vimconn.nova.servers.list.assert_called_once_with(detailed=True)
+ self.vimconn.neutron.list_ports.assert_called_once()
+
+ @patch.object(vimconnector, "_reload_connection", new_callable=CopyingMock())
+ def test_get_monitoring_data_reload_connection_raises(self, mock_reload_conection):
+ mock_reload_conection.side_effect = VimConnNotFoundException(
+ "Connection object not found."
+ )
+ with self.assertRaises(VimConnException) as err:
+ result = self.vimconn.get_monitoring_data()
+ self.assertTupleEqual(result, None)
+ self.assertEqual(
+ str(err.exception.args[0]),
+ "Exception in monitoring while getting VMs and ports status: Connection object not found.",
+ )
+ mock_reload_conection.assert_called_once()
+ check_if_assert_not_called(
+ [self.vimconn.nova.servers.list, self.vimconn.neutron.list_ports]
+ )
+
+ @patch.object(vimconnector, "_reload_connection", new_callable=CopyingMock())
+ def test_get_monitoring_data_server_list_raises(self, mock_reload_conection):
+ self.vimconn.nova.servers.list.side_effect = VimConnConnectionException(
+ "Can not connect to Cloud API."
+ )
+ with self.assertRaises(VimConnException) as err:
+ result = self.vimconn.get_monitoring_data()
+ self.assertTupleEqual(result, None)
+ self.assertEqual(
+ str(err.exception.args[0]),
+ "Exception in monitoring while getting VMs and ports status: Can not connect to Cloud API.",
+ )
+ mock_reload_conection.assert_called_once()
+ self.vimconn.nova.servers.list.assert_called_once_with(detailed=True)
+ self.vimconn.neutron.list_ports.assert_not_called()
+
+ @patch.object(vimconnector, "_reload_connection", new_callable=CopyingMock())
+ def test_get_monitoring_data_list_ports_raises(self, mock_reload_conection):
+ self.vimconn.neutron.list_ports.side_effect = VimConnConnectionException(
+ "Can not connect to Cloud API."
+ )
+ with self.assertRaises(VimConnException) as err:
+ result = self.vimconn.get_monitoring_data()
+ self.assertTupleEqual(result, None)
+ self.assertEqual(
+ str(err.exception.args[0]),
+ "Exception in monitoring while getting VMs and ports status: Can not connect to Cloud API.",
+ )
+ mock_reload_conection.assert_called_once()
+ self.vimconn.nova.servers.list.assert_called_once_with(detailed=True)
+ self.vimconn.neutron.list_ports.assert_called_once()
+
class TestNewFlavor(unittest.TestCase):
@patch("logging.getLogger", autospec=True)
self.new_flavor.id = "075d2482-5edb-43e3-91b3-234e65b6268a"
self.vimconn.nova.flavors.create.return_value = self.new_flavor
- @staticmethod
- def check_if_assert_not_called(mocks: list):
- for mocking in mocks:
- mocking.assert_not_called()
-
@patch.object(vimconnector, "process_vio_numa_nodes", new_callable=CopyingMock())
@patch.object(vimconnector, "process_numa_memory", new_callable=CopyingMock())
@patch.object(vimconnector, "process_numa_vcpu", new_callable=CopyingMock())
),
)
self.assertDictEqual(extra_specs, expected_extra_specs)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
mock_process_numa_threads,
mock_process_numa_cores,
),
)
self.assertDictEqual(extra_specs, expected_extra_specs)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
mock_process_numa_threads,
mock_process_numa_cores,
mock_process_numa_paired_threads.side_effect = [6, 6]
self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
- self.check_if_assert_not_called(
- [mock_process_numa_threads, mock_process_numa_cores]
- )
+ check_if_assert_not_called([mock_process_numa_threads, mock_process_numa_cores])
self.assertEqual(mock_process_numa_memory.call_count, 2)
self.assertEqual(mock_process_numa_vcpu.call_count, 2)
self.assertEqual(mock_process_numa_paired_threads.call_count, 2)
self.vimconn.vim_type = "VIO"
mock_process_numa_paired_threads.side_effect = [4, 4]
self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
- self.check_if_assert_not_called(
- [mock_process_numa_threads, mock_process_numa_cores]
- )
+ check_if_assert_not_called([mock_process_numa_threads, mock_process_numa_cores])
self.assertEqual(mock_process_numa_paired_threads.call_count, 2)
self.assertEqual(mock_process_numa_memory.call_count, 2)
self.assertEqual(mock_process_numa_vcpu.call_count, 2)
mock_process_numa_cores.side_effect = [1, 2]
self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[mock_process_numa_threads, mock_process_numa_paired_threads]
)
self.assertEqual(mock_process_numa_cores.call_count, 2)
self.vimconn.vim_type = "VIO"
mock_process_numa_cores.side_effect = [1, 2]
self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[mock_process_numa_threads, mock_process_numa_paired_threads]
)
self.assertEqual(mock_process_numa_memory.call_count, 2)
self.vimconn.vim_type = "VIO"
mock_process_numa_threads.return_value = 3
self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
mock_process_numa_memory,
mock_process_numa_vcpu,
mock_process_numa_threads.return_value = 3
self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
mock_process_numa_memory,
mock_process_numa_vcpu,
expected_extra_specs = {"hw:numa_nodes": "0"}
self.vimconn.vim_type = "VIO"
self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
mock_process_numa_memory,
mock_process_numa_vcpu,
mock_process_numa_threads.return_value = None
self.vimconn._process_numa_parameters_of_flavor(numas, extra_specs)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
mock_process_numa_memory,
mock_process_numa_vcpu,
extended = {}
extra_specs = {}
self.vimconn._process_extended_config_of_flavor(extended, extra_specs)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[mock_process_resource_quota, mock_process_numa_parameters_of_flavor]
)
self.assertEqual(extra_specs, {})
self.vimconn.nova.flavors.create.assert_called_once_with(
name=name1, ram=3, vcpus=vcpus, disk=50, ephemeral=0, swap=0, is_public=True
)
- self.check_if_assert_not_called(
- [self.new_flavor.set_keys, mock_format_exception]
- )
+ check_if_assert_not_called([self.new_flavor.set_keys, mock_format_exception])
@patch.object(vimconnector, "_get_flavor_details", new_callable=CopyingMock())
@patch.object(
self.vimconn.nova.flavors.create.assert_called_once_with(
name=name1, ram=3, vcpus=8, disk=50, ephemeral=0, swap=0, is_public=True
)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[mock_change_flavor_name, mock_format_exception, self.new_flavor.set_keys]
)
self.vimconn.nova.flavors.create.assert_called_once_with(
name=name1, ram=3, vcpus=8, disk=50, ephemeral=0, swap=0, is_public=True
)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
self.new_flavor.set_keys,
mock_extended_config_of_flavor,
self.assertEqual(
str(call_mock_format_exception[0][0]), str(ClientException(error_msg))
)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
mock_change_flavor_name,
mock_get_flavor_details,
self.assertEqual(
str(call_mock_format_exception[0][0]), str(KeyError(error_msg))
)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
mock_reload_connection,
mock_change_flavor_name,
swap=0,
is_public=True,
)
- self.check_if_assert_not_called(
- [self.new_flavor.set_keys, mock_format_exception]
- )
+ check_if_assert_not_called([self.new_flavor.set_keys, mock_format_exception])
@patch.object(vimconnector, "_get_flavor_details", new_callable=CopyingMock())
@patch.object(
swap=0,
is_public=True,
)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[
self.new_flavor.set_keys,
mock_extended_config_of_flavor,
self.assertEqual(mock_get_flavor_details.call_count, 3)
self.assertEqual(self.vimconn.nova.flavors.create.call_count, 3)
self.assertEqual(mock_reload_connection.call_count, 3)
- self.check_if_assert_not_called(
+ check_if_assert_not_called(
[mock_change_flavor_name, mock_extended_config_of_flavor]
)
_call_mock_format_exception = mock_format_exception.call_args
numa_nodes = 0
extra_specs = {"hw:numa_nodes": "0"}
expected_extra_spec = {
- "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
"vmware:latency_sensitivity_level": "high",
"hw:numa_nodes": "0",
}
expected_extra_spec = {
"vmware:latency_sensitivity_level": "high",
"hw:numa_nodes": "None",
- "vmware:extra_config": '{"numa.nodeAffinity":"0"}',
}
self.vimconn.process_vio_numa_nodes(numa_nodes, extra_specs)
self.assertDictEqual(extra_specs, expected_extra_spec)
endpoint_type=self.endpoint_type,
region_name=region_name,
)
- self.cinder = self.session["cinder"] = cClient.Client(
- 2,
- session=sess,
- endpoint_type=self.endpoint_type,
- region_name=region_name,
- )
+
+ if sess.get_all_version_data(service_type="volumev2"):
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 2,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
+ else:
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 3,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
try:
self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
if not ip_profile.get("subnet_address"):
# Fake subnet is required
- subnet_rand = random.randint(0, 255)
+ subnet_rand = random.SystemRandom().randint(0, 255)
ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
if "ip_version" not in ip_profile:
ip_str = str(netaddr.IPAddress(ip_int))
subnet["allocation_pools"][0]["end"] = ip_str
+ if (
+ ip_profile.get("ipv6_address_mode")
+ and ip_profile["ip_version"] != "IPv4"
+ ):
+ subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+ # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+ # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+ subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
# self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
self.neutron.create_subnet({"subnet": subnet})
extra_specs (dict): Extra specs dict to be updated
"""
- # If there is not any numa, numas_nodes equals to 0.
- if not numa_nodes:
- extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
-
# If there are several numas, we do not define specific affinity.
extra_specs["vmware:latency_sensitivity_level"] = "high"
if net.get("mac_address"):
port_dict["mac_address"] = net["mac_address"]
- if net.get("ip_address"):
- port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+ ip_dual_list = []
+ if ip_list := net.get("ip_address"):
+ if not isinstance(ip_list, list):
+ ip_list = [ip_list]
+ for ip in ip_list:
+ ip_dict = {"ip_address": ip}
+ ip_dual_list.append(ip_dict)
+ port_dict["fixed_ips"] = ip_dual_list
# TODO add "subnet_id": <subnet_id>
def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
"""
new_port = self.neutron.create_port({"port": port_dict})
created_items["port:" + str(new_port["port"]["id"])] = True
- net["mac_adress"] = new_port["port"]["mac_address"]
+ net["mac_address"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
return new_port
self.__wait_for_vm(vm_id, "ACTIVE")
instance_status = self.get_vdu_state(vm_id)[0]
return instance_status
+
+ def get_monitoring_data(self):
+ try:
+ self.logger.debug("Getting servers and ports data from Openstack VIMs.")
+ self._reload_connection()
+ all_servers = self.nova.servers.list(detailed=True)
+ all_ports = self.neutron.list_ports()
+ return all_servers, all_ports
+ except (
+ vimconn.VimConnException,
+ vimconn.VimConnNotFoundException,
+ vimconn.VimConnConnectionException,
+ ) as e:
+ raise vimconn.VimConnException(
+ f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+ )
from copy import deepcopy
import logging
-from random import randrange
+from random import SystemRandom
from uuid import uuid4
from osm_ro_plugin import vimconn
if iface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH") and self.config.get(
"sdn-port-mapping"
):
- compute_index = randrange(len(self.config["sdn-port-mapping"]))
- port_index = randrange(
+ compute_index = SystemRandom().randrange(
+ len(self.config["sdn-port-mapping"])
+ )
+ port_index = SystemRandom().randrange(
len(self.config["sdn-port-mapping"][compute_index]["ports"])
)
interface["compute_node"] = self.config["sdn-port-mapping"][
userdata = None
userdata_list = []
+ # For more information, check https://cloudinit.readthedocs.io/en/latest/reference/merging.html
+ # Basically, with this, we don't override the provider's cloud config
+ merge_how = yaml.safe_dump(
+ {
+ "merge_how": [
+ {
+ "name": "list",
+ "settings": ["append", "recurse_dict", "recurse_list"],
+ },
+ {
+ "name": "dict",
+ "settings": ["no_replace", "recurse_list", "recurse_dict"],
+ },
+ ]
+ },
+ indent=4,
+ default_flow_style=False,
+ )
+
if isinstance(cloud_config, dict):
if cloud_config.get("user-data"):
if isinstance(cloud_config["user-data"], str):
- userdata_list.append(cloud_config["user-data"])
+ userdata_list.append(cloud_config["user-data"] + f"\n{merge_how}")
else:
for u in cloud_config["user-data"]:
- userdata_list.append(u)
+ userdata_list.append(u + f"\n{merge_how}")
if cloud_config.get("boot-data-drive") is not None:
config_drive = cloud_config["boot-data-drive"]
# default user
if cloud_config.get("key-pairs"):
userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
- userdata_dict["users"] = [
- {
- "default": None,
- "ssh-authorized-keys": cloud_config["key-pairs"],
+ userdata_dict["system_info"] = {
+ "default_user": {
+ "ssh_authorized_keys": cloud_config["key-pairs"],
}
- ]
+ }
+ userdata_dict["users"] = ["default"]
if cloud_config.get("users"):
if "users" not in userdata_dict:
userdata_list.append(
"#cloud-config\n"
+ yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False)
+ + f"\n{merge_how}"
)
userdata = self._create_mimemultipart(userdata_list)
self.logger.debug("userdata: %s", userdata)
rm -rf dists
mkdir -p pool/RO
mv deb_dist/*.deb pool/RO/
-mkdir -p dists/unstable/RO/binary-amd64/
-apt-ftparchive packages pool/RO > dists/unstable/RO/binary-amd64/Packages
-gzip -9fk dists/unstable/RO/binary-amd64/Packages
+
dist_ro_vim_gcp"
TOX_ENV_LIST="$(echo $PACKAGES | sed "s/ /,/g")"
-PROCESSES=$(expr `nproc --a` / 2)
TOX_PARALLEL_NO_SPINNER=1 tox -e $TOX_ENV_LIST
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+security:
+ - |
+ Coverity fix for CWE 330: Use of Insufficiently Random Values
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+security:
+ - |
+ Coverity fix for CWE 688: Function Call With Incorrect Variable or Reference as Argument
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    Fix Bug 2098 - Get VDUs from VNFR when Heal op has no additionalParameters.
+ With this fix, when Heal is requested without vdu or count-index parameters
+ RO will recreate all VDUs from VNFR
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10960 Performance optimizations for the polling of VM status in RO.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10975: Get flavor-id from additionalParams if specified.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10978: Add support of ipv6_address_mode and ipv6_ra_mode to openstack connector.
\ No newline at end of file
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10979 - Static IPv6 Dual Stack IP Assignment for Openstack VIM
+ This feature enables assigning static IPv6 assignment to VNFs to enable
+ dual stack IP assignment.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix Bug 2202 Adding support for cinder V3 API with V2 API for persistent volumes
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix Bug 2216 All deployments with EPA in VIO land in NUMA node 0
+ This fix removes the hardcoded decision that makes all deployments
+    with EPA land in NUMA node 0.
+ The fix removes the extra_spec "vmware:extra_config" that was previously
+ set to '{"numa.nodeAffinity":"0"}'.
+ It maintains the extra_spec "vmware:latency_sensitivity_level", set to "high"
+ for deployments with EPA.
+
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ The run_coroutine_threadsafe() function is used to schedule a coroutine object from a different thread and returns a concurrent.futures.Future.
+ run_coroutine_threadsafe is unnecessary to run the main task and replaced with asyncio.run().
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ This fixes bug 2217. Modified the cloud-init merge configs and defined the default SSH keys within the system_info instead of users
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ vim_details was set to None while reporting VM deletion by mistake. vim_details are kept by this fix.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+upgrade:
+ - |
+ Removing tox and pip installation using pip from Dockerfile.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+ - |
+ ip_profile is received ready to use from LCM, so the method to translate it and its unit tests are removed
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+upgrade:
+ - |
+ Upgrade Ubuntu from 20.04 to 22.04 and Python from 3.8 to 3.10.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+ - |
+ Update stage-build to run tox sequentially.
# via
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
# aiokafka
-motor==1.3.1
+motor==3.1.2
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git@paas
# via -r requirements-dev.in
-packaging==23.0
+packaging==23.1
# via
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
# aiokafka
# temporalio
pycryptodome==3.17
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
-pymongo==3.13.0
+pymongo==4.3.3
# via
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=paas
# motor
#######################################################################################
-e RO-plugin
# via -r requirements-test.in
-coverage==7.2.1
+coverage==7.2.5
# via -r requirements-test.in
-mock==5.0.1
+mock==5.0.2
# via -r requirements-test.in
-nose2==0.12.0
+nose2==0.13.0
# via -r requirements-test.in
# via openstacksdk
atpublic==3.1.1
# via flufl-enum
-attrs==22.2.0
+attrs==23.1.0
# via
# cmd2
# jsonschema
# azure-mgmt-compute
# azure-mgmt-network
# azure-mgmt-resource
-azure-core==1.26.3
+azure-core==1.26.4
# via
# azure-identity
# azure-mgmt-core
# via -r RO-VIM-azure/requirements.in
azure-mgmt-compute==29.1.0
# via -r RO-VIM-azure/requirements.in
-azure-mgmt-core==1.3.2
+azure-mgmt-core==1.4.0
# via
# azure-mgmt-compute
# azure-mgmt-network
# azure-mgmt-resource
-azure-mgmt-network==22.2.0
+azure-mgmt-network==23.0.1
# via -r RO-VIM-azure/requirements.in
-azure-mgmt-resource==22.0.0
+azure-mgmt-resource==23.0.0
# via -r RO-VIM-azure/requirements.in
bcrypt==4.0.1
# via paramiko
# via -r RO-VIM-aws/requirements.in
cachetools==5.3.0
# via google-auth
-certifi==2022.12.7
+certifi==2023.5.7
# via
# msrest
# requests
# via
# cryptography
# pynacl
-charset-normalizer==3.0.1
+charset-normalizer==3.1.0
# via requests
cheroot==9.0.0
# via cherrypy
cherrypy==18.1.2
# via -r NG-RO/requirements.in
-cliff==4.2.0
+cliff==4.3.0
# via
# osc-lib
# python-neutronclient
# python-openstackclient
cmd2==2.4.3
# via cliff
-cryptography==39.0.1
+cryptography==40.0.2
# via
# -r NG-RO/requirements.in
# adal
# paramiko
# pyjwt
# pyopenssl
-cvprac==1.2.2
+cvprac==1.3.1
# via -r RO-SDN-arista_cloudvision/requirements.in
debtcollector==2.5.0
# via
# via
# dogpile-cache
# openstacksdk
-dogpile-cache==1.1.8
+dogpile-cache==1.2.0
# via openstacksdk
flufl-enum==5.0.1
# via pyvcloud
google-api-core==2.11.0
# via google-api-python-client
-google-api-python-client==2.80.0
+google-api-python-client==2.86.0
# via -r RO-VIM-gcp/requirements.in
-google-auth==2.16.1
+google-auth==2.17.3
# via
# -r RO-VIM-gcp/requirements.in
# google-api-core
# via google-api-python-client
google-cloud==0.34.0
# via -r RO-VIM-gcp/requirements.in
-googleapis-common-protos==1.58.0
+googleapis-common-protos==1.59.0
# via google-api-core
-httplib2==0.21.0
+httplib2==0.22.0
# via
# google-api-python-client
# google-auth-httplib2
# via pyvcloud
idna==3.4
# via requests
-importlib-metadata==6.0.0
+importlib-metadata==6.6.0
# via
# -r NG-RO/requirements.in
# cliff
# python-novaclient
# python-openstackclient
isodate==0.6.1
- # via msrest
+ # via
+ # azure-mgmt-network
+ # azure-mgmt-resource
+ # msrest
jaraco-functools==3.6.0
# via
# cheroot
# cheroot
# cherrypy
# jaraco-functools
-msal==1.21.0
+msal==1.22.0
# via
# azure-identity
# msal-extensions
msal-extensions==1.0.0
# via azure-identity
-msgpack==1.0.4
+msgpack==1.0.5
# via oslo-serialization
msrest==0.7.1
# via
# -r RO-VIM-azure/requirements.in
# azure-mgmt-compute
- # azure-mgmt-network
- # azure-mgmt-resource
# msrestazure
msrestazure==0.6.4
# via -r RO-VIM-azure/requirements.in
# oslo-utils
oauthlib==3.2.2
# via requests-oauthlib
-openstacksdk==1.0.1
+openstacksdk==1.1.0
# via
# os-client-config
# osc-lib
# via
# keystoneauth1
# openstacksdk
-osc-lib==2.7.0
+osc-lib==2.8.0
# via
# python-neutronclient
# python-openstackclient
# python-neutronclient
# python-novaclient
# python-openstackclient
-oslo-log==5.1.0
+oslo-log==5.2.0
# via python-neutronclient
oslo-serialization==5.1.1
# via
# python-neutronclient
# python-novaclient
# python-openstackclient
-packaging==23.0
+packaging==23.1
# via
# oslo-utils
# python-keystoneclient
-paramiko==3.0.0
+paramiko==3.1.0
# via
# -r RO-SDN-dpb/requirements.in
# -r RO-VIM-gcp/requirements.in
# via msal-extensions
portend==3.1.0
# via cherrypy
-prettytable==3.6.0
+prettytable==3.7.0
# via
# -r RO-VIM-vmware/requirements.in
# cliff
# googleapis-common-protos
py-radix==0.10.0
# via ipconflict
-pyasn1==0.4.8
+pyasn1==0.5.0
# via
# pyasn1-modules
# rsa
-pyasn1-modules==0.2.8
+pyasn1-modules==0.3.0
# via google-auth
pycparser==2.21
# via cffi
-pygments==2.14.0
+pygments==2.15.1
# via pyvcloud
pyinotify==0.9.6
# via oslo-log
# msal
pynacl==1.5.0
# via paramiko
-pyopenssl==23.0.0
+pyopenssl==23.1.1
# via python-glanceclient
pyparsing==3.0.9
# via
# via cmd2
pyrsistent==0.19.3
# via jsonschema
+pysocks==1.7.1
+ # via requests
python-cinderclient==7.4.1
# via
# -r RO-VIM-openstack/requirements.in
# via -r RO-VIM-openstack/requirements.in
python-novaclient==18.3.0
# via
+ # -r NG-RO/requirements.in
# -r RO-VIM-openstack/requirements.in
# python-openstackclient
-python-openstackclient==6.1.0
+python-openstackclient==6.2.0
# via -r RO-VIM-openstack/requirements.in
-pytz==2022.7.1
+pytz==2023.3
# via
# oslo-serialization
# oslo-utils
# tempora
pyvcloud==19.1.1
# via -r RO-VIM-vmware/requirements.in
-pyvmomi==8.0.0.1.2
+pyvmomi==8.0.1.0
# via -r RO-VIM-vmware/requirements.in
pyyaml==5.4.1
# via
# openstacksdk
# oslo-config
# pyvcloud
-requests==2.28.2
+requests[socks]==2.30.0
# via
# -r NG-RO/requirements.in
# -r RO-SDN-arista_cloudvision/requirements.in
# via oslo-config
rsa==4.9
# via google-auth
-simplejson==3.18.3
+simplejson==3.19.1
# via
# osc-lib
# python-cinderclient
# python-keystoneclient
# python-novaclient
# python-openstackclient
-tempora==5.2.1
+tempora==5.2.2
# via portend
-tqdm==4.64.1
+tqdm==4.65.0
# via ipconflict
typing-extensions==4.5.0
# via azure-core
uritemplate==4.1.1
# via google-api-python-client
-urllib3==1.26.14
+urllib3==2.0.2
# via requests
uuid==1.30
# via -r RO-SDN-arista_cloudvision/requirements.in
[testenv]
usedevelop = True
-basepython = python3
+basepython = python3.10
setenv = VIRTUAL_ENV={envdir}
PYTHONDONTWRITEBYTECODE = 1
deps = -r{toxinidir}/requirements.txt
[testenv:release_notes]
deps = reno
skip_install = true
-whitelist_externals = bash
+allowlist_externals = bash
commands =
reno new {posargs:new_feature}
bash -c "sed -i -e '1 e head -16 tox.ini' releasenotes/notes/{posargs:new_feature}*.yaml"
-r{toxinidir}/requirements-dev.txt
-r{toxinidir}/requirements-test.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
commands =
sh -c 'rm -f nosetests.xml'
coverage erase
[testenv:pip-compile]
deps = pip-tools==6.6.2
skip_install = true
-whitelist_externals = bash
+allowlist_externals = bash
[
commands =
- bash -c "for file in requirements*.in ; do \
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/NG-RO
commands =
sh -c 'rm -rf osm_ng_ro/requirements.txt deb_dist dist osm_ng_ro.egg-info osm_ng_ro*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-plugin
commands =
sh -c 'rm -rf deb_dist dist osm_ro_plugin.egg-info osm_ro_plugin*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-arista_cloudvision
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_arista_cloudvision.egg-info osm_rosdn_arista_cloudvision*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-dpb
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_dpb.egg-info osm_rosdn_dpb*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-dynpac
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_dynpac.egg-info osm_rosdn_dynpac*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-floodlight_openflow
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_floodlightof.egg-info osm_rosdn_floodlightof*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-ietfl2vpn
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_ietfl2vpn.egg-info osm_rosdn_ietfl2vpn*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-juniper_contrail
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_juniper_contrail.egg-info osm_rosdn_juniper_contrail*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-odl_openflow
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_odlof.egg-info osm_rosdn_odlof*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-onos_openflow
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_onosof.egg-info osm_rosdn_onosof*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-onos_vpls
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_onos_vpls.egg-info osm_rosdn_onos_vpls*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-aws
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_aws.egg-info osm_rovim_aws*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-azure
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_azure.egg-info osm_rovim_azure*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-openstack
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_openstack.egg-info osm_rovim_openstack*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-openvim
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_openvim.egg-info osm_rovim_openvim*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-vmware
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_vmware.egg-info osm_rovim_vmware*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-gcp
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_gcp.egg-info osm_rovim_gcp*.tar.gz'