# devops-stages/stage-build.sh
#
-FROM ubuntu:20.04
+FROM ubuntu:22.04
ARG APT_PROXY
RUN if [ -n "$APT_PROXY" ] ; then \
python3 \
python3-all \
python3-dev \
- python3-setuptools
-
-RUN python3 -m easy_install pip==21.3.1
-RUN pip install tox==3.24.5
+ python3-setuptools \
+ python3-pip \
+ tox
ENV LC_ALL C.UTF-8
ENV LANG C.UTF-8
# under the License.
############################################################################
-FROM ubuntu:20.04 as INSTALL
+FROM ubuntu:22.04 as INSTALL
WORKDIR /build
RUN python3 -m build /build/RO-VIM-gcp && \
python3 -m pip install /build/RO-VIM-gcp/dist/*.whl
-FROM ubuntu:20.04
+RUN python3 -m build /build/RO-SDN-tapi && \
+ python3 -m pip install /build/RO-SDN-tapi/dist/*.whl
+
+FROM ubuntu:22.04
RUN DEBIAN_FRONTEND=noninteractive apt-get --yes update && \
DEBIAN_FRONTEND=noninteractive apt-get --yes install python3-minimal
COPY --from=INSTALL /usr/lib/python3/dist-packages /usr/lib/python3/dist-packages
-COPY --from=INSTALL /usr/local/lib/python3.8/dist-packages /usr/local/lib/python3.8/dist-packages
+COPY --from=INSTALL /usr/local/lib/python3.10/dist-packages /usr/local/lib/python3.10/dist-packages
COPY --from=INSTALL /usr/bin/genisoimage /usr/bin/genisoimage
COPY --from=INSTALL /etc/protocols /etc/protocols
EXPOSE 9090
+# Creating the user for the app
+RUN groupadd -g 1000 appuser && \
+ useradd -u 1000 -g 1000 -d /app appuser && \
+ mkdir -p /app/osm_ro && \
+ mkdir -p /app/storage/kafka && \
+ mkdir /app/log && \
+ chown -R appuser:appuser /app
+
+WORKDIR /app/osm_ro
+
+# Changing the security context
+USER appuser
+
# Two mysql databases are needed (DB and DB_OVIM). They can be hosted on the same or separate containers
# These ENV must be provided
ENV RO_DB_HOST=""
vdur_path (str): Path of VDUR in DB
"""
if server.status in openStackvmStatusOk:
- vdur_vim_info_update["vim_status"] = vdur_update[
- vdur_path + ".status"
- ] = server.status
+ vdur_vim_info_update["vim_status"] = vdur_update[vdur_path + ".status"] = (
+ server.status
+ )
else:
- vdur_vim_info_update["vim_status"] = vdur_update[
- vdur_path + ".status"
- ] = server.status
+ vdur_vim_info_update["vim_status"] = vdur_update[vdur_path + ".status"] = (
+ server.status
+ )
vdur_vim_info_update["vim_message"] = "VIM status reported " + server.status
vdur_vim_info_update["vim_details"] = self._get_server_info(server)
vdur_vim_info_update["vim_id"] = server.id
- vdur_vim_info_update["vim_name"] = vdur_update[
- vdur_path + ".name"
- ] = server.name
+ vdur_vim_info_update["vim_name"] = vdur_update[vdur_path + ".name"] = (
+ server.name
+ )
@staticmethod
def get_interface_info(
"""
current_ip_address = MonitorVms._get_current_ip_address(interface_info)
if current_ip_address:
- vdur_update[
- vdur_path + ".interfaces." + str(index) + ".ip-address"
- ] = current_ip_address
+ vdur_update[vdur_path + ".interfaces." + str(index) + ".ip-address"] = (
+ current_ip_address
+ )
if old_interface.get("mgmt_vdu_interface"):
vdur_update[vdur_path + ".ip-address"] = current_ip_address
if old_interface.get("mgmt_vnf_interface"):
vnfr_update[vnfr_id + ".ip-address"] = current_ip_address
- vdur_update[
- vdur_path + ".interfaces." + str(index) + ".mac-address"
- ] = interface_info.get("mac_address")
+ vdur_update[vdur_path + ".interfaces." + str(index) + ".mac-address"] = (
+ interface_info.get("mac_address")
+ )
@staticmethod
def _get_current_ip_address(interface_info: dict) -> Optional[str]:
vdur_vim_info_update["interfaces"][index].update(
{
"mac_address": interface_info["mac_address"],
- "ip_address": interface_info["fixed_ips"][0].get("ip_address")
- if interface_info.get("fixed_ips")
- else None,
+ "ip_address": (
+ interface_info["fixed_ips"][0].get("ip_address")
+ if interface_info.get("fixed_ips")
+ else None
+ ),
"vim_net_id": interface_info["network_id"],
"vim_info": self.serialize(interface_info),
- "compute_node": server.to_dict()["OS-EXT-SRV-ATTR:host"]
- if server.to_dict().get("OS-EXT-SRV-ATTR:host")
- else None,
+ "compute_node": (
+ server.to_dict()["OS-EXT-SRV-ATTR:host"]
+ if server.to_dict().get("OS-EXT-SRV-ATTR:host")
+ else None
+ ),
}
)
for index, old_interface in enumerate(existing_vim_info["interfaces"]):
interface_info = self.get_interface_info(ports, old_interface, server)
if not interface_info:
- vdur_vim_info_update[
- "vim_message"
- ] = f"Interface {old_interface['vim_interface_id']} deleted externally."
+ vdur_vim_info_update["vim_message"] = (
+ f"Interface {old_interface['vim_interface_id']} deleted externally."
+ )
else:
if interface_info.get("status") in openStacknetStatusOk:
# limitations under the License.
##
+from copy import deepcopy
from http import HTTPStatus
from itertools import product
import logging
"image": Ns._process_image_params,
"flavor": Ns._process_flavor_params,
"vdu": Ns._process_vdu_params,
+ "classification": Ns._process_classification_params,
+ "sfi": Ns._process_sfi_params,
+ "sf": Ns._process_sf_params,
+ "sfp": Ns._process_sfp_params,
"affinity-or-anti-affinity-group": Ns._process_affinity_group_params,
+ "shared-volumes": Ns._process_shared_volumes_params,
}
self.db_path_map = {
"net": "vld",
"image": "image",
"flavor": "flavor",
"vdu": "vdur",
+ "classification": "classification",
+ "sfi": "sfi",
+ "sf": "sf",
+ "sfp": "sfp",
"affinity-or-anti-affinity-group": "affinity-or-anti-affinity-group",
+ "shared-volumes": "shared-volumes",
}
def init_db(self, target_version):
# Pinning policy "ISOLATE" uses cores as host should not support SMT architecture
# Pinning policy "PREFER" uses threads in case host supports SMT architecture
numa[
- "cores"
- if guest_epa_quota.get("cpu-thread-pinning-policy") == "ISOLATE"
- else "threads"
+ (
+ "cores"
+ if guest_epa_quota.get("cpu-thread-pinning-policy") == "ISOLATE"
+ else "threads"
+ )
] = max(vcpu_count, 1)
local_epa_vcpu_set = True
db = kwargs.get("db")
target_vdur = {}
+ for vnf in indata.get("vnf", []):
+ for vdur in vnf.get("vdur", []):
+ if vdur.get("ns-flavor-id") == target_flavor.get("id"):
+ target_vdur = vdur
+
+ vim_flavor_id = (
+ target_vdur.get("additionalParams", {}).get("OSM", {}).get("vim_flavor_id")
+ )
+ if vim_flavor_id: # vim-flavor-id was passed so flavor won't be created
+ return {"find_params": {"vim_flavor_id": vim_flavor_id}}
+
flavor_data = {
"disk": int(target_flavor["storage-gb"]),
"ram": int(target_flavor["memory-mb"]),
"vcpus": int(target_flavor["vcpu-count"]),
}
- for vnf in indata.get("vnf", []):
- for vdur in vnf.get("vdur", []):
- if vdur.get("ns-flavor-id") == target_flavor.get("id"):
- target_vdur = vdur
-
if db and isinstance(indata.get("vnf"), list):
vnfd_id = indata.get("vnf")[0].get("vnfd-id")
vnfd = db.get_one("vnfds", {"_id": vnfd_id})
flavor_data_name = flavor_data.copy()
flavor_data_name["name"] = target_flavor["name"]
extra_dict["params"] = {"flavor_data": flavor_data_name}
+ return extra_dict
+
+ @staticmethod
+ def _prefix_ip_address(ip_address):
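+        # Normalize a bare IP address to CIDR notation, e.g. "10.0.0.1" -> "10.0.0.1/32".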
+ if "/" not in ip_address:
+ ip_address += "/32"
+ return ip_address
+
+ @staticmethod
+ def _process_ip_proto(ip_proto):
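+        # Map well-known IANA protocol numbers to names: 1 -> "icmp", 6 -> "tcp", 17 -> "udp".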
+ if ip_proto:
+ if ip_proto == 1:
+ ip_proto = "icmp"
+ elif ip_proto == 6:
+ ip_proto = "tcp"
+ elif ip_proto == 17:
+ ip_proto = "udp"
+ return ip_proto
+
+ @staticmethod
+ def _process_classification_params(
+ target_classification: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """[summary]
+
+ Args:
+ target_classification (Dict[str, Any]): Classification dictionary parameters that needs to be processed to create resource on VIM
+ indata (Dict[str, Any]): Deployment info
+ vim_info (Dict[str, Any]):To add items created by OSM on the VIM.
+ target_record_id (str): Task record ID.
+ **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+ Returns:
+ Dict[str, Any]: Return parameters required to create classification and Items on which classification is dependent.
+ """
+ vnfr_id = target_classification["vnfr_id"]
+ vdur_id = target_classification["vdur_id"]
+ port_index = target_classification["ingress_port_index"]
+ extra_dict = {}
+
+ classification_data = {
+ "name": target_classification["id"],
+ "source_port_range_min": target_classification["source-port"],
+ "source_port_range_max": target_classification["source-port"],
+ "destination_port_range_min": target_classification["destination-port"],
+ "destination_port_range_max": target_classification["destination-port"],
+ }
+
+ classification_data["source_ip_prefix"] = Ns._prefix_ip_address(
+ target_classification["source-ip-address"]
+ )
+
+ classification_data["destination_ip_prefix"] = Ns._prefix_ip_address(
+ target_classification["destination-ip-address"]
+ )
+
+ classification_data["protocol"] = Ns._process_ip_proto(
+ int(target_classification["ip-proto"])
+ )
+
+ db = kwargs.get("db")
+ vdu_text = Ns._get_vnfr_vdur_text(db, vnfr_id, vdur_id)
+
+ extra_dict = {"depends_on": [vdu_text]}
+
+ extra_dict = {"depends_on": [vdu_text]}
+ classification_data["logical_source_port"] = "TASK-" + vdu_text
+ classification_data["logical_source_port_index"] = port_index
+
+ extra_dict["params"] = classification_data
+
+ return extra_dict
+
+ @staticmethod
+ def _process_sfi_params(
+ target_sfi: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """[summary]
+
+ Args:
+ target_sfi (Dict[str, Any]): SFI dictionary parameters that needs to be processed to create resource on VIM
+ indata (Dict[str, Any]): deployment info
+ vim_info (Dict[str, Any]): To add items created by OSM on the VIM.
+ target_record_id (str): Task record ID.
+ **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+ Returns:
+ Dict[str, Any]: Return parameters required to create SFI and Items on which SFI is dependent.
+ """
+
+ vnfr_id = target_sfi["vnfr_id"]
+ vdur_id = target_sfi["vdur_id"]
+
+ sfi_data = {
+ "name": target_sfi["id"],
+ "ingress_port_index": target_sfi["ingress_port_index"],
+ "egress_port_index": target_sfi["egress_port_index"],
+ }
+
+ db = kwargs.get("db")
+ vdu_text = Ns._get_vnfr_vdur_text(db, vnfr_id, vdur_id)
+
+ extra_dict = {"depends_on": [vdu_text]}
+ sfi_data["ingress_port"] = "TASK-" + vdu_text
+ sfi_data["egress_port"] = "TASK-" + vdu_text
+
+ extra_dict["params"] = sfi_data
+
+ return extra_dict
+
+ @staticmethod
+ def _get_vnfr_vdur_text(db, vnfr_id, vdur_id):
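+        # Resolve the VDUR of a VNFR into the dependency text
+        # "vnfrs:<vnfr_id>:vdur.<vdur-record-id>" that other tasks use as a "depends_on" reference.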
+ vnf_preffix = "vnfrs:{}".format(vnfr_id)
+ db_vnfr = db.get_one("vnfrs", {"_id": vnfr_id})
+ vdur_list = []
+ vdu_text = ""
+
+ if db_vnfr:
+ vdur_list = [
+ vdur["id"] for vdur in db_vnfr["vdur"] if vdur["vdu-id-ref"] == vdur_id
+ ]
+
+ if vdur_list:
+ vdu_text = vnf_preffix + ":vdur." + vdur_list[0]
+
+ return vdu_text
+
+ @staticmethod
+ def _process_sf_params(
+ target_sf: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """[summary]
+
+ Args:
+ target_sf (Dict[str, Any]): SF dictionary parameters that needs to be processed to create resource on VIM
+ indata (Dict[str, Any]): Deployment info.
+ vim_info (Dict[str, Any]):To add items created by OSM on the VIM.
+ target_record_id (str): Task record ID.
+ **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+ Returns:
+ Dict[str, Any]: Return parameters required to create SF and Items on which SF is dependent.
+ """
+
+ nsr_id = kwargs.get("nsr_id", "")
+ sfis = target_sf["sfis"]
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ extra_dict = {"depends_on": [], "params": []}
+ sf_data = {"name": target_sf["id"], "sfis": sfis}
+
+ for count, sfi in enumerate(sfis):
+ sfi_text = ns_preffix + ":sfi." + sfi
+ sfis[count] = "TASK-" + sfi_text
+ extra_dict["depends_on"].append(sfi_text)
+
+ extra_dict["params"] = sf_data
+
+ return extra_dict
+
+ @staticmethod
+ def _process_sfp_params(
+ target_sfp: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
+ """[summary]
+
+ Args:
+ target_sfp (Dict[str, Any]): SFP dictionary parameters that needs to be processed to create resource on VIM.
+ indata (Dict[str, Any]): Deployment info
+ vim_info (Dict[str, Any]):To add items created by OSM on the VIM.
+ target_record_id (str): Task record ID.
+ **kwargs (Dict[str, Any]): Used to send additional information to the task.
+
+ Returns:
+ Dict[str, Any]: Return parameters required to create SFP and Items on which SFP is dependent.
+ """
+
+ nsr_id = kwargs.get("nsr_id")
+ sfs = target_sfp["sfs"]
+ classifications = target_sfp["classifications"]
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ extra_dict = {"depends_on": [], "params": []}
+ sfp_data = {
+ "name": target_sfp["id"],
+ "sfs": sfs,
+ "classifications": classifications,
+ }
+
+ for count, sf in enumerate(sfs):
+ sf_text = ns_preffix + ":sf." + sf
+ sfs[count] = "TASK-" + sf_text
+ extra_dict["depends_on"].append(sf_text)
+
+ for count, classi in enumerate(classifications):
+ classi_text = ns_preffix + ":classification." + classi
+ classifications[count] = "TASK-" + classi_text
+ extra_dict["depends_on"].append(classi_text)
+
+ extra_dict["params"] = sfp_data
return extra_dict
disk_list.append(persistent_root_disk[vsd["id"]])
return persistent_root_disk
+ return persistent_root_disk
@staticmethod
def find_persistent_volumes(
if not virtual_storage_desc.get("vdu-storage-requirements"):
return False
for item in virtual_storage_desc.get("vdu-storage-requirements", {}):
- if item.get("key") == "keep-volume" and item.get("value") == "true":
+ if item.get("key") == "keep-volume" and item.get("value").lower() == "true":
return True
return False
+ @staticmethod
+ def is_shared_volume(
+ virtual_storage_desc: Dict[str, Any], vnfd_id: str
+ ) -> (str, bool):
+ """Function to decide if the volume type is multi attached or not .
+
+ Args:
+ virtual_storage_desc (Dict[str, Any]): virtual storage description dictionary
+ vnfd_id (str): vnfd id
+
+        Returns:
+            name (str): New name if it is a multiattach disk, otherwise the original id
+            bool: True if the disk is multiattach, False otherwise
+ """
+
+ if vdu_storage_requirements := virtual_storage_desc.get(
+ "vdu-storage-requirements", {}
+ ):
+ for item in vdu_storage_requirements:
+ if (
+ item.get("key") == "multiattach"
+ and item.get("value").lower() == "true"
+ ):
+ name = f"shared-{virtual_storage_desc['id']}-{vnfd_id}"
+ return name, True
+ return virtual_storage_desc["id"], False
+
@staticmethod
def _sort_vdu_interfaces(target_vdu: dict) -> None:
"""Sort the interfaces according to position number.
"size": root_disk["size-of-storage"],
"keep": Ns.is_volume_keeping_required(root_disk),
}
-
disk_list.append(persistent_root_disk[vsd["id"]])
break
persistent_root_disk: dict,
persistent_ordinary_disk: dict,
disk_list: list,
+ extra_dict: dict,
+ vnf_id: str = None,
+ nsr_id: str = None,
) -> None:
"""Fill the disk list by adding persistent ordinary disks.
== "persistent-storage:persistent-storage"
and disk["id"] not in persistent_root_disk.keys()
):
+ name, multiattach = Ns.is_shared_volume(disk, vnf_id)
persistent_ordinary_disk[disk["id"]] = {
+ "name": name,
"size": disk["size-of-storage"],
"keep": Ns.is_volume_keeping_required(disk),
+ "multiattach": multiattach,
}
disk_list.append(persistent_ordinary_disk[disk["id"]])
+ if multiattach: # VDU creation has to wait for shared volumes
+ extra_dict["depends_on"].append(
+ f"nsrs:{nsr_id}:shared-volumes.{name}"
+ )
@staticmethod
def _prepare_vdu_affinity_group_list(
flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
extra_dict = {"depends_on": [image_text, flavor_text]}
net_list = []
-
persistent_root_disk = {}
persistent_ordinary_disk = {}
vdu_instantiation_volumes_list = []
disk_list = []
vnfd_id = vnfr["vnfd-id"]
vnfd = db.get_one("vnfds", {"_id": vnfd_id})
-
# If the position info is provided for all the interfaces, it will be sorted
# according to position number ascendingly.
if all(
if target_vdu.get("additionalParams"):
vdu_instantiation_volumes_list = (
- target_vdu.get("additionalParams").get("OSM").get("vdu_volumes")
+ target_vdu.get("additionalParams").get("OSM", {}).get("vdu_volumes")
)
if vdu_instantiation_volumes_list:
)
# Add the persistent non-root disks to disk_list
Ns._add_persistent_ordinary_disks_to_disk_list(
- target_vdu, persistent_root_disk, persistent_ordinary_disk, disk_list
+ target_vdu,
+ persistent_root_disk,
+ persistent_ordinary_disk,
+ disk_list,
+ extra_dict,
+ vnfd["id"],
+ nsr_id,
)
affinity_group_list = Ns._prepare_vdu_affinity_group_list(
"availability_zone_index": None, # TODO
"availability_zone_list": None, # TODO
}
+ return extra_dict
+ @staticmethod
+ def _process_shared_volumes_params(
+ target_shared_volume: Dict[str, Any],
+ indata: Dict[str, Any],
+ vim_info: Dict[str, Any],
+ target_record_id: str,
+ **kwargs: Dict[str, Any],
+ ) -> Dict[str, Any]:
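+        """Return the VIM creation parameters (name, size, type, keep) for a shared (multiattach) volume."""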
+ extra_dict = {}
+ shared_volume_data = {
+ "size": target_shared_volume["size-of-storage"],
+ "name": target_shared_volume["id"],
+ "type": target_shared_volume["type-of-storage"],
+ "keep": Ns.is_volume_keeping_required(target_shared_volume),
+ }
+ extra_dict["params"] = shared_volume_data
return extra_dict
@staticmethod
extra_dict["params"] = {
"affinity_group_data": affinity_group_data,
}
-
return extra_dict
@staticmethod
vim_details = {}
vim_details_text = existing_vdu["vim_info"][target_id].get("vim_details", None)
+
if vim_details_text:
vim_details = yaml.safe_load(f"{vim_details_text}")
process_params = None
vdu2cloud_init = indata.get("cloud_init_content") or {}
ro_nsr_public_key = db_ro_nsr["public_key"]
-
# According to the type of item, the path, the target_list,
# the existing_list and the method to process params are set
db_path = self.db_path_map[item]
process_params = self.process_params_function_map[item]
- if item in ("net", "vdu"):
+
+ if item in ("sfp", "classification", "sf", "sfi"):
+ db_record = "nsrs:{}:{}".format(nsr_id, db_path)
+ target_vnffg = indata.get("vnffg", [])[0]
+ target_list = target_vnffg[item]
+ existing_list = db_nsr.get(item, [])
+ elif item in ("net", "vdu"):
# This case is specific for the NS VLD (not applied to VDU)
if vnfr is None:
db_record = "nsrs:{}:{}".format(nsr_id, db_path)
)
target_list = target_vnf.get(db_path, []) if target_vnf else []
existing_list = vnfr.get(db_path, [])
- elif item in ("image", "flavor", "affinity-or-anti-affinity-group"):
+ elif item in (
+ "image",
+ "flavor",
+ "affinity-or-anti-affinity-group",
+ "shared-volumes",
+ ):
db_record = "nsrs:{}:{}".format(nsr_id, db_path)
target_list = indata.get(item, [])
existing_list = db_nsr.get(item, [])
else:
            raise NsException("Item not supported: {}".format(item))
-
# ensure all the target_list elements has an "id". If not assign the index as id
if target_list is None:
target_list = []
for target_index, tl in enumerate(target_list):
if tl and not tl.get("id"):
tl["id"] = str(target_index)
-
# step 1 items (networks,vdus,...) to be deleted/updated
for item_index, existing_item in enumerate(existing_list):
target_item = next(
(t for t in target_list if t["id"] == existing_item["id"]),
None,
)
-
for target_vim, existing_viminfo in existing_item.get(
"vim_info", {}
).items():
# step 2 items (networks,vdus,...) to be created
for target_item in target_list:
item_index = -1
-
for item_index, existing_item in enumerate(existing_list):
if existing_item["id"] == target_item["id"]:
break
}
)
self.logger.debug("calculate_diff_items kwargs={}".format(kwargs))
+        if process_params in (
+            Ns._process_sfi_params,
+            Ns._process_sf_params,
+            Ns._process_classification_params,
+            Ns._process_sfp_params,
+        ):
+            kwargs.update({"nsr_id": nsr_id, "db": self.db})
extra_dict = process_params(
target_item,
return diff_items, task_index
+ def _process_vnfgd_sfp(self, sfp):
+ processed_sfp = {}
+        # getting sfp name, sfs and classifications in sfp to store them in processed_sfp
+ processed_sfp["id"] = sfp["id"]
+ sfs_in_sfp = [
+ sf["id"] for sf in sfp.get("position-desc-id", [])[0].get("cp-profile-id")
+ ]
+ classifications_in_sfp = [
+ classi["id"]
+ for classi in sfp.get("position-desc-id", [])[0].get("match-attributes")
+ ]
+
+ # creating a list of sfp with sfs and classifications
+ processed_sfp["sfs"] = sfs_in_sfp
+ processed_sfp["classifications"] = classifications_in_sfp
+
+ return processed_sfp
+
+ def _process_vnfgd_sf(self, sf):
+ processed_sf = {}
+ # getting name of sf
+ processed_sf["id"] = sf["id"]
+ # getting sfis in sf
+ sfis_in_sf = sf.get("constituent-profile-elements")
+ sorted_sfis = sorted(sfis_in_sf, key=lambda i: i["order"])
+ # getting sfis names
+ processed_sf["sfis"] = [sfi["id"] for sfi in sorted_sfis]
+
+ return processed_sf
+
+ def _process_vnfgd_sfi(self, sfi, db_vnfrs):
+ processed_sfi = {}
+ # getting name of sfi
+ processed_sfi["id"] = sfi["id"]
+
+ # getting ports in sfi
+ ingress_port = sfi["ingress-constituent-cpd-id"]
+ egress_port = sfi["egress-constituent-cpd-id"]
+ sfi_vnf_member_index = sfi["constituent-base-element-id"]
+
+ processed_sfi["ingress_port"] = ingress_port
+ processed_sfi["egress_port"] = egress_port
+
+ all_vnfrs = db_vnfrs.values()
+
+ sfi_vnfr = [
+ element
+ for element in all_vnfrs
+ if element["member-vnf-index-ref"] == sfi_vnf_member_index
+ ]
+ processed_sfi["vnfr_id"] = sfi_vnfr[0]["id"]
+
+ sfi_vnfr_cp = sfi_vnfr[0]["connection-point"]
+
+ ingress_port_index = [
+ c for c, element in enumerate(sfi_vnfr_cp) if element["id"] == ingress_port
+ ]
+ ingress_port_index = ingress_port_index[0]
+
+ processed_sfi["vdur_id"] = sfi_vnfr_cp[ingress_port_index][
+ "connection-point-vdu-id"
+ ]
+ processed_sfi["ingress_port_index"] = ingress_port_index
+ processed_sfi["egress_port_index"] = ingress_port_index
+
+ if egress_port != ingress_port:
+ egress_port_index = [
+ c
+ for c, element in enumerate(sfi_vnfr_cp)
+ if element["id"] == egress_port
+ ]
+ processed_sfi["egress_port_index"] = egress_port_index
+
+ return processed_sfi
+
+ def _process_vnfgd_classification(self, classification, db_vnfrs):
+ processed_classification = {}
+
+ processed_classification = deepcopy(classification)
+ classi_vnf_member_index = processed_classification[
+ "constituent-base-element-id"
+ ]
+ logical_source_port = processed_classification["constituent-cpd-id"]
+
+ all_vnfrs = db_vnfrs.values()
+
+ classi_vnfr = [
+ element
+ for element in all_vnfrs
+ if element["member-vnf-index-ref"] == classi_vnf_member_index
+ ]
+ processed_classification["vnfr_id"] = classi_vnfr[0]["id"]
+
+ classi_vnfr_cp = classi_vnfr[0]["connection-point"]
+
+ ingress_port_index = [
+ c
+ for c, element in enumerate(classi_vnfr_cp)
+ if element["id"] == logical_source_port
+ ]
+ ingress_port_index = ingress_port_index[0]
+
+ processed_classification["ingress_port_index"] = ingress_port_index
+ processed_classification["vdur_id"] = classi_vnfr_cp[ingress_port_index][
+ "connection-point-vdu-id"
+ ]
+
+ return processed_classification
+
+ def _update_db_nsr_with_vnffg(self, processed_vnffg, vim_info, nsr_id):
+ """This method used to add viminfo dict to sfi, sf sfp and classification in indata and count info in db_nsr.
+
+ Args:
+ processed_vnffg (Dict[str, Any]): deployment info
+ vim_info (Dict): dictionary to store VIM resource information
+ nsr_id (str): NSR id
+
+ Returns: None
+ """
+
+ nsr_sfi = {}
+ nsr_sf = {}
+ nsr_sfp = {}
+ nsr_classification = {}
+ db_nsr_vnffg = deepcopy(processed_vnffg)
+
+ for count, sfi in enumerate(processed_vnffg["sfi"]):
+ sfi["vim_info"] = vim_info
+ sfi_count = "sfi.{}".format(count)
+ nsr_sfi[sfi_count] = db_nsr_vnffg["sfi"][count]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sfi)
+
+ for count, sf in enumerate(processed_vnffg["sf"]):
+ sf["vim_info"] = vim_info
+ sf_count = "sf.{}".format(count)
+ nsr_sf[sf_count] = db_nsr_vnffg["sf"][count]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sf)
+
+ for count, sfp in enumerate(processed_vnffg["sfp"]):
+ sfp["vim_info"] = vim_info
+ sfp_count = "sfp.{}".format(count)
+ nsr_sfp[sfp_count] = db_nsr_vnffg["sfp"][count]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_sfp)
+
+ for count, classi in enumerate(processed_vnffg["classification"]):
+ classi["vim_info"] = vim_info
+ classification_count = "classification.{}".format(count)
+ nsr_classification[classification_count] = db_nsr_vnffg["classification"][
+ count
+ ]
+
+ self.db.set_list("nsrs", {"_id": nsr_id}, nsr_classification)
+
+ def process_vnffgd_descriptor(
+ self,
+ indata: dict,
+ nsr_id: str,
+ db_nsr: dict,
+ db_vnfrs: dict,
+ ) -> dict:
+ """This method used to process vnffgd parameters from descriptor.
+
+ Args:
+ indata (Dict[str, Any]): deployment info
+ nsr_id (str): NSR id
+ db_nsr: NSR record from DB
+ db_vnfrs: VNFRS record from DB
+
+ Returns:
+ Dict: Processed vnffg parameters.
+ """
+
+ processed_vnffg = {}
+ vnffgd = db_nsr.get("nsd", {}).get("vnffgd")
+ vnf_list = indata.get("vnf", [])
+ vim_text = ""
+
+ if vnf_list:
+ vim_text = "vim:" + vnf_list[0].get("vim-account-id", "")
+
+ vim_info = {}
+ vim_info[vim_text] = {}
+ processed_sfps = []
+ processed_classifications = []
+ processed_sfs = []
+ processed_sfis = []
+
+        # setting up initial empty entries for vnffg items in mongodb.
+ self.db.set_list(
+ "nsrs",
+ {"_id": nsr_id},
+ {
+ "sfi": [],
+ "sf": [],
+ "sfp": [],
+ "classification": [],
+ },
+ )
+
+ vnffg = vnffgd[0]
+ # getting sfps
+ sfps = vnffg.get("nfpd")
+ for sfp in sfps:
+ processed_sfp = self._process_vnfgd_sfp(sfp)
+ # appending the list of processed sfps
+ processed_sfps.append(processed_sfp)
+
+ # getting sfs in sfp
+ sfs = sfp.get("position-desc-id")[0].get("cp-profile-id")
+ for sf in sfs:
+ processed_sf = self._process_vnfgd_sf(sf)
+
+ # appending the list of processed sfs
+ processed_sfs.append(processed_sf)
+
+ # getting sfis in sf
+ sfis_in_sf = sf.get("constituent-profile-elements")
+ sorted_sfis = sorted(sfis_in_sf, key=lambda i: i["order"])
+
+ for sfi in sorted_sfis:
+ processed_sfi = self._process_vnfgd_sfi(sfi, db_vnfrs)
+
+ processed_sfis.append(processed_sfi)
+
+ classifications = sfp.get("position-desc-id")[0].get("match-attributes")
+ # getting classifications from sfp
+ for classification in classifications:
+ processed_classification = self._process_vnfgd_classification(
+ classification, db_vnfrs
+ )
+
+ processed_classifications.append(processed_classification)
+
+ processed_vnffg["sfi"] = processed_sfis
+ processed_vnffg["sf"] = processed_sfs
+ processed_vnffg["classification"] = processed_classifications
+ processed_vnffg["sfp"] = processed_sfps
+
+ # adding viminfo dict to sfi, sf sfp and classification
+ self._update_db_nsr_with_vnffg(processed_vnffg, vim_info, nsr_id)
+
+        # updating indata with processed vnffg parameters
+ indata["vnffg"].append(processed_vnffg)
+
def calculate_all_differences_to_deploy(
self,
indata,
# set list with diffs:
changes_list = []
+        # processing vnffg parameters from the descriptor
+ vnffgd = db_nsr.get("nsd").get("vnffgd")
+ if vnffgd is not None:
+ indata["vnffg"] = []
+ vnf_list = indata["vnf"]
+ processed_vnffg = {}
+
+ # in case of ns-delete
+ if not vnf_list:
+ processed_vnffg["sfi"] = []
+ processed_vnffg["sf"] = []
+ processed_vnffg["classification"] = []
+ processed_vnffg["sfp"] = []
+
+ indata["vnffg"].append(processed_vnffg)
+
+ else:
+ self.process_vnffgd_descriptor(
+ indata=indata,
+ nsr_id=nsr_id,
+ db_nsr=db_nsr,
+ db_vnfrs=db_vnfrs,
+ )
+
+ # getting updated db_nsr having vnffg parameters
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+
+ self.logger.debug(
+ "After processing vnffd parameters indata={} nsr={}".format(
+ indata, db_nsr
+ )
+ )
+
+ for item in ["sfp", "classification", "sf", "sfi"]:
+ self.logger.debug("process NS={} {}".format(nsr_id, item))
+ diff_items, task_index = self.calculate_diff_items(
+ indata=indata,
+ db_nsr=db_nsr,
+ db_ro_nsr=db_ro_nsr,
+ db_nsr_update=db_nsr_update,
+ item=item,
+ tasks_by_target_record_id=tasks_by_target_record_id,
+ action_id=action_id,
+ nsr_id=nsr_id,
+ task_index=task_index,
+ vnfr_id=None,
+ )
+ changes_list += diff_items
+
# NS vld, image and flavor
- for item in ["net", "image", "flavor", "affinity-or-anti-affinity-group"]:
+ for item in [
+ "net",
+ "image",
+ "flavor",
+ "affinity-or-anti-affinity-group",
+ ]:
self.logger.debug("process NS={} {}".format(nsr_id, item))
diff_items, task_index = self.calculate_diff_items(
indata=indata,
# VNF vlds and vdus
for vnfr_id, vnfr in db_vnfrs.items():
        # vnfr_id needs to be set as a global variable for, among others, the nested method _process_vdu_params
- for item in ["net", "vdu"]:
+ for item in ["net", "vdu", "shared-volumes"]:
self.logger.debug("process VNF={} {}".format(vnfr_id, item))
diff_items, task_index = self.calculate_diff_items(
indata=indata,
# Check each VNF of the target
for target_vnf in target_list:
- # Find this VNF in the list from DB
- vnfr_id = target_vnf.get("vnfInstanceId", None)
- if vnfr_id:
- existing_vnf = db_vnfrs.get(vnfr_id)
- db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
- # vim_account_id = existing_vnf.get("vim-account-id", "")
+ # Find this VNF in the list from DB, raise exception if vnfInstanceId is not found
+ vnfr_id = target_vnf["vnfInstanceId"]
+ existing_vnf = db_vnfrs.get(vnfr_id, {})
+ db_record = "vnfrs:{}:{}".format(vnfr_id, db_path)
+ # vim_account_id = existing_vnf.get("vim-account-id", "")
+ target_vdus = target_vnf.get("additionalParams", {}).get("vdu", [])
# Check each VDU of this VNF
- for target_vdu in target_vnf["additionalParams"].get("vdu", None):
+ if not target_vdus:
+ # Create target_vdu_list from DB, if VDUs are not specified
+ target_vdus = []
+                for existing_vdu in existing_vnf.get("vdur", []):
+ vdu_name = existing_vdu.get("vdu-name", None)
+ vdu_index = existing_vdu.get("count-index", 0)
+ vdu_to_be_healed = {"vdu-id": vdu_name, "count-index": vdu_index}
+ target_vdus.append(vdu_to_be_healed)
+ for target_vdu in target_vdus:
vdu_name = target_vdu.get("vdu-id", None)
# For multi instance VDU count-index is mandatory
# For single session VDU count-indes is 0
count_index = target_vdu.get("count-index", 0)
item_index = 0
- existing_instance = None
- for instance in existing_vnf.get("vdur", None):
- if (
- instance["vdu-name"] == vdu_name
- and instance["count-index"] == count_index
- ):
- existing_instance = instance
- break
- else:
- item_index += 1
+ existing_instance = {}
+ if existing_vnf:
+                for instance in existing_vnf.get("vdur", []):
+ if (
+ instance["vdu-name"] == vdu_name
+ and instance["count-index"] == count_index
+ ):
+ existing_instance = instance
+ break
+ else:
+ item_index += 1
target_record_id = "{}.{}".format(db_record, existing_instance["id"])
return_data = {
"status": global_status,
- "details": ". ".join(details)
- if details
- else "progress {}/{}".format(done, total),
+ "details": (
+ ". ".join(details) if details else "progress {}/{}".format(done, total)
+ ),
"nsr_id": nsr_id,
"action_id": action_id,
"tasks": task_list,
extra_dict,
):
self._assign_vim(target_vim)
- target_record = "vnfrs:{}:vdur.{}".format(vnf_id, vdu_index)
+ target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+ vnf_id, vdu_index, target_vim
+ )
target_record_id = "vnfrs:{}:vdur.{}".format(vnf_id, vdu_id)
deployment_info = {
"action_id": action_id,
):
target_vim, vim_info = next(k_v for k_v in vdu["vim_info"].items())
self._assign_vim(target_vim)
- target_record = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu_index)
+ target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+ vnf["_id"], vdu_index, target_vim
+ )
target_record_id = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"])
deployment_info = {
"action_id": action_id,
):
target_vim, vim_info = next(k_v for k_v in vdu["vim_info"].items())
self._assign_vim(target_vim)
- target_record = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu_index)
+ ns_preffix = "nsrs:{}".format(nsr_id)
+ flavor_text = ns_preffix + ":flavor." + vdu["ns-flavor-id"]
+ extra_dict["depends_on"] = [flavor_text]
+ extra_dict["params"].update({"flavor_id": "TASK-" + flavor_text})
+ target_record = "vnfrs:{}:vdur.{}.vim_info.{}".format(
+ vnf["_id"], vdu_index, target_vim
+ )
target_record_id = "vnfrs:{}:vdur.{}".format(vnf["_id"], vdu["id"])
deployment_info = {
"action_id": action_id,
)
return task
+ def verticalscale_flavor_task(
+ self, vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+ ):
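+        """Build the CREATE task for the new flavor appended to the NS record, so the
+        vertical-scale task can depend on it via "TASK-nsrs:<nsr_id>:flavor.<index>"."""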
+ target_vim, vim_info = next(k_v for k_v in vdu["vim_info"].items())
+ self._assign_vim(target_vim)
+ db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
+ target_record = "nsrs:{}:flavor.{}.vim_info.{}".format(
+ nsr_id, len(db_nsr["flavor"]) - 1, target_vim
+ )
+ target_record_id = "nsrs:{}:flavor.{}".format(nsr_id, len(db_nsr["flavor"]) - 1)
+ deployment_info = {
+ "action_id": action_id,
+ "nsr_id": nsr_id,
+ "task_index": task_index,
+ }
+ task = Ns._create_task(
+ deployment_info=deployment_info,
+ target_id=target_vim,
+ item="flavor",
+ action="CREATE",
+ target_record=target_record,
+ target_record_id=target_record_id,
+ extra_dict=extra_dict,
+ )
+ return task
+
def verticalscale(self, session, indata, version, nsr_id, *args, **kwargs):
task_index = 0
extra_dict = {}
+ flavor_extra_dict = {}
now = time()
action_id = indata.get("action_id", str(uuid4()))
step = ""
"vcpus": numVirtualCpu,
"disk": sizeOfStorage,
}
+ flavor_data = {
+ "ram": virtualMemory,
+ "vcpus": numVirtualCpu,
+ "disk": sizeOfStorage,
+ }
+ flavor_extra_dict["find_params"] = {"flavor_data": flavor_data}
+ flavor_extra_dict["params"] = {"flavor_data": flavor_dict}
db_new_tasks = []
step = "Creating Tasks for vertical scaling"
with self.write_lock:
extra_dict["params"] = {
"vim_vm_id": vdu["vim-id"],
"flavor_dict": flavor_dict,
+ "vdu-id-ref": vdu["vdu-id-ref"],
+ "count-index": vdu["count-index"],
+ "vnf_instance_id": vnf_instance_id,
}
+ task = self.verticalscale_flavor_task(
+ vdu,
+ db_vnfr,
+ vdu_index,
+ action_id,
+ nsr_id,
+ task_index,
+ flavor_extra_dict,
+ )
+ db_new_tasks.append(task)
+ task_index += 1
task = self.verticalscale_task(
vdu,
db_vnfr,
#
##
-""""
+"""
This is a thread that interacts with a VIM. It processes TASKs sequentially against a single VIM.
The tasks are stored in the database table ro_tasks
A single ro_task refers to a VIM element (flavor, image, network, ...).
:param target_dict: dictionary to be read
:param args: list of keys to read from target_dict
:param kwargs: only can contain default=value to return if key is not present in the nested dictionary
- :return: The wanted value if exist, None or default otherwise
+ :return: The wanted value if exists, None or default otherwise
"""
for key in args:
if not isinstance(target_dict, dict) or key not in target_dict:
ro_task["target_id"],
vim_id,
ro_vim_item_update.get("vim_status"),
- ro_vim_item_update.get("vim_message")
- if ro_vim_item_update.get("vim_status") != "ACTIVE"
- else "",
+ (
+ ro_vim_item_update.get("vim_message")
+ if ro_vim_item_update.get("vim_status") != "ACTIVE"
+ else ""
+ ),
)
)
return "DONE", ro_vim_item_update_ok
+class VimInteractionClassification(VimInteractionBase):
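+    """Handles VIM SFC flow classifiers; new() creates a "legacy_flow_classifier", delete() removes it."""
+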
+ def new(self, ro_task, task_index, task_depends):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ created = False
+ target_vim = self.my_vims[ro_task["target_id"]]
+
+ try:
+ created = True
+ params = task["params"]
+ params_copy = deepcopy(params)
+
+ name = params_copy.pop("name")
+ logical_source_port_index = int(
+ params_copy.pop("logical_source_port_index")
+ )
+ logical_source_port = params_copy["logical_source_port"]
+
+ if logical_source_port.startswith("TASK-"):
+ vm_id = task_depends[logical_source_port]
+ params_copy["logical_source_port"] = target_vim.refresh_vms_status(
+ [vm_id]
+ )[vm_id]["interfaces"][logical_source_port_index]["vim_interface_id"]
+
+ vim_classification_id = target_vim.new_classification(
+ name, "legacy_flow_classifier", params_copy
+ )
+
+ ro_vim_item_update = {
+ "vim_id": vim_classification_id,
+ "vim_status": "DONE",
+ "created": created,
+ "vim_details": None,
+ "vim_message": None,
+ }
+ self.logger.debug(
+ "task={} {} created={}".format(task_id, ro_task["target_id"], created)
+ )
+
+ return "DONE", ro_vim_item_update
+ except (vimconn.VimConnException, NsWorkerException) as e:
+ self.logger.debug(traceback.format_exc())
+ self.logger.error(
+ "task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_message": str(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+ def delete(self, ro_task, task_index):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ classification_vim_id = ro_task["vim_info"]["vim_id"]
+ ro_vim_item_update_ok = {
+ "vim_status": "DELETED",
+ "created": False,
+ "vim_message": "DELETED",
+ "vim_id": None,
+ }
+
+ try:
+ if classification_vim_id:
+ target_vim = self.my_vims[ro_task["target_id"]]
+ target_vim.delete_classification(classification_vim_id)
+ except vimconn.VimConnNotFoundException:
+ ro_vim_item_update_ok["vim_message"] = "already deleted"
+ except vimconn.VimConnException as e:
+ self.logger.error(
+ "ro_task={} vim={} del-classification={}: {}".format(
+ ro_task["_id"], ro_task["target_id"], classification_vim_id, e
+ )
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_message": "Error while deleting: {}".format(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+ self.logger.debug(
+ "task={} {} del-classification={} {}".format(
+ task_id,
+ ro_task["target_id"],
+ classification_vim_id,
+ ro_vim_item_update_ok.get("vim_message", ""),
+ )
+ )
+
+ return "DONE", ro_vim_item_update_ok
+
+
+class VimInteractionSfi(VimInteractionBase):
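+    """Handles VIM service function instances (SFIs), resolving ingress/egress ports from VDU tasks."""
+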
+ def new(self, ro_task, task_index, task_depends):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ created = False
+ target_vim = self.my_vims[ro_task["target_id"]]
+
+ try:
+ created = True
+ params = task["params"]
+ params_copy = deepcopy(params)
+ name = params_copy["name"]
+ ingress_port = params_copy["ingress_port"]
+ egress_port = params_copy["egress_port"]
+ ingress_port_index = params_copy["ingress_port_index"]
+ egress_port_index = params_copy["egress_port_index"]
+
+ ingress_port_id = ingress_port
+ egress_port_id = egress_port
+
+ vm_id = task_depends[ingress_port]
+
+ if ingress_port.startswith("TASK-"):
+ ingress_port_id = target_vim.refresh_vms_status([vm_id])[vm_id][
+ "interfaces"
+ ][ingress_port_index]["vim_interface_id"]
+
+ if ingress_port == egress_port:
+ egress_port_id = ingress_port_id
+ else:
+ if egress_port.startswith("TASK-"):
+ egress_port_id = target_vim.refresh_vms_status([vm_id])[vm_id][
+ "interfaces"
+ ][egress_port_index]["vim_interface_id"]
+
+ ingress_port_id_list = [ingress_port_id]
+ egress_port_id_list = [egress_port_id]
+
+ vim_sfi_id = target_vim.new_sfi(
+ name, ingress_port_id_list, egress_port_id_list, sfc_encap=False
+ )
+
+ ro_vim_item_update = {
+ "vim_id": vim_sfi_id,
+ "vim_status": "DONE",
+ "created": created,
+ "vim_details": None,
+ "vim_message": None,
+ }
+ self.logger.debug(
+ "task={} {} created={}".format(task_id, ro_task["target_id"], created)
+ )
+
+ return "DONE", ro_vim_item_update
+ except (vimconn.VimConnException, NsWorkerException) as e:
+ self.logger.debug(traceback.format_exc())
+ self.logger.error(
+ "task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_message": str(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+ def delete(self, ro_task, task_index):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ sfi_vim_id = ro_task["vim_info"]["vim_id"]
+ ro_vim_item_update_ok = {
+ "vim_status": "DELETED",
+ "created": False,
+ "vim_message": "DELETED",
+ "vim_id": None,
+ }
+
+ try:
+ if sfi_vim_id:
+ target_vim = self.my_vims[ro_task["target_id"]]
+ target_vim.delete_sfi(sfi_vim_id)
+ except vimconn.VimConnNotFoundException:
+ ro_vim_item_update_ok["vim_message"] = "already deleted"
+ except vimconn.VimConnException as e:
+ self.logger.error(
+ "ro_task={} vim={} del-sfi={}: {}".format(
+ ro_task["_id"], ro_task["target_id"], sfi_vim_id, e
+ )
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_message": "Error while deleting: {}".format(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+ self.logger.debug(
+ "task={} {} del-sfi={} {}".format(
+ task_id,
+ ro_task["target_id"],
+ sfi_vim_id,
+ ro_vim_item_update_ok.get("vim_message", ""),
+ )
+ )
+
+ return "DONE", ro_vim_item_update_ok
+
+
+class VimInteractionSf(VimInteractionBase):
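+    """Handles VIM service functions (SFs), created from the list of their SFI ids."""
+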
+ def new(self, ro_task, task_index, task_depends):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ created = False
+ target_vim = self.my_vims[ro_task["target_id"]]
+
+ try:
+ created = True
+ params = task["params"]
+ params_copy = deepcopy(params)
+ name = params_copy["name"]
+ sfi_list = params_copy["sfis"]
+ sfi_id_list = []
+
+ for sfi in sfi_list:
+ sfi_id = task_depends[sfi] if sfi.startswith("TASK-") else sfi
+ sfi_id_list.append(sfi_id)
+
+ vim_sf_id = target_vim.new_sf(name, sfi_id_list, sfc_encap=False)
+
+ ro_vim_item_update = {
+ "vim_id": vim_sf_id,
+ "vim_status": "DONE",
+ "created": created,
+ "vim_details": None,
+ "vim_message": None,
+ }
+ self.logger.debug(
+ "task={} {} created={}".format(task_id, ro_task["target_id"], created)
+ )
+
+ return "DONE", ro_vim_item_update
+ except (vimconn.VimConnException, NsWorkerException) as e:
+ self.logger.debug(traceback.format_exc())
+ self.logger.error(
+ "task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_message": str(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+ def delete(self, ro_task, task_index):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ sf_vim_id = ro_task["vim_info"]["vim_id"]
+ ro_vim_item_update_ok = {
+ "vim_status": "DELETED",
+ "created": False,
+ "vim_message": "DELETED",
+ "vim_id": None,
+ }
+
+ try:
+ if sf_vim_id:
+ target_vim = self.my_vims[ro_task["target_id"]]
+ target_vim.delete_sf(sf_vim_id)
+ except vimconn.VimConnNotFoundException:
+ ro_vim_item_update_ok["vim_message"] = "already deleted"
+ except vimconn.VimConnException as e:
+ self.logger.error(
+ "ro_task={} vim={} del-sf={}: {}".format(
+ ro_task["_id"], ro_task["target_id"], sf_vim_id, e
+ )
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_message": "Error while deleting: {}".format(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+ self.logger.debug(
+ "task={} {} del-sf={} {}".format(
+ task_id,
+ ro_task["target_id"],
+ sf_vim_id,
+ ro_vim_item_update_ok.get("vim_message", ""),
+ )
+ )
+
+ return "DONE", ro_vim_item_update_ok
+
+
+class VimInteractionSfp(VimInteractionBase):
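+    """Handles VIM service function paths (SFPs), chaining SFs and flow classifications."""
+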
+ def new(self, ro_task, task_index, task_depends):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ created = False
+ target_vim = self.my_vims[ro_task["target_id"]]
+
+ try:
+ created = True
+ params = task["params"]
+ params_copy = deepcopy(params)
+ name = params_copy["name"]
+ sf_list = params_copy["sfs"]
+ classification_list = params_copy["classifications"]
+
+ classification_id_list = []
+ sf_id_list = []
+
+ for classification in classification_list:
+ classi_id = (
+ task_depends[classification]
+ if classification.startswith("TASK-")
+ else classification
+ )
+ classification_id_list.append(classi_id)
+
+ for sf in sf_list:
+ sf_id = task_depends[sf] if sf.startswith("TASK-") else sf
+ sf_id_list.append(sf_id)
+
+ vim_sfp_id = target_vim.new_sfp(
+ name, classification_id_list, sf_id_list, sfc_encap=False
+ )
+
+ ro_vim_item_update = {
+ "vim_id": vim_sfp_id,
+ "vim_status": "DONE",
+ "created": created,
+ "vim_details": None,
+ "vim_message": None,
+ }
+ self.logger.debug(
+ "task={} {} created={}".format(task_id, ro_task["target_id"], created)
+ )
+
+ return "DONE", ro_vim_item_update
+ except (vimconn.VimConnException, NsWorkerException) as e:
+ self.logger.debug(traceback.format_exc())
+ self.logger.error(
+ "task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_message": str(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+ def delete(self, ro_task, task_index):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ sfp_vim_id = ro_task["vim_info"]["vim_id"]
+ ro_vim_item_update_ok = {
+ "vim_status": "DELETED",
+ "created": False,
+ "vim_message": "DELETED",
+ "vim_id": None,
+ }
+
+ try:
+ if sfp_vim_id:
+ target_vim = self.my_vims[ro_task["target_id"]]
+ target_vim.delete_sfp(sfp_vim_id)
+ except vimconn.VimConnNotFoundException:
+ ro_vim_item_update_ok["vim_message"] = "already deleted"
+ except vimconn.VimConnException as e:
+ self.logger.error(
+ "ro_task={} vim={} del-sfp={}: {}".format(
+ ro_task["_id"], ro_task["target_id"], sfp_vim_id, e
+ )
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_message": "Error while deleting: {}".format(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+ self.logger.debug(
+ "task={} {} del-sfp={} {}".format(
+ task_id,
+ ro_task["target_id"],
+ sfp_vim_id,
+ ro_vim_item_update_ok.get("vim_message", ""),
+ )
+ )
+
+ return "DONE", ro_vim_item_update_ok
+
+
class VimInteractionVdu(VimInteractionBase):
max_retries_inject_ssh_key = 20 # 20 times
    time_retries_inject_ssh_key = 30  # every 30 seconds
task = ro_task["tasks"][task_index]
task_id = task["task_id"]
created = False
- created_items = {}
target_vim = self.my_vims[ro_task["target_id"]]
-
try:
created = True
params = task["params"]
)
affinity_group["affinity_group_id"] = affinity_group_id
-
vim_vm_id, created_items = target_vim.new_vminstance(**params_copy)
interfaces = [iface["vim_id"] for iface in params_copy["net_list"]]
ro_task["target_id"],
vim_id,
ro_vim_item_update.get("vim_status"),
- ro_vim_item_update.get("vim_message")
- if ro_vim_item_update.get("vim_status") != "ACTIVE"
- else "",
+ (
+ ro_vim_item_update.get("vim_message")
+ if ro_vim_item_update.get("vim_status") != "ACTIVE"
+ else ""
+ ),
)
)
try:
# FIND
+ vim_image_id = ""
if task.get("find_params"):
- vim_images = target_vim.get_image_list(**task["find_params"])
+ vim_images = target_vim.get_image_list(
+ task["find_params"].get("filter_dict", {})
+ )
if not vim_images:
raise NsWorkerExceptionNotFound(
ro_vim_item_update = {
"vim_id": vim_image_id,
- "vim_status": "DONE",
+ "vim_status": "ACTIVE",
"created": created,
"created_items": created_items,
"vim_details": None,
return "FAILED", ro_vim_item_update
+class VimInteractionSharedVolume(VimInteractionBase):
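+    """Handles VIM shared (multiattach) volumes; volumes created with "keep" are preserved on delete."""
+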
+ def delete(self, ro_task, task_index):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ shared_volume_vim_id = ro_task["vim_info"]["vim_id"]
+ created_items = ro_task["vim_info"]["created_items"]
+ ro_vim_item_update_ok = {
+ "vim_status": "DELETED",
+ "created": False,
+ "vim_message": "DELETED",
+ "vim_id": None,
+ }
+        if created_items and created_items.get(shared_volume_vim_id, {}).get("keep"):
+ ro_vim_item_update_ok = {
+ "vim_status": "ACTIVE",
+ "created": False,
+ "vim_message": None,
+ }
+ return "DONE", ro_vim_item_update_ok
+ try:
+ if shared_volume_vim_id:
+ target_vim = self.my_vims[ro_task["target_id"]]
+ target_vim.delete_shared_volumes(shared_volume_vim_id)
+ except vimconn.VimConnNotFoundException:
+ ro_vim_item_update_ok["vim_message"] = "already deleted"
+ except vimconn.VimConnException as e:
+ self.logger.error(
+ "ro_task={} vim={} del-shared-volume={}: {}".format(
+ ro_task["_id"], ro_task["target_id"], shared_volume_vim_id, e
+ )
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "vim_message": "Error while deleting: {}".format(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+ self.logger.debug(
+ "task={} {} del-shared-volume={} {}".format(
+ task_id,
+ ro_task["target_id"],
+ shared_volume_vim_id,
+ ro_vim_item_update_ok.get("vim_message", ""),
+ )
+ )
+
+ return "DONE", ro_vim_item_update_ok
+
+ def new(self, ro_task, task_index, task_depends):
+ task = ro_task["tasks"][task_index]
+ task_id = task["task_id"]
+ created = False
+ created_items = {}
+ target_vim = self.my_vims[ro_task["target_id"]]
+
+ try:
+ shared_volume_vim_id = None
+ shared_volume_data = None
+
+ if task.get("params"):
+ shared_volume_data = task["params"]
+
+ if shared_volume_data:
+ self.logger.info(
+ f"Creating the new shared_volume for {shared_volume_data}\n"
+ )
+ (
+ shared_volume_name,
+ shared_volume_vim_id,
+ ) = target_vim.new_shared_volumes(shared_volume_data)
+ created = True
+ created_items[shared_volume_vim_id] = {
+ "name": shared_volume_name,
+ "keep": shared_volume_data.get("keep"),
+ }
+
+ ro_vim_item_update = {
+ "vim_id": shared_volume_vim_id,
+ "vim_status": "ACTIVE",
+ "created": created,
+ "created_items": created_items,
+ "vim_details": None,
+ "vim_message": None,
+ }
+ self.logger.debug(
+ "task={} {} new-shared-volume={} created={}".format(
+ task_id, ro_task["target_id"], shared_volume_vim_id, created
+ )
+ )
+
+ return "DONE", ro_vim_item_update
+ except (vimconn.VimConnException, NsWorkerException) as e:
+ self.logger.error(
+ "task={} vim={} new-shared-volume:"
+ " {}".format(task_id, ro_task["target_id"], e)
+ )
+ ro_vim_item_update = {
+ "vim_status": "VIM_ERROR",
+ "created": created,
+ "vim_message": str(e),
+ }
+
+ return "FAILED", ro_vim_item_update
+
+
class VimInteractionFlavor(VimInteractionBase):
def delete(self, ro_task, task_index):
task = ro_task["tasks"][task_index]
created = False
created_items = {}
target_vim = self.my_vims[ro_task["target_id"]]
-
try:
# FIND
vim_flavor_id = None
- if task.get("find_params"):
+ if task.get("find_params", {}).get("vim_flavor_id"):
+ vim_flavor_id = task["find_params"]["vim_flavor_id"]
+ elif task.get("find_params", {}).get("flavor_data"):
try:
flavor_data = task["find_params"]["flavor_data"]
vim_flavor_id = target_vim.get_flavor_id_from_data(flavor_data)
ro_vim_item_update = {
"vim_id": vim_flavor_id,
- "vim_status": "DONE",
+ "vim_status": "ACTIVE",
"created": created,
"created_items": created_items,
"vim_details": None,
try:
affinity_group_vim_id = None
affinity_group_data = None
+ param_affinity_group_id = ""
if task.get("params"):
affinity_group_data = task["params"].get("affinity_group_data")
ro_vim_item_update = {
"vim_id": affinity_group_vim_id,
- "vim_status": "DONE",
+ "vim_status": "ACTIVE",
"created": created,
"created_items": created_items,
"vim_details": None,
target_vim = self.my_vims[ro_task["target_id"]]
try:
+ vim_vm_id = ""
if task.get("params"):
vim_vm_id = task["params"].get("vim_vm_id")
action = task["params"].get("action")
# created = True
ro_vim_item_update = {
"vim_id": vim_vm_id,
- "vim_status": "DONE",
+ "vim_status": "ACTIVE",
"created": created,
"created_items": created_items,
"vim_details": None,
@staticmethod
def _match_pci(port_pci, mapping):
"""
- Check if port_pci matches with mapping
- mapping can have brackets to indicate that several chars are accepted. e.g
+ Check if port_pci matches with mapping.
+ The mapping can have brackets to indicate that several chars are accepted. e.g
pci '0000:af:10.1' matches with '0000:af:1[01].[1357]'
:param port_pci: text
:param mapping: text, can contain brackets to indicate several chars are available
try:
# CREATE
+ db_vim = {}
params = task["params"]
vlds_to_connect = params.get("vlds", [])
associated_vim = params.get("target_vim")
new_port = {
"service_endpoint_id": pmap.get("service_endpoint_id")
or service_endpoint_id,
- "service_endpoint_encapsulation_type": "dot1q"
- if port["type"] == "SR-IOV"
- else None,
+ "service_endpoint_encapsulation_type": (
+ "dot1q" if port["type"] == "SR-IOV" else None
+ ),
"service_endpoint_encapsulation_info": {
"vlan": port.get("vlan"),
"mac": port.get("mac-address"),
refreshed_vim_info = {}
try:
+ vim_vm_id = ""
if task.get("params"):
vim_vm_id = task["params"].get("vim_vm_id")
migrate_host = task["params"].get("migrate_host")
target_vim = self.my_vims[ro_task["target_id"]]
try:
+ params = task["params"]
+ params_copy = deepcopy(params)
+ target_flavor_uuid = task_depends[params_copy["flavor_id"]]
+ vim_vm_id = ""
if task.get("params"):
- vim_vm_id = task["params"].get("vim_vm_id")
- flavor_dict = task["params"].get("flavor_dict")
- self.logger.info("flavor_dict %s", flavor_dict)
-
- try:
- target_flavor_uuid = target_vim.get_flavor_id_from_data(flavor_dict)
- except Exception as e:
- self.logger.info("Cannot find any flavor matching %s.", str(e))
- try:
- target_flavor_uuid = target_vim.new_flavor(flavor_dict)
- except Exception as e:
- self.logger.error("Error creating flavor at VIM %s.", str(e))
+ self.logger.info("vim_vm_id %s", vim_vm_id)
if target_flavor_uuid is not None:
resized_status = target_vim.resize_instance(
ro_vim_item_update = {
"vim_id": vim_vm_id,
- "vim_status": "DONE",
+ "vim_status": "ACTIVE",
"created": created,
"created_items": created_items,
"vim_details": None,
self.db = db
self.item2class = {
"net": VimInteractionNet(self.db, self.my_vims, self.db_vims, self.logger),
+ "shared-volumes": VimInteractionSharedVolume(
+ self.db, self.my_vims, self.db_vims, self.logger
+ ),
+ "classification": VimInteractionClassification(
+ self.db, self.my_vims, self.db_vims, self.logger
+ ),
+ "sfi": VimInteractionSfi(self.db, self.my_vims, self.db_vims, self.logger),
+ "sf": VimInteractionSf(self.db, self.my_vims, self.db_vims, self.logger),
+ "sfp": VimInteractionSfp(self.db, self.my_vims, self.db_vims, self.logger),
"vdu": VimInteractionVdu(self.db, self.my_vims, self.db_vims, self.logger),
"image": VimInteractionImage(
self.db, self.my_vims, self.db_vims, self.logger
target_database = (
"vim_accounts"
if target == "vim"
- else "wim_accounts"
- if target == "wim"
- else "sdns"
+ else "wim_accounts" if target == "wim" else "sdns"
)
+ error_text = ""
try:
step = "Getting {} from db".format(target_id)
"""
Load or reload a vim_account, sdn_controller or wim_account.
Read content from database, load the plugin if not loaded.
- In case of error loading the plugin, it load a failing VIM_connector
+ In case of error loading the plugin, it loads a failing VIM_connector
It fills self db_vims dictionary, my_vims dictionary and vim_targets list
:param target_id: Contains type:_id; where type can be 'vim', ...
:return: None if ok, descriptive text if error
target_database = (
"vim_accounts"
if target == "vim"
- else "wim_accounts"
- if target == "wim"
- else "sdns"
+ else "wim_accounts" if target == "wim" else "sdns"
)
plugin_name = ""
vim = None
and task["action"] == "CREATE"
):
# set to finished
- db_update["tasks.{}.status".format(index)] = task[
- "status"
- ] = "FINISHED"
+ db_update["tasks.{}.status".format(index)] = task["status"] = (
+ "FINISHED"
+ )
elif task["action"] == "CREATE" and task["status"] not in (
"FINISHED",
"SUPERSEDED",
"""
my_task = ro_task["tasks"][task_index]
task_id = my_task["task_id"]
- task_status = None
if my_task["status"] == "FAILED":
return None, None # TODO need to be retry??
task_path = "tasks.{}.status".format(task_index)
try:
db_vim_info_update = None
+ dependency_ro_task = {}
if task["status"] == "SCHEDULED":
# check if tasks that this depends on have been completed
task_depends[dependency_task_id] = dependency_ro_task[
"vim_info"
]["vim_id"]
- task_depends[
- "TASK-{}".format(dependency_task_id)
- ] = dependency_ro_task["vim_info"]["vim_id"]
+ task_depends["TASK-{}".format(dependency_task_id)] = (
+ dependency_ro_task["vim_info"]["vim_id"]
+ )
if dependency_not_completed:
self.logger.warning(
lock_object = LockRenew.add_lock_object(
"ro_tasks", ro_task, self
)
-
if task["action"] == "DELETE":
(
new_status,
new_status, db_vim_info_update = self.item2class[
task["item"]
].new(ro_task, task_index, task_depends)
- # self._create_task(ro_task, task_index, task_depends, db_ro_task_update)
_update_refresh(new_status)
else:
refresh_at = ro_task["vim_info"]["refresh_at"]
lock_object["locked_at"],
lock_object["locked_at"] + self.task_locked_time,
]
- # locked_at contains two times to avoid race condition. In case the lock has been renew, it will
+ # locked_at contains two times to avoid race condition. In case the lock has been renewed, it will
# contain exactly locked_at + self.task_locked_time
LockRenew.remove_lock_object(lock_object)
# modify own task. Try filtering by to_next_check. For race condition if to_check_at has been modified,
# outside this task (by ro_nbi) do not update it
db_ro_task_update["locked_by"] = None
- # locked_at converted to int only for debugging. When has not decimals it means it has been unlocked
+        # locked_at converted to int only for debugging. When it has no decimals it means it has been unlocked
db_ro_task_update["locked_at"] = int(now - self.task_locked_time)
db_ro_task_update["modified_at"] = now
db_ro_task_update["to_check_at"] = next_check_at
[period]
# use env for OSMRO_PERIOD_XXX
-refresh_active: 60 # default 1 min
+refresh_active: -1 # disabling VM monitoring of VIMs by default
refresh_build: 15 # default 15 seconds
refresh_image: 3600 * 10
refresh_error: 600
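+# e.g. exporting OSMRO_PERIOD_REFRESH_ACTIVE=120 would override refresh_active
+# (env var name derived from the OSMRO_PERIOD_XXX pattern above; illustrative)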
indata = filecontent.file # .read()
if filecontent.content_type.value:
- cherrypy.request.headers[
- "Content-Type"
- ] = filecontent.content_type.value
+ cherrypy.request.headers["Content-Type"] = (
+ filecontent.content_type.value
+ )
else:
# raise cherrypy.HTTPError(HTTPStatus.Not_Acceptable,
# "Only 'Content-Type' of type 'application/json' or
if accept:
if "application/json" in accept:
- cherrypy.response.headers[
- "Content-Type"
- ] = "application/json; charset=utf-8"
+ cherrypy.response.headers["Content-Type"] = (
+ "application/json; charset=utf-8"
+ )
a = json.dumps(data, indent=4) + "\n"
return a.encode("utf8")
return ",".join(folders) + " folders deleted\n"
elif args and args[0] == "login":
if not cherrypy.request.headers.get("Authorization"):
- cherrypy.response.headers[
- "WWW-Authenticate"
- ] = 'Basic realm="Access to OSM site", charset="UTF-8"'
+ cherrypy.response.headers["WWW-Authenticate"] = (
+ 'Basic realm="Access to OSM site", charset="UTF-8"'
+ )
cherrypy.response.status = HTTPStatus.UNAUTHORIZED.value
elif args and args[0] == "login2":
if not cherrypy.request.headers.get("Authorization"):
- cherrypy.response.headers[
- "WWW-Authenticate"
- ] = 'Bearer realm="Access to OSM site"'
+ cherrypy.response.headers["WWW-Authenticate"] = (
+ 'Bearer realm="Access to OSM site"'
+ )
cherrypy.response.status = HTTPStatus.UNAUTHORIZED.value
elif args and args[0] == "sleep":
sleep_time = 5
**kwargs,
):
token_info = None
- outdata = None
+ outdata = {}
_format = None
method = "DONE"
rollback = []
cherrypy.response.status = (
HTTPStatus.ACCEPTED.value
if not done
- else HTTPStatus.OK.value
- if outdata is not None
- else HTTPStatus.NO_CONTENT.value
+ else (
+ HTTPStatus.OK.value
+ if outdata is not None
+ else HTTPStatus.NO_CONTENT.value
+ )
)
return self._format_out(outdata, token_info, _format)
http_code_name = e.http_code.name
cherrypy.log("Exception {}".format(e))
else:
- http_code_value = (
- cherrypy.response.status
- ) = HTTPStatus.BAD_REQUEST.value # INTERNAL_SERVER_ERROR
+ http_code_value = cherrypy.response.status = (
+ HTTPStatus.BAD_REQUEST.value
+ ) # INTERNAL_SERVER_ERROR
cherrypy.log("CRITICAL: Exception {}".format(e), traceback=True)
http_code_name = HTTPStatus.BAD_REQUEST.name
"start": True,
},
}
+
tasks_by_target_record_id = {
"nsrs:th47f48-9870-4169-b758-9732e1ff40f3": {
"extra_dict": {
- [ ls, -l, / ]
- [ sh, -xc, "echo $(date) '& rm -rf /'" ]
"""
+vdu_id = "bb9c43f9-10a2-4569-a8a8-957c3528b6d1"
+vnf_id = "665b4165-ce24-4320-bf19-b9a45bade49f"
+target_vim = "vim:f9f370ac-0d44-41a7-9000-457f2332bc35"
+action_id = "bb937f49-3870-4169-b758-9732e1ff40f3"
+nsr_id_2 = "993166fe-723e-4680-ac4b-b1af2541ae31"
+target_record_1 = "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.1.vim_info.vim:f9f370ac-0d44-41a7-9000-457f2332bc35"
+target_record_id = (
+ "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:"
+ "vdur.bb9c43f9-10a2-4569-a8a8-957c3528b6d1"
+)
+expected_result_vertical_scale = {
+ "target_id": target_vim,
+ "action_id": "bb937f49-3870-4169-b758-9732e1ff40f3",
+ "nsr_id": "993166fe-723e-4680-ac4b-b1af2541ae31",
+ "task_id": "bb937f49-3870-4169-b758-9732e1ff40f3:1",
+ "status": "SCHEDULED",
+ "action": "EXEC",
+ "item": "verticalscale",
+ "target_record": target_record_1,
+ "target_record_id": target_record_id,
+ "params": {
+ "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
+ "flavor_dict": "flavor_dict",
+ "flavor_id": "TASK-nsrs:993166fe-723e-4680-ac4b-b1af2541ae31:flavor.0",
+ },
+ "depends_on": ["nsrs:993166fe-723e-4680-ac4b-b1af2541ae31:flavor.0"],
+}
+vdu = {
+ "id": vdu_id,
+ "vim_info": {target_vim: {"interfaces": []}},
+ "ns-flavor-id": "0",
+}
+vnf = {"_id": vnf_id}
+extra_dict_vertical_scale = {
+ "params": {
+ "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
+ "flavor_dict": "flavor_dict",
+ },
+}
+extra_dict_migrate = {
+ "params": {
+ "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
+ "migrate_host": "migrateToHost",
+ },
+}
+expected_result_migrate = {
+ "target_id": target_vim,
+ "action_id": "bb937f49-3870-4169-b758-9732e1ff40f3",
+ "nsr_id": "993166fe-723e-4680-ac4b-b1af2541ae31",
+ "task_id": "bb937f49-3870-4169-b758-9732e1ff40f3:1",
+ "status": "SCHEDULED",
+ "action": "EXEC",
+ "item": "migrate",
+ "target_record": "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.1.vim_info.vim:f9f370ac-0d44-41a7-9000-457f2332bc35",
+ "target_record_id": target_record_id,
+ "params": {
+ "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
+ "migrate_host": "migrateToHost",
+ },
+}
+expected_result_rebuild_start_stop = {
+ "target_id": target_vim,
+ "action_id": "bb937f49-3870-4169-b758-9732e1ff40f3",
+ "nsr_id": "993166fe-723e-4680-ac4b-b1af2541ae31",
+ "task_id": "bb937f49-3870-4169-b758-9732e1ff40f3:0",
+ "status": "SCHEDULED",
+ "action": "EXEC",
+ "item": "update",
+ "target_record_id": "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.bb9c43f9-10a2-4569-a8a8-957c3528b6d1",
+}
+
+
+class TestException(Exception):
+ pass
class CopyingMock(MagicMock):
self.assertTrue(epa_params.called)
self.assertDictEqual(result, expected_result)
+ @patch("osm_ng_ro.ns.Ns._process_epa_params")
+ def test__process_flavor_params_with_vim_flavor_id(
+ self,
+ epa_params,
+ ):
+ expected_result = {
+ "find_params": {
+ "vim_flavor_id": "test.flavor",
+ },
+ }
+ target_flavor = {
+ "id": "test_id",
+ "name": "test",
+ "storage-gb": "10",
+ "memory-mb": "1024",
+ "vcpu-count": "2",
+ }
+ indata = {
+ "vnf": [
+ {
+ "vdur": [
+ {
+ "ns-flavor-id": "test_id",
+ "additionalParams": {
+ "OSM": {"vim_flavor_id": "test.flavor"}
+ },
+ },
+ ],
+ "vnfd-id": "ad6356e3-698c-43bf-9901-3aae9e9b9d18",
+ },
+ ],
+ }
+ vim_info = {}
+ target_record_id = ""
+
+ epa_params.return_value = {}
+
+ result = Ns._process_flavor_params(
+ target_flavor=target_flavor,
+ indata=indata,
+ vim_info=vim_info,
+ target_record_id=target_record_id,
+ )
+
+ self.assertFalse(epa_params.called)
+ self.assertDictEqual(result, expected_result)
+
@patch("osm_ng_ro.ns.Ns._process_epa_params")
def test__process_flavor_params(
self,
self.assertEqual(result, expected_result)
@patch("osm_ng_ro.ns.Ns._assign_vim")
- def test__rebuild_start_stop_task(self, assign_vim):
+ def test__rebuild_start_stop_task__successful(self, assign_vim):
self.ns = Ns()
extra_dict = {}
actions = ["start", "stop", "rebuild"]
- vdu_id = "bb9c43f9-10a2-4569-a8a8-957c3528b6d1"
- vnf_id = "665b4165-ce24-4320-bf19-b9a45bade49f"
vdu_index = "0"
- action_id = "bb937f49-3870-4169-b758-9732e1ff40f3"
- nsr_id = "993166fe-723e-4680-ac4b-b1af2541ae31"
task_index = 0
- target_vim = "vim:f9f370ac-0d44-41a7-9000-457f2332bc35"
- t = "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.bb9c43f9-10a2-4569-a8a8-957c3528b6d1"
for action in actions:
- expected_result = {
- "target_id": "vim:f9f370ac-0d44-41a7-9000-457f2332bc35",
- "action_id": "bb937f49-3870-4169-b758-9732e1ff40f3",
- "nsr_id": "993166fe-723e-4680-ac4b-b1af2541ae31",
- "task_id": "bb937f49-3870-4169-b758-9732e1ff40f3:0",
- "status": "SCHEDULED",
- "action": "EXEC",
- "item": "update",
- "target_record": "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.0",
- "target_record_id": t,
- "params": {
- "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
- "action": action,
- },
+ params = {
+ "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
+ "action": action,
}
- extra_dict["params"] = {
+ extra_dict["params"] = params
+ expected_result = deepcopy(expected_result_rebuild_start_stop)
+ expected_result["target_record"] = (
+ "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.0.vim_info.vim:f9f370ac-0d44-41a7-9000-457f2332bc35"
+ )
+ expected_result["params"] = params
+ task = self.ns.rebuild_start_stop_task(
+ vdu_id,
+ vnf_id,
+ vdu_index,
+ action_id,
+ nsr_id_2,
+ task_index,
+ target_vim,
+ extra_dict,
+ )
+ self.assertDictEqual(task, expected_result)
+
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test__rebuild_start_stop_task__empty_extra_dict__task_without_params(
+ self, assign_vim
+ ):
+ self.ns = Ns()
+ extra_dict = {}
+ actions = ["start", "stop", "rebuild"]
+ vdu_index = "0"
+ task_index = 0
+ expected_result = deepcopy(expected_result_rebuild_start_stop)
+ expected_result["target_record"] = (
+ "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.0.vim_info.vim:f9f370ac-0d44-41a7-9000-457f2332bc35"
+ )
+ for _ in actions:
+ task = self.ns.rebuild_start_stop_task(
+ vdu_id,
+ vnf_id,
+ vdu_index,
+ action_id,
+ nsr_id_2,
+ task_index,
+ target_vim,
+ extra_dict,
+ )
+ self.assertDictEqual(task, expected_result)
+
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test__rebuild_start_stop_task__different_vdu_index__target_record_changes(
+ self, assign_vim
+ ):
+ self.ns = Ns()
+ extra_dict = {}
+ actions = ["start", "stop", "rebuild"]
+ vdu_index = "4"
+ task_index = 0
+ for action in actions:
+ params = {
"vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
"action": action,
}
+ extra_dict["params"] = params
+ expected_result = deepcopy(expected_result_rebuild_start_stop)
+ expected_result["target_record"] = (
+ "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.4.vim_info.vim:f9f370ac-0d44-41a7-9000-457f2332bc35"
+ )
+ expected_result["params"] = params
task = self.ns.rebuild_start_stop_task(
vdu_id,
vnf_id,
vdu_index,
action_id,
- nsr_id,
+ nsr_id_2,
task_index,
target_vim,
extra_dict,
)
- self.assertEqual(task.get("action_id"), action_id)
- self.assertEqual(task.get("nsr_id"), nsr_id)
- self.assertEqual(task.get("target_id"), target_vim)
self.assertDictEqual(task, expected_result)
@patch("osm_ng_ro.ns.Ns._assign_vim")
- def test_verticalscale_task(self, assign_vim):
+ def test__rebuild_start_stop_task__different_task_index__task_id_changes(
+ self, assign_vim
+ ):
self.ns = Ns()
extra_dict = {}
+ actions = ["start", "stop", "rebuild"]
+ vdu_index = "0"
+ task_index = 3
+ for action in actions:
+ params = {
+ "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
+ "action": action,
+ }
+ extra_dict["params"] = params
+ expected_result = deepcopy(expected_result_rebuild_start_stop)
+ expected_result["target_record"] = (
+ "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.0.vim_info.vim:f9f370ac-0d44-41a7-9000-457f2332bc35"
+ )
+ expected_result["params"] = params
+ expected_result["task_id"] = "bb937f49-3870-4169-b758-9732e1ff40f3:3"
+ task = self.ns.rebuild_start_stop_task(
+ vdu_id,
+ vnf_id,
+ vdu_index,
+ action_id,
+ nsr_id_2,
+ task_index,
+ target_vim,
+ extra_dict,
+ )
+ self.assertDictEqual(task, expected_result)
+
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test__rebuild_start_stop_task__assign_vim_raises__task_is_not_created(
+ self, assign_vim
+ ):
+ self.ns = Ns()
+ extra_dict = {}
+ actions = ["start", "stop", "rebuild"]
+ vdu_index = "0"
+ task_index = 0
+ for action in actions:
+ params = {
+ "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
+ "action": action,
+ }
+ extra_dict["params"] = params
+ assign_vim.side_effect = TestException("Can not connect to VIM.")
+ with self.assertRaises(TestException) as err:
+ task = self.ns.rebuild_start_stop_task(
+ vdu_id,
+ vnf_id,
+ vdu_index,
+ action_id,
+ nsr_id_2,
+ task_index,
+ target_vim,
+ extra_dict,
+ )
+ self.assertEqual(task, None)
+ self.assertEqual(str(err.exception), "Can not connect to VIM.")
+
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test_verticalscale_task__successful(self, assign_vim):
+ self.ns = Ns()
vdu_index = "1"
- action_id = "bb937f49-3870-4169-b758-9732e1ff40f3"
- nsr_id = "993166fe-723e-4680-ac4b-b1af2541ae31"
task_index = 1
- target_record_id = (
- "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:"
- "vdur.bb9c43f9-10a2-4569-a8a8-957c3528b6d1"
+ task = self.ns.verticalscale_task(
+ vdu,
+ vnf,
+ vdu_index,
+ action_id,
+ nsr_id_2,
+ task_index,
+ extra_dict_vertical_scale,
+ )
+ self.assertDictEqual(task, expected_result_vertical_scale)
+
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test_verticalscale_task__task_index_changes__task_id_changes(self, assign_vim):
+ self.ns = Ns()
+ vdu_index = "1"
+ task_index = 2
+ expected_result = deepcopy(expected_result_vertical_scale)
+ expected_result["task_id"] = "bb937f49-3870-4169-b758-9732e1ff40f3:2"
+ task = self.ns.verticalscale_task(
+ vdu,
+ vnf,
+ vdu_index,
+ action_id,
+ nsr_id_2,
+ task_index,
+ extra_dict_vertical_scale,
)
+ self.assertDictEqual(task, expected_result)
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test_verticalscale_task__empty_extra_dict__expected_result_without_params(
+ self, assign_vim
+ ):
+ self.ns = Ns()
+ extra_dict = {"params": {}}
+ vdu_index = "1"
+ task_index = 1
expected_result = {
"target_id": "vim:f9f370ac-0d44-41a7-9000-457f2332bc35",
"action_id": "bb937f49-3870-4169-b758-9732e1ff40f3",
"status": "SCHEDULED",
"action": "EXEC",
"item": "verticalscale",
- "target_record": "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.1",
- "target_record_id": target_record_id,
+ "target_record": "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.1.vim_info.vim:f9f370ac-0d44-41a7-9000-457f2332bc35",
+ "target_record_id": "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.bb9c43f9-10a2-4569-a8a8-957c3528b6d1",
"params": {
- "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
- "flavor_dict": "flavor_dict",
+ "flavor_id": "TASK-nsrs:993166fe-723e-4680-ac4b-b1af2541ae31:flavor.0"
},
+ "depends_on": ["nsrs:993166fe-723e-4680-ac4b-b1af2541ae31:flavor.0"],
}
- vdu = {
- "id": "bb9c43f9-10a2-4569-a8a8-957c3528b6d1",
- "vim_info": {
- "vim:f9f370ac-0d44-41a7-9000-457f2332bc35": {"interfaces": []}
- },
- }
- vnf = {"_id": "665b4165-ce24-4320-bf19-b9a45bade49f"}
- extra_dict["params"] = {
- "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
- "flavor_dict": "flavor_dict",
- }
+
task = self.ns.verticalscale_task(
- vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+ vdu, vnf, vdu_index, action_id, nsr_id_2, task_index, extra_dict
)
-
self.assertDictEqual(task, expected_result)
@patch("osm_ng_ro.ns.Ns._assign_vim")
- def test_migrate_task(self, assign_vim):
+ def test_verticalscale_task__assign_vim_raises__task_is_not_created(
+ self, assign_vim
+ ):
+ self.ns = Ns()
+ vdu_index = "1"
+ task_index = 1
+ assign_vim.side_effect = TestException("Can not connect to VIM.")
+ with self.assertRaises(TestException) as err:
+ task = self.ns.verticalscale_task(
+ vdu,
+ vnf,
+ vdu_index,
+ action_id,
+ nsr_id_2,
+ task_index,
+ extra_dict_vertical_scale,
+ )
+ self.assertEqual(task, {})
+ self.assertEqual(str(err.exception), "Can not connect to VIM.")
+
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test_migrate_task__successful(self, assign_vim):
self.ns = Ns()
- extra_dict = {}
vdu_index = "1"
- action_id = "bb937f49-3870-4169-b758-9732e1ff40f3"
- nsr_id = "993166fe-723e-4680-ac4b-b1af2541ae31"
task_index = 1
- target_record_id = (
- "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:"
- "vdur.bb9c43f9-10a2-4569-a8a8-957c3528b6d1"
+ task = self.ns.migrate_task(
+ vdu, vnf, vdu_index, action_id, nsr_id_2, task_index, extra_dict_migrate
)
+ self.assertDictEqual(task, expected_result_migrate)
- expected_result = {
- "target_id": "vim:f9f370ac-0d44-41a7-9000-457f2332bc35",
- "action_id": "bb937f49-3870-4169-b758-9732e1ff40f3",
- "nsr_id": "993166fe-723e-4680-ac4b-b1af2541ae31",
- "task_id": "bb937f49-3870-4169-b758-9732e1ff40f3:1",
- "status": "SCHEDULED",
- "action": "EXEC",
- "item": "migrate",
- "target_record": "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.1",
- "target_record_id": target_record_id,
- "params": {
- "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
- "migrate_host": "migrateToHost",
- },
- }
- vdu = {
- "id": "bb9c43f9-10a2-4569-a8a8-957c3528b6d1",
- "vim_info": {
- "vim:f9f370ac-0d44-41a7-9000-457f2332bc35": {"interfaces": []}
- },
- }
- vnf = {"_id": "665b4165-ce24-4320-bf19-b9a45bade49f"}
- extra_dict["params"] = {
- "vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
- "migrate_host": "migrateToHost",
- }
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test_migrate_task__empty_extra_dict__task_without_params(self, assign_vim):
+ self.ns = Ns()
+ extra_dict = {}
+ vdu_index = "1"
+ task_index = 1
+ expected_result = deepcopy(expected_result_migrate)
+ expected_result.pop("params")
task = self.ns.migrate_task(
- vdu, vnf, vdu_index, action_id, nsr_id, task_index, extra_dict
+ vdu, vnf, vdu_index, action_id, nsr_id_2, task_index, extra_dict
)
+ self.assertDictEqual(task, expected_result)
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test_migrate_task__different_vdu_index__target_record_with_different_vdu_index(
+ self, assign_vim
+ ):
+ self.ns = Ns()
+ vdu_index = "4"
+ task_index = 1
+ expected_result = deepcopy(expected_result_migrate)
+ expected_result["target_record"] = (
+ "vnfrs:665b4165-ce24-4320-bf19-b9a45bade49f:vdur.4.vim_info.vim:f9f370ac-0d44-41a7-9000-457f2332bc35"
+ )
+ task = self.ns.migrate_task(
+ vdu, vnf, vdu_index, action_id, nsr_id_2, task_index, extra_dict_migrate
+ )
self.assertDictEqual(task, expected_result)
+ @patch("osm_ng_ro.ns.Ns._assign_vim")
+ def test_migrate_task__assign_vim_raises__task_is_not_created(self, assign_vim):
+ self.ns = Ns()
+ vdu_index = "1"
+ task_index = 1
+ assign_vim.side_effect = TestException("Can not connect to VIM.")
+ with self.assertRaises(TestException) as err:
+ task = self.ns.migrate_task(
+ vdu, vnf, vdu_index, action_id, nsr_id_2, task_index, extra_dict_migrate
+ )
+ self.assertDictEqual(task, {})
+ self.assertEqual(str(err.exception), "Can not connect to VIM.")
+
class TestProcessVduParams(unittest.TestCase):
def setUp(self):
persist_root_disk = self.ns.find_persistent_root_volumes(
vnfd, target_vdu, vdu_instantiation_volumes_list, disk_list
)
- self.assertEqual(persist_root_disk, None)
+ self.assertEqual(persist_root_disk, {})
mock_volume_keeping_required.assert_not_called()
self.assertEqual(disk_list, [])
}
persistent_ordinary_disk = {}
disk_list = []
+ extra_dict = {}
expected_disk_list = [
{
"size": "10",
"keep": False,
+ "multiattach": False,
+ "name": "persistent-volume2",
}
]
self.ns._add_persistent_ordinary_disks_to_disk_list(
- target_vdu, persistent_root_disk, persistent_ordinary_disk, disk_list
+ target_vdu,
+ persistent_root_disk,
+ persistent_ordinary_disk,
+ disk_list,
+ extra_dict,
)
self.assertEqual(disk_list, expected_disk_list)
mock_volume_keeping_required.assert_called_once_with(ordinary_disk)
}
persistent_ordinary_disk = {}
disk_list = []
+ extra_dict = {}
self.ns._add_persistent_ordinary_disks_to_disk_list(
- target_vdu, persistent_root_disk, persistent_ordinary_disk, disk_list
+ target_vdu,
+ persistent_root_disk,
+ persistent_ordinary_disk,
+ disk_list,
+ extra_dict,
)
self.assertEqual(disk_list, [])
mock_volume_keeping_required.assert_not_called()
vsd = deepcopy(vnfd_wth_persistent_storage)["virtual-storage-desc"]
with self.assertRaises(AttributeError):
Ns._select_persistent_root_disk(vsd, vdu)
+
+
+class TestSFC(unittest.TestCase):
+ def setUp(self):
+ self.ns = Ns()
+ self.logger = CopyingMock(autospec=True)
+
+ @patch("osm_ng_ro.ns.Ns._prefix_ip_address")
+ @patch("osm_ng_ro.ns.Ns._process_ip_proto")
+ @patch("osm_ng_ro.ns.Ns._get_vnfr_vdur_text")
+ def test_process_classification_params(
+ self, mock_get_vnfr_vdur_text, mock_process_ip_proto, mock_prefix_ip_address
+ ):
+ db = Mock()
+ mock_prefix_ip_address.side_effect = ["10.10.10.10/32", "20.20.20.20/32"]
+ mock_process_ip_proto.return_value = "tcp"
+ mock_get_vnfr_vdur_text.return_value = "vdur_text"
+ vim_info, indata, target_record_id = {}, {}, ""
+ target_classification = {
+ "vnfr_id": "1234",
+ "source-ip-address": "10.10.10.10",
+ "destination-ip-address": "20.20.20.20",
+ "ip-proto": "6",
+ "id": "rule1",
+ "source-port": "0",
+ "destination-port": 5555,
+ "vdur_id": "5678",
+ "ingress_port_index": 0,
+ "vim_info": vim_info,
+ }
+ kwargs = {"db": db}
+
+ expected_result = {
+ "depends_on": ["vdur_text"],
+ "params": {
+ "destination_ip_prefix": "20.20.20.20/32",
+ "destination_port_range_max": 5555,
+ "destination_port_range_min": 5555,
+ "logical_source_port": "TASK-vdur_text",
+ "logical_source_port_index": 0,
+ "name": "rule1",
+ "protocol": "tcp",
+ "source_ip_prefix": "10.10.10.10/32",
+ "source_port_range_max": "0",
+ "source_port_range_min": "0",
+ },
+ }
+
+ result = self.ns._process_classification_params(
+ target_classification, indata, vim_info, target_record_id, **kwargs
+ )
+ self.assertEqual(expected_result, result)
+
+ def test_process_sfp_params(self):
+ sf_text = "nsrs:1234:sf.sf1"
+ classi_text = "nsrs:1234:classification.rule1"
+ vim_info, indata, target_record_id = {}, {}, ""
+ target_sfp = {
+ "id": "sfp1",
+ "sfs": ["sf1"],
+ "classifications": ["rule1"],
+ "vim_info": vim_info,
+ }
+
+ kwargs = {"nsr_id": "1234"}
+
+ expected_result = {
+ "depends_on": [sf_text, classi_text],
+ "params": {
+ "name": "sfp1",
+ "sfs": ["TASK-" + sf_text],
+ "classifications": ["TASK-" + classi_text],
+ },
+ }
+
+ result = self.ns._process_sfp_params(
+ target_sfp, indata, vim_info, target_record_id, **kwargs
+ )
+ self.assertEqual(expected_result, result)
+
+ def test_process_sf_params(self):
+ sfi_text = "nsrs::sfi.sfi1"
+ vim_info, indata, target_record_id = {}, {}, ""
+ target_sf = {"id": "sf1", "sfis": ["sfi1"], "vim_info": vim_info}
+
+ kwargs = {"ns_id": "1234"}
+
+ expected_result = {
+ "depends_on": [sfi_text],
+ "params": {
+ "name": "sf1",
+ "sfis": ["TASK-" + sfi_text],
+ },
+ }
+
+ result = self.ns._process_sf_params(
+ target_sf, indata, vim_info, target_record_id, **kwargs
+ )
+ self.assertEqual(expected_result, result)
+
+ @patch("osm_ng_ro.ns.Ns._get_vnfr_vdur_text")
+ def test_process_sfi_params(self, mock_get_vnfr_vdur_text):
+ db = Mock()
+ mock_get_vnfr_vdur_text.return_value = "vdur_text"
+ vim_info, indata, target_record_id = {}, {}, ""
+ target_sfi = {
+ "id": "sfi1",
+ "ingress_port": "vnf-cp0-ext",
+ "egress_port": "vnf-cp0-ext",
+ "vnfr_id": "1234",
+ "vdur_id": "5678",
+ "ingress_port_index": 0,
+ "egress_port_index": 0,
+ "vim_info": {},
+ }
+ kwargs = {"db": db}
+
+ expected_result = {
+ "depends_on": ["vdur_text"],
+ "params": {
+ "name": "sfi1",
+ "ingress_port": "TASK-vdur_text",
+ "egress_port": "TASK-vdur_text",
+ "ingress_port_index": 0,
+ "egress_port_index": 0,
+ },
+ }
+
+ result = self.ns._process_sfi_params(
+ target_sfi, indata, vim_info, target_record_id, **kwargs
+ )
+ self.assertEqual(expected_result, result)
+
+ def test_process_vnfgd_sfp(self):
+ sfp = {
+ "id": "sfp1",
+ "position-desc-id": [
+ {
+ "id": "position1",
+ "cp-profile-id": [{"id": "sf1"}],
+ "match-attributes": [{"id": "rule1"}],
+ }
+ ],
+ }
+ expected_result = {"id": "sfp1", "sfs": ["sf1"], "classifications": ["rule1"]}
+
+ result = self.ns._process_vnfgd_sfp(sfp)
+ self.assertEqual(expected_result, result)
+
+ def test_process_vnfgd_sf(self):
+ sf = {"id": "sf1", "constituent-profile-elements": [{"id": "sfi1", "order": 0}]}
+ expected_result = {"id": "sf1", "sfis": ["sfi1"]}
+
+ result = self.ns._process_vnfgd_sf(sf)
+ self.assertEqual(expected_result, result)
+
+ def test_process_vnfgd_sfi(self):
+ sfi = {
+ "id": "sfi1",
+ "constituent-base-element-id": "vnf",
+ "order": 0,
+ "ingress-constituent-cpd-id": "vnf-cp0-ext",
+ "egress-constituent-cpd-id": "vnf-cp0-ext",
+ }
+ db_vnfrs = {
+ "1234": {
+ "id": "1234",
+ "member-vnf-index-ref": "vnf",
+ "connection-point": [
+ {
+ "name": "vnf-cp0-ext",
+ "connection-point-id": "vdu-eth0-int",
+ "connection-point-vdu-id": "5678",
+ "id": "vnf-cp0-ext",
+ }
+ ],
+ }
+ }
+ expected_result = {
+ "id": "sfi1",
+ "ingress_port": "vnf-cp0-ext",
+ "egress_port": "vnf-cp0-ext",
+ "vnfr_id": "1234",
+ "vdur_id": "5678",
+ "ingress_port_index": 0,
+ "egress_port_index": 0,
+ }
+
+ result = self.ns._process_vnfgd_sfi(sfi, db_vnfrs)
+ self.assertEqual(expected_result, result)
+
+ def test_process_vnfgd_classification(self):
+ classification = {
+ "id": "rule1",
+ "ip-proto": 6,
+ "source-ip-address": "10.10.10.10",
+ "destination-ip-address": "20.20.20.20",
+ "constituent-base-element-id": "vnf",
+ "constituent-cpd-id": "vnf-cp0-ext",
+ "destination-port": 5555,
+ }
+ db_vnfrs = {
+ "1234": {
+ "id": "1234",
+ "member-vnf-index-ref": "vnf",
+ "connection-point": [
+ {
+ "name": "vnf-cp0-ext",
+ "connection-point-id": "vdu-eth0-int",
+ "connection-point-vdu-id": "5678",
+ "id": "vnf-cp0-ext",
+ }
+ ],
+ }
+ }
+
+ expected_result = {
+ "id": "rule1",
+ "ip-proto": 6,
+ "source-ip-address": "10.10.10.10",
+ "destination-ip-address": "20.20.20.20",
+ "destination-port": 5555,
+ "vnfr_id": "1234",
+ "vdur_id": "5678",
+ "ingress_port_index": 0,
+ "constituent-base-element-id": "vnf",
+ "constituent-cpd-id": "vnf-cp0-ext",
+ }
+
+ result = self.ns._process_vnfgd_classification(classification, db_vnfrs)
+ self.assertEqual(expected_result, result)
VimInteractionMigration,
VimInteractionNet,
VimInteractionResize,
+ VimInteractionSharedVolume,
)
from osm_ro_plugin.vimconn import VimConnConnectionException, VimConnException
instance.refresh(ro_task)
+class TestVimInteractionSharedVolume(unittest.TestCase):
+ def setUp(self):
+ module_name = "osm_ro_plugin"
+ self.target_vim = MagicMock(name=f"{module_name}.vimconn.VimConnector")
+ self.task_depends = None
+
+ patches = [patch(f"{module_name}.vimconn.VimConnector", self.target_vim)]
+
+ # Enabling mocks and add cleanups
+ for mock in patches:
+ mock.start()
+ self.addCleanup(mock.stop)
+
+ def test__new_shared_volume_ok(self):
+ """
+ create a shared volume with attributes set in params
+ """
+ db = "test_db"
+ logger = "test_logger"
+ my_vims = "test-vim"
+ db_vims = {
+ 0: {
+ "config": {},
+ },
+ }
+
+ instance = VimInteractionSharedVolume(db, logger, my_vims, db_vims)
+ with patch.object(instance, "my_vims", [self.target_vim]), patch.object(
+ instance, "logger", logging
+ ), patch.object(instance, "db_vims", db_vims):
+ ro_task = {
+ "target_id": 0,
+ "tasks": {
+ "task_index_1": {
+ "target_id": 0,
+ "action_id": "123456",
+ "nsr_id": "654321",
+ "task_id": "123456:1",
+ "status": "SCHEDULED",
+ "action": "CREATE",
+ "item": "test_item",
+ "target_record": "test_target_record",
+ "target_record_id": "test_target_record_id",
+ # values coming from extra_dict
+ "params": {
+ "shared_volume_data": {
+ "size": "10",
+ "name": "shared-volume",
+ "type": "multiattach",
+ }
+ },
+ "find_params": {},
+ "depends_on": "test_depends_on",
+ },
+ },
+ }
+ task_index = "task_index_1"
+ self.target_vim.new_shared_volumes.return_value = ("", "shared-volume")
+ result = instance.new(ro_task, task_index, self.task_depends)
+ self.assertEqual(result[0], "DONE")
+ self.assertEqual(result[1].get("vim_id"), "shared-volume")
+ self.assertEqual(result[1].get("created"), True)
+ self.assertEqual(result[1].get("vim_status"), "ACTIVE")
+
+ def test__new_shared_volume_failed(self):
+ """
+ creating a shared volume with attributes set in params fails
+ """
+ db = "test_db"
+ logger = "test_logger"
+ my_vims = "test-vim"
+ db_vims = {
+ 0: {
+ "config": {},
+ },
+ }
+
+ instance = VimInteractionSharedVolume(db, logger, my_vims, db_vims)
+ with patch.object(instance, "my_vims", [self.target_vim]), patch.object(
+ instance, "logger", logging
+ ), patch.object(instance, "db_vims", db_vims):
+ ro_task = {
+ "target_id": 0,
+ "tasks": {
+ "task_index_1": {
+ "target_id": 0,
+ "action_id": "123456",
+ "nsr_id": "654321",
+ "task_id": "123456:1",
+ "status": "SCHEDULED",
+ "action": "CREATE",
+ "item": "test_item",
+ "target_record": "test_target_record",
+ "target_record_id": "test_target_record_id",
+ # values coming from extra_dict
+ "params": {
+ "shared_volume_data": {
+ "size": "10",
+ "name": "shared-volume",
+ "type": "multiattach",
+ }
+ },
+ "find_params": {},
+ "depends_on": "test_depends_on",
+ },
+ },
+ }
+ task_index = "task_index_1"
+ self.target_vim.new_shared_volumes.side_effect = VimConnException(
+ "Connection failed."
+ )
+ result = instance.new(ro_task, task_index, self.task_depends)
+ self.assertEqual(result[0], "FAILED")
+ self.assertEqual(result[1].get("vim_message"), "Connection failed.")
+ self.assertEqual(result[1].get("created"), False)
+ self.assertEqual(result[1].get("vim_status"), "VIM_ERROR")
+
+ def test__delete_shared_volume_ok(self):
+ """
+ Delete a shared volume with attributes set in params
+ """
+ db = "test_db"
+ logger = "test_logger"
+ my_vims = "test-vim"
+ db_vims = {
+ 0: {
+ "config": {},
+ },
+ }
+
+ instance = VimInteractionSharedVolume(db, logger, my_vims, db_vims)
+ with patch.object(instance, "my_vims", [self.target_vim]), patch.object(
+ instance, "logger", logging
+ ), patch.object(instance, "db_vims", db_vims):
+ ro_task = {
+ "target_id": 0,
+ "tasks": {
+ "task_index_3": {
+ "target_id": 0,
+ "task_id": "123456:1",
+ },
+ },
+ "vim_info": {
+ "created": False,
+ "created_items": None,
+ "vim_id": "sample_shared_volume_id_3",
+ "vim_name": "sample_shared_volume_3",
+ "vim_status": None,
+ "vim_details": "some-details",
+ "vim_message": None,
+ "refresh_at": None,
+ },
+ }
+
+ task_index = "task_index_3"
+ self.target_vim.delete_shared_volumes.return_value = True
+ result = instance.delete(ro_task, task_index)
+ self.assertEqual(result[0], "DONE")
+ self.assertEqual(result[1].get("vim_id"), None)
+ self.assertEqual(result[1].get("created"), False)
+ self.assertEqual(result[1].get("vim_status"), "DELETED")
+
+ def test__delete_shared_volume_failed(self):
+ """
+ Deleting a shared volume with attributes set in params fails
+ """
+ db = "test_db"
+ logger = "test_logger"
+ my_vims = "test-vim"
+ db_vims = {
+ 0: {
+ "config": {},
+ },
+ }
+
+ instance = VimInteractionSharedVolume(db, logger, my_vims, db_vims)
+ with patch.object(instance, "my_vims", [self.target_vim]), patch.object(
+ instance, "logger", logging
+ ), patch.object(instance, "db_vims", db_vims):
+ ro_task = {
+ "_id": "122436:1",
+ "target_id": 0,
+ "tasks": {
+ "task_index_3": {
+ "target_id": 0,
+ "task_id": "123456:1",
+ },
+ },
+ "vim_info": {
+ "created": False,
+ "created_items": None,
+ "vim_id": "sample_shared_volume_id_3",
+ "vim_name": "sample_shared_volume_3",
+ "vim_status": None,
+ "vim_details": "some-details",
+ "vim_message": None,
+ "refresh_at": None,
+ },
+ }
+
+ task_index = "task_index_3"
+ self.target_vim.delete_shared_volumes.side_effect = VimConnException(
+ "Connection failed."
+ )
+ result = instance.delete(ro_task, task_index)
+ self.assertEqual(result[0], "FAILED")
+ self.assertEqual(
+ result[1].get("vim_message"), "Error while deleting: Connection failed."
+ )
+ self.assertEqual(result[1].get("vim_status"), "VIM_ERROR")
+
+
class TestVimInteractionAffinityGroup(unittest.TestCase):
def setUp(self):
module_name = "osm_ro_plugin"
self.assertEqual(result[0], "DONE")
self.assertEqual(result[1].get("vim_id"), "sample_affinity_group_id_1")
self.assertEqual(result[1].get("created"), True)
- self.assertEqual(result[1].get("vim_status"), "DONE")
+ self.assertEqual(result[1].get("vim_status"), "ACTIVE")
def test__new_affinity_group_failed(self):
"""
self.assertEqual(result[0], "DONE")
self.assertEqual(result[1].get("vim_id"), None)
self.assertEqual(result[1].get("created"), False)
- self.assertEqual(result[1].get("vim_status"), "DONE")
+ self.assertEqual(result[1].get("vim_status"), "ACTIVE")
def test__delete_affinity_group_ok(self):
"""
"params": {
"vim_vm_id": "f37b18ef-3caa-4dc9-ab91-15c669b16396",
"flavor_dict": "flavor_dict",
+ "flavor_id": "TASK-nsrs:993166fe-723e-4680-ac4b-b1af2541ae31:flavor.0",
},
}
},
}
+ task_depends = {
+ "TASK-nsrs:993166fe-723e-4680-ac4b-b1af2541ae31:flavor.0": "1"
+ }
task_index = "task_index_1"
- result = instance.exec(ro_task, task_index, self.task_depends)
+ result = instance.exec(ro_task, task_index, task_depends)
self.assertEqual(result[0], "DONE")
- self.assertEqual(result[1].get("vim_status"), "DONE")
+ self.assertEqual(result[1].get("vim_status"), "ACTIVE")
class TestVimInteractionMigration(unittest.TestCase):
},
"image": deploy_item_list,
"flavor": deploy_item_list,
+ "shared-volumes": deploy_item_list,
"ns": {
"type": "object",
"properties": {
self.config = config
self.logger = logger
self.to_terminate = False
- self.loop = None
self.db = None
self.task_locked_time = config["global"]["task_locked_time"]
self.task_relock_time = config["global"]["task_relock_time"]
self.task_max_locked_time = config["global"]["task_max_locked_time"]
- def start(self, db, loop):
+ def start(self, db):
self.db = db
- self.loop = loop
@staticmethod
def add_lock_object(database_table, database_object, thread_object):
async def renew_locks(self):
while not self.to_terminate:
if not self.renew_list:
- await asyncio.sleep(
- self.task_locked_time - self.task_relock_time, loop=self.loop
- )
+ await asyncio.sleep(self.task_locked_time - self.task_relock_time)
continue
lock_object = self.renew_list[0]
)
else:
# wait until it is time to re-lock it
- await asyncio.sleep(time_to_relock, loop=self.loop)
+ await asyncio.sleep(time_to_relock)
def stop(self):
# unlock all locked items
self.next_check_unused_vim = now + self.TIME_CHECK_UNUSED_VIM
self.engine.unload_unused_vims()
- await asyncio.sleep(self.MAX_TIME_UNATTENDED, loop=self.loop)
+ await asyncio.sleep(self.MAX_TIME_UNATTENDED)
async def aiomain(self):
kafka_working = True
while not self.to_terminate:
try:
if not self.aiomain_task_kafka:
- # await self.msg.aiowrite("admin", "echo", "dummy message", loop=self.loop)
for kafka_topic in self.kafka_topics:
- await self.msg.aiowrite(
- kafka_topic, "echo", "dummy message", loop=self.loop
- )
+ await self.msg.aiowrite(kafka_topic, "echo", "dummy message")
kafka_working = True
self.logger.debug("Starting vim_account subscription task")
self.aiomain_task_kafka = asyncio.ensure_future(
self.msg.aioread(
self.kafka_topics,
- loop=self.loop,
group_id=False,
aiocallback=self._msg_callback,
),
- loop=self.loop,
)
if not self.aiomain_task_vim:
- self.aiomain_task_vim = asyncio.ensure_future(
- self.vim_watcher(), loop=self.loop
- )
+ self.aiomain_task_vim = asyncio.ensure_future(self.vim_watcher())
if not self.aiomain_task_renew_lock:
self.aiomain_task_renew_lock = asyncio.ensure_future(
- self.lock_renew.renew_locks(), loop=self.loop
+ self.lock_renew.renew_locks()
)
done, _ = await asyncio.wait(
self.aiomain_task_renew_lock,
],
timeout=None,
- loop=self.loop,
return_when=asyncio.FIRST_COMPLETED,
)
)
kafka_working = False
- await asyncio.sleep(10, loop=self.loop)
+ await asyncio.sleep(10)
def run(self):
"""
)
)
- self.lock_renew.start(self.db, self.loop)
+ self.lock_renew.start(self.db)
if not self.msg:
config_msg = self.config["message"].copy()
- config_msg["loop"] = self.loop
if config_msg["driver"] == "local":
self.msg = msglocal.MsgLocal()
self.logger.info("Starting")
while not self.to_terminate:
try:
- self.loop.run_until_complete(
- asyncio.ensure_future(self.aiomain(), loop=self.loop)
- )
- # except asyncio.CancelledError:
- # break # if cancelled it should end, breaking loop
+ asyncio.run(self.main_task())
except Exception as e:
if not self.to_terminate:
self.logger.exception(
self._stop()
- self.loop.close()
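+ # Wrapper coroutine so asyncio.run() owns event-loop creation and teardown,
+ # replacing the explicit self.loop management removed above.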
+ async def main_task(self):
+ task = asyncio.ensure_future(self.aiomain())
+ await task
+
async def _msg_callback(self, topic, command, params):
"""
Callback to process a received message from kafka
self.lock_renew.to_terminate = True
if self.aiomain_task_kafka:
- self.loop.call_soon_threadsafe(self.aiomain_task_kafka.cancel)
+ self.aiomain_task_kafka.cancel()
if self.aiomain_task_vim:
- self.loop.call_soon_threadsafe(self.aiomain_task_vim.cancel)
+ self.aiomain_task_vim.cancel()
if self.aiomain_task_renew_lock:
- self.loop.call_soon_threadsafe(self.aiomain_task_renew_lock.cancel)
+ self.aiomain_task_renew_lock.cancel()
self.lock_renew.stop()
self.logger.debug("get_of_rules " + error_text)
info = of_response.json()
- if type(info) != dict:
+ if not isinstance(info, dict):
self.logger.error(
"get_of_rules. Unexpected response not a dict %s", str(type(info))
)
self.logger.debug("obtain_port_correspondence " + error_text)
info = of_response.json()
- if type(info) != dict:
+ if not isinstance(info, dict):
raise OpenflowConnUnexpectedResponse(
"unexpected openflow port-desc response, "
"not a dict. Wrong version?"
"'portDesc' not found. Wrong version?"
)
- if (
- type(info["portDesc"]) != list
- and type(info["portDesc"]) != tuple
- ):
+ if not isinstance(info["portDesc"], (list, tuple)):
raise OpenflowConnUnexpectedResponse(
"unexpected openflow port-desc response at "
"'portDesc', not a list. Wrong version?"
site_network_accesses = {}
site_network_access_list = []
site_network_access_list.append(site_network_access)
- site_network_accesses[
- "ietf-l2vpn-svc:site-network-access"
- ] = site_network_access_list
+ site_network_accesses["ietf-l2vpn-svc:site-network-access"] = (
+ site_network_access_list
+ )
conn_info_d = {}
conn_info_d["site"] = connection_point_wan_info["service_mapping_info"][
"site-id"
site_network_accesses = {}
site_network_access_list = []
site_network_access_list.append(site_network_access)
- site_network_accesses[
- "ietf-l2vpn-svc:site-network-access"
- ] = site_network_access_list
+ site_network_accesses["ietf-l2vpn-svc:site-network-access"] = (
+ site_network_access_list
+ )
try:
endpoint_site_network_access_edit = (
self.logger.debug("get_of_switches " + error_text)
info = of_response.json()
- if type(info) != dict:
+ if not isinstance(info, dict):
self.logger.error(
"get_of_switches. Unexpected response, not a dict: %s", str(info)
)
info = of_response.json()
- if type(info) != dict:
+ if not isinstance(info, dict):
self.logger.error(
"get_of_rules. Unexpected response, not a dict: %s",
str(info),
url = url + "/"
self.url = url + "onos/v1/network/configuration"
+ self.hosts_url = url + "onos/v1/hosts"
self.logger.info("ONOS VPLS Connector Initialized.")
def check_credentials(self):
"Exception posting onos network config: {}".format(e)
)
+ def _delete_onos_hosts(self, onos_host_list):
+ try:
+ for host_id in onos_host_list:
+ url = f"{self.hosts_url}/{host_id}"
+ onos_resp = requests.delete(
+ url, auth=HTTPBasicAuth(self.user, self.password)
+ )
+ status_code = onos_resp.status_code
+
+ if status_code != requests.codes.ok:
+ self.logger.info(
+ "Error deleting ONOS host, status code: {}".format(status_code)
+ )
+
+ raise SdnConnectorError(
+ "Error deleting ONOS host, status code: {}".format(status_code),
+ http_code=status_code,
+ )
+ except requests.exceptions.ConnectionError as e:
+ self.logger.info("Exception connecting to onos: %s", e)
+
+ raise SdnConnectorError("Error connecting to onos: {}".format(e))
+ except Exception as e:
+ self.logger.info("Exception posting onos network config: %s", e)
+
+ raise SdnConnectorError(
+ "Exception posting onos network config: {}".format(e)
+ )
+
def create_connectivity_service(self, service_type, connection_points, **kwargs):
self.logger.debug(
"create_connectivity_service, service_type: {}, connection_points: {}".format(
created_ifs = conn_info.get("interfaces", [])
# Obtain current config
onos_config = self._get_onos_netconfig()
+ conn_service_host_list = []
try:
# Removes ports used by network from onos config
+ # In addition, it stores host identifiers (e.g. "FA:16:3E:43:9F:4A/1001")
+ # in conn_service_host_list to be deleted by self._delete_onos_hosts
for vpls in (
onos_config.get("apps", {})
.get("org.onosproject.vpls", {})
.get("vplsList", {})
):
if vpls["name"] == service_uuid:
+ self.logger.debug(f"vpls service to be deleted: {vpls}")
# iterate interfaces to check if must delete them
for interface in vpls["interfaces"]:
for port in onos_config["ports"].values():
)
)
port["interfaces"].remove(port_interface)
+ # TODO: store host_id
+ # host_id = ""
+ # conn_service_host_list.append(f"{host_id}")
onos_config["apps"]["org.onosproject.vpls"]["vpls"][
"vplsList"
].remove(vpls)
self._pop_last_update_time(onos_config)
self._post_onos_netconfig(onos_config)
+ self._delete_onos_hosts(conn_service_host_list)
self.logger.debug(
"deleted connectivity service uuid: {}".format(service_uuid)
)
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+#######################################################################################
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the methods to compose the conn_info data structures for the
+Transport API (TAPI) WIM connector."""
+
+
+def conn_info_compose_unidirectional(
+ service_az_uuid,
+ service_az_endpoints,
+ service_za_uuid,
+ service_za_endpoints,
+ requested_capacity=None,
+ vlan_constraint=None,
+):
+ conn_info_az = {
+ "uuid": service_az_uuid,
+ "endpoints": service_az_endpoints,
+ }
+ conn_info_za = {
+ "uuid": service_za_uuid,
+ "endpoints": service_za_endpoints,
+ }
+ if requested_capacity is not None:
+ conn_info_az["requested_capacity"] = requested_capacity
+ conn_info_za["requested_capacity"] = requested_capacity
+ if vlan_constraint is not None:
+ conn_info_az["vlan_constraint"] = vlan_constraint
+ conn_info_za["vlan_constraint"] = vlan_constraint
+ conn_info = {
+ "az": conn_info_az,
+ "za": conn_info_za,
+ "bidirectional": False,
+ }
+ return conn_info
+
+
+def conn_info_compose_bidirectional(
+ service_uuid,
+ service_endpoints,
+ requested_capacity=None,
+ vlan_constraint=None,
+):
+ conn_info = {
+ "uuid": service_uuid,
+ "endpoints": service_endpoints,
+ "bidirectional": True,
+ }
+ if requested_capacity is not None:
+ conn_info["requested_capacity"] = requested_capacity
+ if vlan_constraint is not None:
+ conn_info["vlan_constraint"] = vlan_constraint
+ return conn_info
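+
+
+# Illustrative sketch (not executed; assumed values): composing a bidirectional
+# service, conn_info_compose_bidirectional("svc-1", ["sip-a", "sip-z"]) returns
+# {"uuid": "svc-1", "endpoints": ["sip-a", "sip-z"], "bidirectional": True}.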
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the exception classes the Transport API (TAPI) WIM connector
+can raise in case of error."""
+
+
+from http import HTTPStatus
+
+from osm_ro_plugin.sdnconn import SdnConnectorError
+
+from .log_messages import (
+ _PREFIX,
+)
+
+
+class WimTapiError(SdnConnectorError):
+ """Base Exception for all WIM TAPI related errors."""
+
+ def __init__(self, message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value):
+ super().__init__(_PREFIX + message)
+ self.http_code = http_code
+
+
+class WimTapiConnectionPointsBadFormat(SdnConnectorError):
+ def __init__(self, connection_points):
+ MESSAGE = "ConnectionPoints({:s}) must be a list or tuple of length 2"
+ message = MESSAGE.format(str(connection_points))
+ super().__init__(message, http_code=HTTPStatus.BAD_REQUEST)
+
+
+class WimTapiIncongruentDirectionality(WimTapiError):
+ def __init__(self, services, service_endpoint_id):
+ MESSAGE = "Incongruent directionality: services={:s} service_endpoint_id={:s}"
+ message = MESSAGE.format(str(services), str(service_endpoint_id))
+ super().__init__(message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+
+class WimTapiIncongruentEndPoints(WimTapiError):
+ def __init__(self, services, service_endpoint_id):
+ MESSAGE = "Incongruent endpoints: services={:s} service_endpoint_id={:s}"
+ message = MESSAGE.format(str(services), str(service_endpoint_id))
+ super().__init__(message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+
+class WimTapiMissingConnPointField(WimTapiError):
+ def __init__(self, connection_point, field_name):
+ MESSAGE = "ConnectionPoint({:s}) has no field '{:s}'"
+ message = MESSAGE.format(str(connection_point), str(field_name))
+ super().__init__(message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+
+class WimTapiMissingMappingField(WimTapiError):
+ def __init__(self, mapping, field_name):
+ MESSAGE = "Mapping({:s}) has no field '{:s}'"
+ message = MESSAGE.format(str(mapping), str(field_name))
+ super().__init__(message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+
+class WimTapiServerNotAvailable(WimTapiError):
+ def __init__(self, message):
+ message = "Server not available: " + message
+ super().__init__(message, http_code=HTTPStatus.SERVICE_UNAVAILABLE)
+
+
+class WimTapiServerRequestFailed(WimTapiError):
+ def __init__(self, message, http_code):
+ message = "Server request failed: " + message
+ super().__init__(message, http_code=http_code)
+
+
+class WimTapiSipNotFound(WimTapiError):
+ def __init__(self, sip_id, sips):
+ MESSAGE = "SIP({:s}) not found in context SIPs({:s})"
+ message = MESSAGE.format(str(sip_id), str(sips))
+ super().__init__(message, http_code=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+
+class WimTapiConnectivityServiceCreateFailed(WimTapiError):
+ def __init__(self, name, service_id, status_code, reply):
+ MESSAGE = "Create ConnectivityService({:s}, {:s}) Failed: reply={:s}"
+ message = MESSAGE.format(str(name), str(service_id), str(reply))
+ super().__init__(message, http_code=status_code)
+
+
+class WimTapiConnectivityServiceGetStatusFailed(WimTapiError):
+ def __init__(self, name, service_id, status_code, reply):
+ MESSAGE = "Get Status of ConnectivityService({:s}, {:s}) Failed: reply={:s}"
+ message = MESSAGE.format(str(name), str(service_id), str(reply))
+ super().__init__(message, http_code=status_code)
+
+
+class WimTapiConnectivityServiceDeleteFailed(WimTapiError):
+ def __init__(self, name, service_id, status_code, reply):
+ MESSAGE = "Delete ConnectivityService({:s}, {:s}) Failed: reply={:s}"
+ message = MESSAGE.format(str(name), str(service_id), str(reply))
+ super().__init__(message, http_code=status_code)
+
+
+class WimTapiUnsupportedServiceType(SdnConnectorError):
+ def __init__(self, service_type, supported_service_types):
+ MESSAGE = "Unsupported ServiceType({:s}). Supported ServiceTypes({:s})"
+ message = MESSAGE.format(str(service_type), str(supported_service_types))
+ super().__init__(message, http_code=HTTPStatus.BAD_REQUEST)
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the templete strings used to generate log messages for the
+Transport API (TAPI) WIM connector."""
+
+_PREFIX = "WIM TAPI Connector: "
+
+
+LOG_MSG_CREATE_REQUEST = (
+ _PREFIX + "Create Connectivity Service: Request {:s} {:s}: {:s}"
+)
+
+LOG_MSG_CREATE_REPLY = (
+ _PREFIX
+ + "Create Connectivity Service: Reply {:s} {:s}: status_code={:d} reply={:s}"
+)
+
+LOG_MSG_GET_STATUS_REQUEST = (
+ _PREFIX + "Get Connectivity Service Status: Request {:s} {:s}"
+)
+
+LOG_MSG_GET_STATUS_REPLY = (
+ _PREFIX
+ + "Get Connectivity Service Status: Reply {:s} {:s}: status_code={:d} reply={:s}"
+)
+
+LOG_MSG_DELETE_REQUEST = (
+ _PREFIX + "Delete Connectivity Service: Request {:s} {:s}: {:s}"
+)
+
+LOG_MSG_DELETE_REPLY = (
+ _PREFIX
+ + "Delete Connectivity Service: Reply {:s} {:s}: status_code={:d} reply={:s}"
+)
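+
+
+# Illustrative usage (assumed arguments):
+# LOG_MSG_GET_STATUS_REQUEST.format("wim-1", "svc-uuid") yields
+# "WIM TAPI Connector: Get Connectivity Service Status: Request wim-1 svc-uuid"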
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the helper methods used to compose the Transport API (TAPI)
+messages sent by the TAPI WIM connector to the WIM."""
+
+
+import copy
+
+from .message_templates import (
+ CREATE_TEMPLATE,
+ DELETE_TEMPLATE,
+ ENDPOINT_TEMPLATE,
+ REQUESTED_CAPACITY_TEMPLATE,
+ VLAN_CONSTRAINT_TEMPLATE,
+)
+
+
+def compose_requested_capacity(capacity, unit="GBPS"):
+ requested_capacity = copy.deepcopy(REQUESTED_CAPACITY_TEMPLATE)
+ total_size = requested_capacity["total-size"]
+ total_size["value"] = capacity
+ total_size["unit"] = "GBPS"
+ return requested_capacity
+
+
+def compose_vlan_constraint(vlan_id):
+ vlan_constraint = copy.deepcopy(VLAN_CONSTRAINT_TEMPLATE)
+ vlan_constraint["vlan-id"] = vlan_id
+ return vlan_constraint
+
+
+def compose_endpoint(sip):
+ sip_uuid = sip["uuid"]
+ endpoint = copy.deepcopy(ENDPOINT_TEMPLATE)
+ endpoint["service-interface-point"]["service-interface-point-uuid"] = sip_uuid
+ endpoint["layer-protocol-name"] = sip["layer-protocol-name"]
+ # TODO: implement smart selection of layer-protocol-qualifier instead of selecting first one available
+ supported_layer_protocol_qualifier = sip["supported-layer-protocol-qualifier"][0]
+ endpoint["layer-protocol-qualifier"] = supported_layer_protocol_qualifier
+ endpoint["local-id"] = sip_uuid
+ return endpoint
+
+
+def compose_create_request(
+ service_uuid,
+ endpoints,
+ bidirectional=False,
+ requested_capacity=None,
+ vlan_constraint=None,
+):
+ request = copy.deepcopy(CREATE_TEMPLATE)
+ con_svc = request["tapi-connectivity:connectivity-service"][0]
+ con_svc["uuid"] = service_uuid
+ con_svc["connectivity-direction"] = (
+ "BIDIRECTIONAL" if bidirectional else "UNIDIRECTIONAL"
+ )
+ con_svc["end-point"] = endpoints
+ if requested_capacity is not None:
+ con_svc["requested-capacity"] = requested_capacity
+ if vlan_constraint is not None:
+ con_svc["vlan-constraint"] = vlan_constraint
+ return request
+
+
+def compose_delete_request(service_uuid):
+ request = copy.deepcopy(DELETE_TEMPLATE)
+ request["tapi-connectivity:input"]["uuid"] = service_uuid
+ return request
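+
+
+# Illustrative sketch (not executed; assumed values):
+# compose_create_request("svc-1", endpoints=[], bidirectional=True) returns
+# {"tapi-connectivity:connectivity-service": [{"uuid": "svc-1",
+# "connectivity-direction": "BIDIRECTIONAL", "end-point": []}]}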
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the template JSON-encoded messages used to compose the Transport
+API (TAPI) messages sent by the TAPI WIM connector to the WIM."""
+
+REQUESTED_CAPACITY_TEMPLATE = {"total-size": {"value": None, "unit": "GBPS"}}
+
+VLAN_CONSTRAINT_TEMPLATE = {"vlan-id": None}
+
+ENDPOINT_TEMPLATE = {
+ "service-interface-point": {"service-interface-point-uuid": None},
+ "layer-protocol-name": None,
+ "layer-protocol-qualifier": None,
+ "local-id": None,
+}
+
+CREATE_TEMPLATE = {
+ "tapi-connectivity:connectivity-service": [
+ {
+ "uuid": None,
+ # "requested-capacity": REQUESTED_CAPACITY_TEMPLATE,
+ "connectivity-direction": "UNIDIRECTIONAL",
+ "end-point": [],
+ # "vlan-constraint": VLAN_CONSTRAINT_TEMPLATE,
+ }
+ ]
+}
+
+DELETE_TEMPLATE = {"tapi-connectivity:input": {"uuid": None}}
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the ServicesComposer class used by the Transport API (TAPI) WIM
+connector to compose the services based on the service_endpoint_ids and their
+directionality."""
+
+from .exceptions import (
+ WimTapiIncongruentDirectionality,
+ WimTapiIncongruentEndPoints,
+ WimTapiMissingMappingField,
+ WimTapiSipNotFound,
+)
+from .message_composers import (
+ compose_endpoint,
+ compose_requested_capacity,
+ # compose_vlan_constraint,
+)
+
+
+class ServicesComposer:
+ def __init__(self, service_interface_points) -> None:
+ self.sips = service_interface_points
+
+        # if bidirectional
+        # - a single service item is created
+        # - the service item contains the 2 bidirectional SIPs
+        # if unidirectional
+        # - two service items are created (AZ and ZA)
+        # - each service item contains a list of 2 unidirectional SIPs (in, out)
+ self.services = list()
+
+        # TODO: populate the capacity of the connection dynamically
+ self.requested_capacity = compose_requested_capacity(1, unit="GBPS")
+
+ self.vlan_constraint = None
+        # TODO: VLAN needs to be processed per connection point; deactivated for now
+ # if connection_point.get("service_endpoint_encapsulation_type") == "dot1q":
+ # encap_info = connection_point.get("service_endpoint_encapsulation_info", {})
+ # vlan_id = encap_info.get("vlan")
+ # if vlan_id is not None:
+ # vlan_constraint = compose_vlan_constraint(vlan_id)
+
+ def add_bidirectional(self, service_endpoint_id):
+ if len(self.services) == 0:
+ # assume bidirectional, SIP is service_endpoint_id
+ service_interface_point = self.sips[service_endpoint_id]
+ self.services.append([compose_endpoint(service_interface_point)])
+ elif len(self.services) == 1:
+ # is bidirectional, SIP is service_endpoint_id
+ if len(self.services[0]) > 1:
+                # too many endpoints per service
+ raise WimTapiIncongruentEndPoints(self.services, service_endpoint_id)
+ self.services[0].append(compose_endpoint(self.sips[service_endpoint_id]))
+ else:
+ raise WimTapiIncongruentDirectionality(self.services, service_endpoint_id)
+
+ def add_unidirectional(self, service_endpoint_id, sip_input, sip_output):
+ if len(self.services) == 0:
+ # assume unidirectional
+ self.services.append([compose_endpoint(self.sips[sip_output])]) # AZ
+ self.services.append([compose_endpoint(self.sips[sip_input])]) # ZA
+ elif len(self.services) == 2:
+ # is unidirectional
+
+ if len(self.services[0]) > 1:
+                # too many endpoints per service
+ raise WimTapiIncongruentEndPoints(self.services[0], service_endpoint_id)
+ self.services[0].append(compose_endpoint(self.sips[sip_input])) # AZ
+
+ if len(self.services[1]) > 1:
+                # too many endpoints per service
+ raise WimTapiIncongruentEndPoints(self.services[1], service_endpoint_id)
+ self.services[1].insert(0, compose_endpoint(self.sips[sip_output])) # ZA
+ else:
+ raise WimTapiIncongruentDirectionality(self.services, service_endpoint_id)
+
+ def add_service_endpoint(self, service_endpoint_id, mapping):
+ service_mapping_info = mapping.get("service_mapping_info", {})
+
+ if (
+ len(service_mapping_info) == 0
+ or "sip_input" not in service_mapping_info
+ or "sip_output" not in service_mapping_info
+ ):
+ # bidirectional (no mapping or no sip_input or no sip_output)
+ if service_endpoint_id not in self.sips:
+ raise WimTapiSipNotFound(service_endpoint_id, self.sips)
+ self.add_bidirectional(service_endpoint_id)
+
+ else:
+ # unidirectional, sip_input and sip_output provided in mapping
+
+ sip_input = service_mapping_info.get("sip_input")
+ if sip_input is None:
+ raise WimTapiMissingMappingField(
+ mapping, "service_mapping_info.sip_input"
+ )
+
+ if sip_input not in self.sips:
+ raise WimTapiSipNotFound(sip_input, self.sips)
+
+ sip_output = service_mapping_info.get("sip_output")
+ if sip_output is None:
+ raise WimTapiMissingMappingField(
+ mapping, "service_mapping_info.sip_output"
+ )
+
+ if sip_output not in self.sips:
+ raise WimTapiSipNotFound(sip_output, self.sips)
+
+ self.add_unidirectional(service_endpoint_id, sip_input, sip_output)
+
+ def is_bidirectional(self):
+ return len(self.services) == 1
+
+ def dump(self, logger):
+ str_data = "\n".join(
+ [
+ "services_composer {",
+ " services={:s}".format(str(self.services)),
+ " requested_capacity={:s}".format(str(self.requested_capacity)),
+ " vlan_constraint={:s}".format(str(self.vlan_constraint)),
+ "}",
+ ]
+ )
+ logger.debug(str_data)
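+
+
+# Illustrative usage sketch, for documentation only (the SIP uuids match the
+# ones exposed by tests/mock_tapi_handler.py):
+#
+#   composer = ServicesComposer(tapi_client.get_service_interface_points())
+#   composer.add_service_endpoint("R1-eth0", mappings.get("R1-eth0", {}))
+#   composer.add_service_endpoint("R2-eth0", mappings.get("R2-eth0", {}))
+#   composer.is_bidirectional()  # True: a single 2-SIP service was composed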
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the TransportApiClient class used by the Transport API
+(TAPI) WIM connector to interact with the underlying WIM."""
+
+import requests
+
+from .exceptions import (
+ WimTapiConnectivityServiceCreateFailed,
+ WimTapiConnectivityServiceDeleteFailed,
+ WimTapiConnectivityServiceGetStatusFailed,
+ WimTapiServerNotAvailable,
+ WimTapiServerRequestFailed,
+)
+from .log_messages import (
+ LOG_MSG_CREATE_REPLY,
+ LOG_MSG_CREATE_REQUEST,
+ LOG_MSG_DELETE_REPLY,
+ LOG_MSG_DELETE_REQUEST,
+ LOG_MSG_GET_STATUS_REPLY,
+ LOG_MSG_GET_STATUS_REQUEST,
+)
+from .message_composers import (
+ compose_create_request,
+ compose_delete_request,
+)
+
+DEFAULT_TIMEOUT = 30
+
+SUCCESS_HTTP_CODES = {
+ requests.codes.ok, # pylint: disable=no-member
+ requests.codes.created, # pylint: disable=no-member
+ requests.codes.accepted, # pylint: disable=no-member
+ requests.codes.no_content, # pylint: disable=no-member
+}
+
+RESTCONF_DATA_URL = "{:s}/restconf/data"
+RESTCONF_OPER_URL = "{:s}/restconf/operations"
+
+CONTEXT_URL = RESTCONF_DATA_URL + "/tapi-common:context"
+CTX_SIPS_URL = CONTEXT_URL + "/service-interface-point"
+CONN_CTX_URL = CONTEXT_URL + "/tapi-connectivity:connectivity-context"
+CONN_SVC_URL = CONN_CTX_URL + "/connectivity-service"
+DELETE_URL = RESTCONF_OPER_URL + "/tapi-connectivity:delete-connectivity-service"
+
+
+class TransportApiClient:
+ def __init__(self, logger, wim, wim_account, config) -> None:
+ self.logger = logger
+ self.wim_url = wim["wim_url"]
+
+ user = wim_account.get("user")
+ password = wim_account.get("password")
+ self.auth = (
+ None
+ if user is None or user == "" or password is None or password == ""
+ else (user, password)
+ )
+
+ self.headers = {"Content-Type": "application/json"}
+ self.timeout = int(config.get("timeout", DEFAULT_TIMEOUT))
+
+ def get_root_context(self):
+ context_url = CONTEXT_URL.format(self.wim_url)
+
+ try:
+ response = requests.get(
+ context_url, auth=self.auth, headers=self.headers, timeout=self.timeout
+ )
+ http_code = response.status_code
+ except requests.exceptions.RequestException as e:
+ raise WimTapiServerNotAvailable(str(e))
+
+ if http_code != 200:
+ raise WimTapiServerRequestFailed(
+ "Unexpected status code", http_code=http_code
+ )
+
+ return response.json()
+
+ def get_service_interface_points(self):
+ get_sips_url = CTX_SIPS_URL.format(self.wim_url)
+
+ try:
+ response = requests.get(
+ get_sips_url, auth=self.auth, headers=self.headers, timeout=self.timeout
+ )
+ http_code = response.status_code
+ except requests.exceptions.RequestException as e:
+ raise WimTapiServerNotAvailable(str(e))
+
+ if http_code != 200:
+ raise WimTapiServerRequestFailed(
+ "Unexpected status code", http_code=http_code
+ )
+
+ response = response.json()
+ response = response.get("tapi-common:service-interface-point", [])
+ return {sip["uuid"]: sip for sip in response}
+
+ def get_service_status(self, name, service_uuid):
+ self.logger.debug(LOG_MSG_GET_STATUS_REQUEST.format(name, service_uuid))
+
+ try:
+ services_url = CONN_SVC_URL.format(self.wim_url)
+ response = requests.get(
+ services_url, auth=self.auth, headers=self.headers, timeout=self.timeout
+ )
+ self.logger.debug(
+ LOG_MSG_GET_STATUS_REPLY.format(
+ name, service_uuid, response.status_code, response.text
+ )
+ )
+        except requests.exceptions.RequestException as e:
+ status_code = e.response.status_code if e.response is not None else 500
+ content = e.response.text if e.response is not None else ""
+ raise WimTapiConnectivityServiceGetStatusFailed(
+ name, service_uuid, status_code, content
+ )
+
+ if response.status_code not in SUCCESS_HTTP_CODES:
+ raise WimTapiConnectivityServiceGetStatusFailed(
+ name, service_uuid, response.status_code, response.text
+ )
+
+ json_response = response.json()
+ connectivity_services = json_response.get(
+ "tapi-connectivity:connectivity-service", []
+ )
+        connectivity_service = next(
+            (
+                connectivity_service
+                for connectivity_service in connectivity_services
+                if connectivity_service.get("uuid") == service_uuid
+            ),
+            None,
+        )
+
+ if connectivity_service is None:
+ service_status = {"sdn_status": "ERROR"}
+ else:
+ service_status = {"sdn_status": "ACTIVE"}
+ return service_status
+
+ def create_service(
+ self,
+ name,
+ service_uuid,
+ service_endpoints,
+ bidirectional=False,
+ requested_capacity=None,
+ vlan_constraint=None,
+ ):
+ request_create = compose_create_request(
+ service_uuid,
+ service_endpoints,
+ bidirectional=bidirectional,
+ requested_capacity=requested_capacity,
+ vlan_constraint=vlan_constraint,
+ )
+ self.logger.debug(
+ LOG_MSG_CREATE_REQUEST.format(name, service_uuid, str(request_create))
+ )
+
+ try:
+ create_url = CONN_CTX_URL.format(self.wim_url)
+            response = requests.post(
+                create_url,
+                headers=self.headers,
+                json=request_create,
+                auth=self.auth,
+                timeout=self.timeout,
+            )
+ self.logger.debug(
+ LOG_MSG_CREATE_REPLY.format(
+ name, service_uuid, response.status_code, response.text
+ )
+ )
+        except requests.exceptions.RequestException as e:
+ status_code = e.response.status_code if e.response is not None else 500
+ content = e.response.text if e.response is not None else ""
+ raise WimTapiConnectivityServiceCreateFailed(
+ name, service_uuid, status_code, content
+ )
+
+ if response.status_code not in SUCCESS_HTTP_CODES:
+ raise WimTapiConnectivityServiceCreateFailed(
+ name, service_uuid, response.status_code, response.text
+ )
+
+ def delete_service(self, name, service_uuid):
+ request_delete = compose_delete_request(service_uuid)
+ self.logger.debug(
+ LOG_MSG_DELETE_REQUEST.format(name, service_uuid, str(request_delete))
+ )
+
+ try:
+ delete_url = DELETE_URL.format(self.wim_url)
+            response = requests.post(
+                delete_url,
+                headers=self.headers,
+                json=request_delete,
+                auth=self.auth,
+                timeout=self.timeout,
+            )
+ self.logger.debug(
+ LOG_MSG_DELETE_REPLY.format(
+ name, service_uuid, response.status_code, response.text
+ )
+ )
+        except requests.exceptions.RequestException as e:
+ status_code = e.response.status_code if e.response is not None else 500
+ content = e.response.text if e.response is not None else ""
+ raise WimTapiConnectivityServiceDeleteFailed(
+ name, service_uuid, status_code, content
+ )
+
+ if response.status_code not in SUCCESS_HTTP_CODES:
+ raise WimTapiConnectivityServiceDeleteFailed(
+ name, service_uuid, response.status_code, response.text
+ )
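+
+
+# Illustrative usage sketch, for documentation only (url and credentials are
+# placeholders):
+#
+#   client = TransportApiClient(
+#       logger, {"wim_url": "http://wim:8080"},
+#       {"user": "admin", "password": "admin"}, {}
+#   )
+#   sips = client.get_service_interface_points()  # {sip_uuid: sip_details}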
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+#######################################################################################
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the WIM settings for the unit test used to validate the
+Transport API (TAPI) WIM connector."""
+
+
+from osm_rosdn_tapi.tests.tools import wim_port_mapping
+
+
+WIM_HOST_PORT = ("127.0.0.127", 49000)
+
+# WIM_URL should be populated with the WIM URL provided to the WIM connector during its instantiation
+WIM_URL = "http://{:s}:{:d}".format(*WIM_HOST_PORT)
+
+# WIM_ACCOUNT should be populated with the WIM credentials provided to the WIM connector during its instantiation
+WIM_ACCOUNT = {"user": "admin", "password": "admin"}
+
+# WIM_PORT_MAPPING should be populated with the port mapping provided to the WIM connector during its instantiation.
+# In this example, SIPs are taken from the mock_tapi_handler.py file.
+WIM_PORT_MAPPING = [
+ wim_port_mapping(
+ "dc1",
+ "dc1r1",
+ "eth0",
+ "R1-eth0",
+ service_mapping_info={},
+ ),
+ wim_port_mapping(
+ "dc2",
+ "dc2r2",
+ "eth0",
+ "R2-eth0",
+ service_mapping_info={},
+ ),
+ wim_port_mapping(
+ "dc3",
+ "dc3r3",
+ "eth0",
+ "R3-opt1",
+ service_mapping_info={
+ "sip_input": "R3-opt1-rx",
+ "sip_output": "R3-opt1-tx",
+ },
+ ),
+ wim_port_mapping(
+ "dc4",
+ "dc4r4",
+ "eth0",
+ "R4-opt1",
+ service_mapping_info={
+ "sip_input": "R4-opt1-rx",
+ "sip_output": "R4-opt1-tx",
+ },
+ ),
+]
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the exception classes the Mock OSM RO module can raise."""
+
+
+_PREFIX = "Mock OSM RO: "
+
+
+class MockOsmRoError(Exception):
+ """Base Exception for all Mock OSM RO related errors."""
+
+ def __init__(self, message):
+ super().__init__(_PREFIX + message)
+
+
+class MockOsmRoServiceNotFound(MockOsmRoError):
+ def __init__(self, service_id):
+ MESSAGE = "ServiceId({:s}) not found"
+ message = MESSAGE.format(str(service_id))
+ super().__init__(message)
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains a Mock OSM RO component that can be used for rapid unit testing.
+
+This code is based on code taken with permission from ETSI TeraFlowSDN project at:
+ https://labs.etsi.org/rep/tfs/controller
+"""
+
+
+from typing import Dict, List, Type
+
+from osm_ro_plugin.sdnconn import SdnConnectorBase
+
+from .exceptions import MockOsmRoServiceNotFound
+
+
+class MockOsmRo:
+ def __init__(
+ self,
+        klass: Type[SdnConnectorBase],
+ url: str,
+ wim_account: Dict,
+ wim_port_mapping: Dict,
+ ) -> None:
+ wim = {"wim_url": url}
+ config = {
+ "mapping_not_needed": False,
+ "service_endpoint_mapping": wim_port_mapping,
+ }
+
+ # Instantiate WIM connector
+ self.wim_connector = klass(wim, wim_account, config=config)
+
+ # Internal DB emulating OSM RO storage provided to WIM Connectors
+ self.conn_info = {}
+
+ def create_connectivity_service(
+ self, service_type: str, connection_points: List[Dict]
+ ) -> str:
+ self.wim_connector.check_credentials()
+ service_uuid, conn_info = self.wim_connector.create_connectivity_service(
+ service_type, connection_points
+ )
+ self.conn_info[service_uuid] = conn_info
+ return service_uuid
+
+ def get_connectivity_service_status(self, service_uuid: str) -> Dict:
+ conn_info = self.conn_info.get(service_uuid)
+ if conn_info is None:
+ raise MockOsmRoServiceNotFound(service_uuid)
+ self.wim_connector.check_credentials()
+ return self.wim_connector.get_connectivity_service_status(
+ service_uuid, conn_info=conn_info
+ )
+
+ def edit_connectivity_service(
+ self, service_uuid: str, connection_points: List[Dict]
+ ) -> None:
+ conn_info = self.conn_info.get(service_uuid)
+ if conn_info is None:
+ raise MockOsmRoServiceNotFound(service_uuid)
+ self.wim_connector.check_credentials()
+ self.wim_connector.edit_connectivity_service(
+ service_uuid, conn_info=conn_info, connection_points=connection_points
+ )
+
+ def delete_connectivity_service(self, service_uuid: str) -> None:
+ conn_info = self.conn_info.get(service_uuid)
+ if conn_info is None:
+ raise MockOsmRoServiceNotFound(service_uuid)
+ self.wim_connector.check_credentials()
+ self.wim_connector.delete_connectivity_service(
+ service_uuid, conn_info=conn_info
+ )
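+
+
+# Illustrative usage sketch, mirroring the unit tests in test_wimconn_tapi.py:
+#
+#   mock_ro = MockOsmRo(WimconnectorTAPI, WIM_URL, WIM_ACCOUNT, WIM_PORT_MAPPING)
+#   service_uuid = mock_ro.create_connectivity_service("ELINE", connection_points)
+#   mock_ro.get_connectivity_service_status(service_uuid)  # {"sdn_status": ...}
+#   mock_ro.delete_connectivity_service(service_uuid)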
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains a minimalistic Mock Transport API (TAPI) WIM server."""
+
+import http.server
+import json
+import uuid
+
+
+PHOTONIC_PROTOCOL_QUALIFIER = "tapi-photonic-media:PHOTONIC_LAYER_QUALIFIER_NMC"
+DSR_PROTOCOL_QUALIFIER = "tapi-dsr:DIGITAL_SIGNAL_TYPE"
+
+
+def compose_sip(
+    sip_uuid, layer_protocol_name, supported_layer_protocol_qualifier, direction
+):
+    # "sip_uuid" is used instead of "uuid" to avoid shadowing the uuid module
+    return {
+        "uuid": sip_uuid,
+        "layer-protocol-name": layer_protocol_name,
+        "supported-layer-protocol-qualifier": [supported_layer_protocol_qualifier],
+        "administrative-state": "UNLOCKED",
+        "operational-state": "ENABLED",
+        "direction": direction,
+    }
+
+
+def compose_sip_dsr(sip_uuid):
+    return compose_sip(sip_uuid, "DSR", DSR_PROTOCOL_QUALIFIER, "BIDIRECTIONAL")
+
+
+def compose_sip_photonic_input(sip_uuid):
+    return compose_sip(
+        sip_uuid, "PHOTONIC_MEDIA", PHOTONIC_PROTOCOL_QUALIFIER, "INPUT"
+    )
+
+
+def compose_sip_photonic_output(sip_uuid):
+    return compose_sip(
+        sip_uuid, "PHOTONIC_MEDIA", PHOTONIC_PROTOCOL_QUALIFIER, "OUTPUT"
+    )
+
+
+CONTEXT = {
+ "uuid": str(uuid.uuid4()),
+ "service-interface-point": [
+ compose_sip_dsr("R1-eth0"),
+ compose_sip_dsr("R2-eth0"),
+ compose_sip_photonic_input("R3-opt1-rx"),
+ compose_sip_photonic_output("R3-opt1-tx"),
+ compose_sip_photonic_input("R4-opt1-rx"),
+ compose_sip_photonic_output("R4-opt1-tx"),
+ ],
+ # topology details not used by the WIM connector
+ "topology-context": {},
+ "connectivity-context": {"connectivity-service": [], "connection": []},
+}
+
+
+class MockTapiRequestHandler(http.server.BaseHTTPRequestHandler):
+ """Mock TAPI Request Handler for the unit tests"""
+
+ def do_GET(self): # pylint: disable=invalid-name
+ """Handle GET requests"""
+ path = self.path.replace("tapi-common:", "").replace("tapi-connectivity:", "")
+
+ if path == "/restconf/data/context":
+ status = 200 # ok
+ headers = {"Content-Type": "application/json"}
+ data = CONTEXT
+ elif path == "/restconf/data/context/service-interface-point":
+ status = 200 # ok
+ headers = {"Content-Type": "application/json"}
+ data = CONTEXT["service-interface-point"]
+ data = {"tapi-common:service-interface-point": data}
+ elif path == "/restconf/data/context/connectivity-context/connectivity-service":
+ status = 200 # ok
+ headers = {"Content-Type": "application/json"}
+ data = CONTEXT["connectivity-context"]["connectivity-service"]
+ data = {"tapi-connectivity:connectivity-service": data}
+ else:
+ status = 404 # not found
+ headers = {}
+ data = {"error": "Not found"}
+
+ self.send_response(status)
+ for header_name, header_value in headers.items():
+ self.send_header(header_name, header_value)
+ self.end_headers()
+ data = json.dumps(data)
+ self.wfile.write(data.encode("UTF-8"))
+
+ def do_POST(self): # pylint: disable=invalid-name
+ """Handle POST requests"""
+ path = self.path.replace("tapi-common:", "").replace("tapi-connectivity:", "")
+ length = int(self.headers["content-length"])
+ data = json.loads(self.rfile.read(length))
+
+ if path == "/restconf/data/context/connectivity-context":
+ if "tapi-connectivity:connectivity-service" in data:
+ data["connectivity-service"] = data.pop(
+ "tapi-connectivity:connectivity-service"
+ )
+
+ if (
+ isinstance(data["connectivity-service"], list)
+ and len(data["connectivity-service"]) > 0
+ ):
+ data["connectivity-service"] = data["connectivity-service"][0]
+
+ conn_svc = data["connectivity-service"]
+ if "connectivity-constraint" in conn_svc:
+ conn_constr = conn_svc.pop("connectivity-constraint")
+ if "requested-capacity" in conn_constr:
+ req_cap = conn_constr.pop("requested-capacity")
+ conn_svc["requested-capacity"] = req_cap
+ if "connectivity-direction" in conn_constr:
+ conn_dir = conn_constr.pop("connectivity-direction")
+ conn_svc["connectivity-direction"] = conn_dir
+
+ connection = {"uuid": conn_svc["uuid"], "connection-end-point": []}
+ conn_svc["connection"] = [{"connection_uuid": conn_svc["uuid"]}]
+
+ CONTEXT["connectivity-context"]["connection"].append(connection)
+ CONTEXT["connectivity-context"]["connectivity-service"].append(conn_svc)
+
+ status = 201 # created
+ headers = {}
+ elif path == "/restconf/operations/delete-connectivity-service":
+ if "tapi-connectivity:input" in data:
+ data["input"] = data.pop("tapi-connectivity:input")
+ conn_svc_uuid = data["input"]["uuid"]
+ conn_ctx = CONTEXT["connectivity-context"]
+
+ # keep connectivity services and connections with different uuid
+ conn_ctx["connection"] = [
+ conn for conn in conn_ctx["connection"] if conn["uuid"] != conn_svc_uuid
+ ]
+ conn_ctx["connectivity-service"] = [
+ conn_svc
+ for conn_svc in conn_ctx["connectivity-service"]
+ if conn_svc["uuid"] != conn_svc_uuid
+ ]
+
+ status = 204 # ok, no content
+ headers = {}
+ else:
+ status = 404 # not found
+ headers = {}
+
+ self.send_response(status)
+ for header_name, header_value in headers.items():
+ self.send_header(header_name, header_value)
+ self.end_headers()
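+
+
+# Illustrative sketch: the handler can also be served standalone for manual
+# testing (the unit tests start it the same way on WIM_HOST_PORT):
+#
+#   import http.server
+#   server = http.server.ThreadingHTTPServer(
+#       ("127.0.0.127", 49000), MockTapiRequestHandler
+#   )
+#   server.serve_forever()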
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains the unit tests for the Transport API (TAPI) WIM connector."""
+
+import http.server
+import threading
+import unittest
+
+from osm_rosdn_tapi.exceptions import (
+ WimTapiConnectionPointsBadFormat,
+ WimTapiMissingConnPointField,
+ WimTapiUnsupportedServiceType,
+)
+from osm_rosdn_tapi.tests.constants import (
+ WIM_ACCOUNT,
+ WIM_HOST_PORT,
+ WIM_PORT_MAPPING,
+ WIM_URL,
+)
+from osm_rosdn_tapi.tests.mock_osm_ro import MockOsmRo
+from osm_rosdn_tapi.tests.mock_tapi_handler import MockTapiRequestHandler
+from osm_rosdn_tapi.wimconn_tapi import WimconnectorTAPI
+
+
+SERVICE_TYPE = "ELINE"
+SERVICE_CONNECTION_POINTS_BIDIRECTIONAL = [
+ # SIPs taken from mock_tapi_handler.py
+ {"service_endpoint_id": "R1-eth0"},
+ {"service_endpoint_id": "R2-eth0"},
+]
+SERVICE_CONNECTION_POINTS_UNIDIRECTIONAL = [
+ # SIPs taken from mock_tapi_handler.py
+ {"service_endpoint_id": "R3-opt1"},
+ {"service_endpoint_id": "R4-opt1"},
+]
+
+
+class UnitTests(unittest.TestCase):
+ """Unit tests for Transport API WIM connector"""
+
+ def setUp(self) -> None:
+ self.wim_server = http.server.ThreadingHTTPServer(
+ WIM_HOST_PORT, MockTapiRequestHandler
+ )
+
+ def test_wrong_cases(self):
+ with self.wim_server:
+ wim_server_thread = threading.Thread(target=self.wim_server.serve_forever)
+ wim_server_thread.daemon = True
+ wim_server_thread.start()
+
+ mock_osm_ro_tapi = MockOsmRo(
+ WimconnectorTAPI, WIM_URL, WIM_ACCOUNT, WIM_PORT_MAPPING
+ )
+
+ # Unsupported service type
+ with self.assertRaises(WimTapiUnsupportedServiceType) as test_context:
+ mock_osm_ro_tapi.create_connectivity_service(
+ "ELAN", SERVICE_CONNECTION_POINTS_BIDIRECTIONAL
+ )
+ self.assertEqual(
+ str(test_context.exception.args[0]),
+ "Unsupported ServiceType(ELAN). Supported ServiceTypes({'ELINE'})",
+ )
+
+ # Wrong number of connection_points
+ with self.assertRaises(WimTapiConnectionPointsBadFormat) as test_context:
+ mock_osm_ro_tapi.create_connectivity_service(SERVICE_TYPE, [])
+ self.assertEqual(
+ str(test_context.exception.args[0]),
+ "ConnectionPoints([]) must be a list or tuple of length 2",
+ )
+
+ # Wrong type of connection_points
+ with self.assertRaises(WimTapiConnectionPointsBadFormat) as test_context:
+ mock_osm_ro_tapi.create_connectivity_service(
+ SERVICE_TYPE, {"a": "b", "c": "d"}
+ )
+ self.assertEqual(
+ str(test_context.exception.args[0]),
+ "ConnectionPoints({'a': 'b', 'c': 'd'}) must be a list or tuple of length 2",
+ )
+
+ with self.assertRaises(WimTapiMissingConnPointField) as test_context:
+ mock_osm_ro_tapi.create_connectivity_service(
+ SERVICE_TYPE,
+ [
+ {"wrong_service_endpoint_id": "value"},
+ {"service_endpoint_id": "value"},
+ ],
+ )
+ self.assertEqual(
+ str(test_context.exception.args[0]),
+ "WIM TAPI Connector: ConnectionPoint({'wrong_service_endpoint_id': 'value'}) has no field 'service_endpoint_id'",
+ )
+
+ self.wim_server.shutdown()
+ wim_server_thread.join()
+
+ def test_correct_bidirectional(self):
+ with self.wim_server:
+ wim_server_thread = threading.Thread(target=self.wim_server.serve_forever)
+ wim_server_thread.daemon = True
+ wim_server_thread.start()
+
+ mock_osm_ro_tapi = MockOsmRo(
+ WimconnectorTAPI, WIM_URL, WIM_ACCOUNT, WIM_PORT_MAPPING
+ )
+
+ # Create bidirectional TAPI service
+ service_uuid = mock_osm_ro_tapi.create_connectivity_service(
+ SERVICE_TYPE, SERVICE_CONNECTION_POINTS_BIDIRECTIONAL
+ )
+ self.assertIsInstance(service_uuid, str)
+
+ # Check status of bidirectional TAPI service
+ status = mock_osm_ro_tapi.get_connectivity_service_status(service_uuid)
+ self.assertIsInstance(status, dict)
+ self.assertIn("sdn_status", status)
+ self.assertEqual(status["sdn_status"], "ACTIVE")
+
+ # Delete bidirectional TAPI service
+ mock_osm_ro_tapi.delete_connectivity_service(service_uuid)
+
+ self.wim_server.shutdown()
+ wim_server_thread.join()
+
+ def test_correct_unidirectional(self):
+ with self.wim_server:
+ wim_server_thread = threading.Thread(target=self.wim_server.serve_forever)
+ wim_server_thread.daemon = True
+ wim_server_thread.start()
+
+ mock_osm_ro_tapi = MockOsmRo(
+ WimconnectorTAPI, WIM_URL, WIM_ACCOUNT, WIM_PORT_MAPPING
+ )
+
+ # Create unidirectional TAPI service
+ service_uuid = mock_osm_ro_tapi.create_connectivity_service(
+ SERVICE_TYPE, SERVICE_CONNECTION_POINTS_UNIDIRECTIONAL
+ )
+ self.assertIsInstance(service_uuid, str)
+
+ # Check status of unidirectional TAPI service
+ status = mock_osm_ro_tapi.get_connectivity_service_status(service_uuid)
+ self.assertIsInstance(status, dict)
+ self.assertIn("sdn_status", status)
+ self.assertEqual(status["sdn_status"], "ACTIVE")
+
+ # Delete unidirectional TAPI service
+ mock_osm_ro_tapi.delete_connectivity_service(service_uuid)
+
+ self.wim_server.shutdown()
+ wim_server_thread.join()
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""This file contains helper methods for the Mock OSM RO component that can be used
+for rapid unit testing.
+
+This code is based on code taken with permission from ETSI TeraFlowSDN project at:
+ https://labs.etsi.org/rep/tfs/controller
+"""
+
+from typing import Dict, Optional
+
+
+# Ref: https://osm.etsi.org/wikipub/index.php/WIM
+# Fields defined according to from osm_ro_plugin.sdnconn import SdnConnectorBase
+def wim_port_mapping(
+ datacenter_id: str,
+ device_id: str,
+ device_interface_id: str,
+ service_endpoint_id: str,
+ switch_dpid: Optional[str] = None,
+ switch_port: Optional[str] = None,
+    service_mapping_info: Optional[Dict] = None,
+):
+ mapping = {
+ "datacenter_id": datacenter_id,
+ "device_id": device_id,
+ "device_interface_id": device_interface_id,
+ "service_endpoint_id": service_endpoint_id,
+        # default to a fresh dict; a mutable default argument would be shared
+        "service_mapping_info": service_mapping_info or {},
+ }
+ if switch_dpid is not None:
+ mapping["switch_dpid"] = switch_dpid
+ if switch_port is not None:
+ mapping["switch_port"] = switch_port
+ return mapping
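+
+
+# Illustrative call, mirroring the entries in tests/constants.py:
+#
+#   wim_port_mapping("dc1", "dc1r1", "eth0", "R1-eth0", service_mapping_info={})
+#   # -> {"datacenter_id": "dc1", "device_id": "dc1r1",
+#   #     "device_interface_id": "eth0", "service_endpoint_id": "R1-eth0",
+#   #     "service_mapping_info": {}}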
--- /dev/null
+# -*- coding: utf-8 -*-
+
+#######################################################################################
+# This file is part of OSM RO module
+#
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#######################################################################################
+# This work has been performed in the context of the TeraFlow Project -
+# funded by the European Commission under Grant number 101015857 through the
+# Horizon 2020 program.
+# Contributors:
+# - Lluis Gifre <lluis.gifre@cttc.es>
+# - Ricard Vilalta <ricard.vilalta@cttc.es>
+#######################################################################################
+
+"""The SDN/WIM connector is responsible for establishing wide area network
+connectivity.
+
+This SDN/WIM connector implements the standard ONF Transport API (TAPI).
+
+It receives the endpoints and the necessary details to request the Layer 2
+service through the use of the ONF Transport API.
+"""
+
+import logging
+import uuid
+
+from osm_ro_plugin.sdnconn import SdnConnectorBase
+
+from .conn_info import (
+ conn_info_compose_bidirectional,
+ conn_info_compose_unidirectional,
+)
+from .exceptions import (
+ WimTapiConnectionPointsBadFormat,
+ WimTapiMissingConnPointField,
+ WimTapiUnsupportedServiceType,
+)
+from .services_composer import ServicesComposer
+from .tapi_client import TransportApiClient
+
+
+class WimconnectorTAPI(SdnConnectorBase):
+ """ONF TAPI WIM connector"""
+
+ def __init__(self, wim, wim_account, config=None, logger=None):
+ """ONF TAPI WIM connector
+
+ Arguments:
+ wim (dict): WIM record, as stored in the database
+ wim_account (dict): WIM account record, as stored in the database
+ config (optional dict): optional configuration from the configuration database
+ logger (optional Logger): logger to use with this WIM connector
+ The arguments of the constructor are converted to object attributes.
+ An extra property, ``service_endpoint_mapping`` is created from ``config``.
+ """
+ logger = logger or logging.getLogger("ro.sdn.tapi")
+
+ super().__init__(wim, wim_account, config, logger)
+
+ self.logger.debug("self.config={:s}".format(str(self.config)))
+
+ if len(self.service_endpoint_mapping) == 0 and self.config.get(
+ "wim_port_mapping"
+ ):
+ self.service_endpoint_mapping = self.config.get("wim_port_mapping", [])
+
+ self.mappings = {
+ m["service_endpoint_id"]: m for m in self.service_endpoint_mapping
+ }
+
+ self.logger.debug("self.mappings={:s}".format(str(self.mappings)))
+
+ self.tapi_client = TransportApiClient(self.logger, wim, wim_account, config)
+
+ self.logger.info("TAPI WIM Connector Initialized.")
+
+ def check_credentials(self):
+ """Check if the connector itself can access the SDN/WIM with the provided url (wim.wim_url),
+ user (wim_account.user), and password (wim_account.password)
+
+ Raises:
+ SdnConnectorError: Issues regarding authorization, access to
+ external URLs, etc are detected.
+ """
+ _ = self.tapi_client.get_root_context()
+ self.logger.info("Credentials checked")
+
+ def get_connectivity_service_status(self, service_uuid, conn_info=None):
+ """Monitor the status of the connectivity service established
+
+ Arguments:
+ service_uuid (str): UUID of the connectivity service
+ conn_info (dict or None): Information returned by the connector
+ during the service creation/edition and subsequently stored in
+ the database.
+
+ Returns:
+ dict: JSON/YAML-serializable dict that contains a mandatory key
+ ``sdn_status`` associated with one of the following values::
+
+ {'sdn_status': 'ACTIVE'}
+ # The service is up and running.
+
+ {'sdn_status': 'INACTIVE'}
+ # The service was created, but the connector
+ # cannot determine yet if connectivity exists
+ # (ideally, the caller needs to wait and check again).
+
+ {'sdn_status': 'DOWN'}
+ # Connection was previously established,
+ # but an error/failure was detected.
+
+ {'sdn_status': 'ERROR'}
+ # An error occurred when trying to create the service/
+ # establish the connectivity.
+
+ {'sdn_status': 'BUILD'}
+ # Still trying to create the service, the caller
+ # needs to wait and check again.
+
+ Additionally ``error_msg``(**str**) and ``sdn_info``(**dict**)
+ keys can be used to provide additional status explanation or
+ new information available for the connectivity service.
+ """
+ sdn_status = set()
+ bidirectional = conn_info["bidirectional"]
+
+ tapi_client = self.tapi_client
+ if bidirectional:
+ service_uuid = conn_info["uuid"]
+ service_status = tapi_client.get_service_status("<>", service_uuid)
+ sdn_status.add(service_status["sdn_status"])
+ else:
+ service_az_uuid = conn_info["az"]["uuid"]
+ service_za_uuid = conn_info["za"]["uuid"]
+ service_az_status = tapi_client.get_service_status(">>", service_az_uuid)
+ service_za_status = tapi_client.get_service_status("<<", service_za_uuid)
+ sdn_status.add(service_az_status["sdn_status"])
+ sdn_status.add(service_za_status["sdn_status"])
+
+ if len(sdn_status) == 1 and "ACTIVE" in sdn_status:
+ service_status = {"sdn_status": "ACTIVE"}
+ else:
+ service_status = {"sdn_status": "ERROR"}
+
+ return service_status
+
+ def create_connectivity_service(self, service_type, connection_points, **kwargs):
+ """
+ Establish SDN/WAN connectivity between the endpoints
+ :param service_type: (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``.
+ :param connection_points: (list): each point corresponds to
+ an entry point to be connected. For WIM: from the DC to the transport network.
+ For SDN: Compute/PCI to the transport network. One
+ connection point serves to identify the specific access and
+ some other service parameters, such as encapsulation type.
+ Each item of the list is a dict with:
+            "service_endpoint_id": (str)(uuid) Same meaning as for 'service_endpoint_mapping' (see __init__)
+ In case the config attribute mapping_not_needed is True, this value is not relevant. In this case
+ it will contain the string "device_id:device_interface_id"
+ "service_endpoint_encapsulation_type": None, "dot1q", ...
+ "service_endpoint_encapsulation_info": (dict) with:
+ "vlan": ..., (int, present if encapsulation is dot1q)
+ "vni": ... (int, present if encapsulation is vxlan),
+ "peers": [(ipv4_1), (ipv4_2)] (present if encapsulation is vxlan)
+ "mac": ...
+            "device_id": ..., same meaning as for 'service_endpoint_mapping' (see __init__)
+            "device_interface_id": same meaning as for 'service_endpoint_mapping' (see __init__)
+            "switch_dpid": ..., present if mapping has been found for this device_id,device_interface_id
+            "switch_port": ... present if mapping has been found for this device_id,device_interface_id
+ "service_mapping_info": present if mapping has been found for this device_id,device_interface_id
+ :param kwargs: For future versions:
+ bandwidth (int): value in kilobytes
+ latency (int): value in milliseconds
+ Other QoS might be passed as keyword arguments.
+ :return: tuple: ``(service_id, conn_info)`` containing:
+ - *service_uuid* (str): UUID of the established connectivity service
+ - *conn_info* (dict or None): Information to be stored at the database (or ``None``).
+ This information will be provided to the :meth:`~.edit_connectivity_service` and :obj:`~.delete`.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+ :raises: SdnConnectorException: In case of error. Nothing should be created in this case.
+ Provide the parameter http_code
+ """
+ supported_service_types = {"ELINE"}
+ if service_type not in supported_service_types:
+ raise WimTapiUnsupportedServiceType(service_type, supported_service_types)
+
+ self.logger.debug("connection_points={:s}".format(str(connection_points)))
+
+ if not isinstance(connection_points, (list, tuple)):
+ raise WimTapiConnectionPointsBadFormat(connection_points)
+
+ if len(connection_points) != 2:
+ raise WimTapiConnectionPointsBadFormat(connection_points)
+
+ sips = self.tapi_client.get_service_interface_points()
+ services_composer = ServicesComposer(sips)
+
+ for connection_point in connection_points:
+ service_endpoint_id = connection_point.get("service_endpoint_id")
+ if service_endpoint_id is None:
+ raise WimTapiMissingConnPointField(
+ connection_point, "service_endpoint_id"
+ )
+
+ mapping = self.mappings.get(service_endpoint_id, {})
+ services_composer.add_service_endpoint(service_endpoint_id, mapping)
+
+ services_composer.dump(self.logger)
+
+ service_uuid, conn_info = self._create_services_and_conn_info(services_composer)
+ return service_uuid, conn_info
+
+ def _create_services_and_conn_info(self, services_composer: ServicesComposer):
+ services = services_composer.services
+ requested_capacity = services_composer.requested_capacity
+ vlan_constraint = services_composer.vlan_constraint
+
+ service_uuid = str(uuid.uuid4())
+
+ if services_composer.is_bidirectional():
+ service_endpoints = services[0]
+ self.tapi_client.create_service(
+ "<>",
+ service_uuid,
+ service_endpoints,
+ bidirectional=True,
+ requested_capacity=requested_capacity,
+ vlan_constraint=vlan_constraint,
+ )
+ conn_info = conn_info_compose_bidirectional(
+ service_uuid,
+ service_endpoints,
+ requested_capacity=requested_capacity,
+ vlan_constraint=vlan_constraint,
+ )
+
+ else:
+            service_uuid = service_uuid[:-4] + "00**"
+ service_az_uuid = service_uuid.replace("**", "af")
+ service_az_endpoints = services[0]
+ service_za_uuid = service_uuid.replace("**", "fa")
+ service_za_endpoints = services[1]
+
+ self.tapi_client.create_service(
+ ">>",
+ service_az_uuid,
+ service_az_endpoints,
+ bidirectional=False,
+ requested_capacity=requested_capacity,
+ vlan_constraint=vlan_constraint,
+ )
+ self.tapi_client.create_service(
+ "<<",
+ service_za_uuid,
+ service_za_endpoints,
+ bidirectional=False,
+ requested_capacity=requested_capacity,
+ vlan_constraint=vlan_constraint,
+ )
+ conn_info = conn_info_compose_unidirectional(
+ service_az_uuid,
+ service_az_endpoints,
+ service_za_uuid,
+ service_za_endpoints,
+ requested_capacity=requested_capacity,
+ vlan_constraint=vlan_constraint,
+ )
+
+ return service_uuid, conn_info
+
+ def delete_connectivity_service(self, service_uuid, conn_info=None):
+ """
+ Disconnect multi-site endpoints previously connected
+
+ :param service_uuid: The one returned by create_connectivity_service
+ :param conn_info: The one returned by last call to 'create_connectivity_service' or 'edit_connectivity_service'
+ if they do not return None
+ :return: None
+ :raises: SdnConnectorException: In case of error. The parameter http_code must be filled
+ """
+ bidirectional = conn_info["bidirectional"]
+ if bidirectional:
+ service_uuid = conn_info["uuid"]
+ self.tapi_client.delete_service("<>", service_uuid)
+ else:
+ service_az_uuid = conn_info["az"]["uuid"]
+ service_za_uuid = conn_info["za"]["uuid"]
+ self.tapi_client.delete_service(">>", service_az_uuid)
+ self.tapi_client.delete_service("<<", service_za_uuid)
+
+ def edit_connectivity_service(
+ self, service_uuid, conn_info=None, connection_points=None, **kwargs
+ ):
+ """Change an existing connectivity service.
+
+ This method's arguments and return value follow the same convention as
+ :meth:`~.create_connectivity_service`.
+
+ :param service_uuid: UUID of the connectivity service.
+ :param conn_info: (dict or None): Information previously returned by last call to create_connectivity_service
+ or edit_connectivity_service
+ :param connection_points: (list): If provided, the old list of connection points will be replaced.
+        :param kwargs: Same meaning as in create_connectivity_service
+ :return: dict or None: Information to be updated and stored at the database.
+ When ``None`` is returned, no information should be changed.
+ When an empty dict is returned, the database record will be deleted.
+ **MUST** be JSON/YAML-serializable (plain data structures).
+ Raises:
+ SdnConnectorException: In case of error.
+ """
+ raise NotImplementedError
+
+ def clear_all_connectivity_services(self):
+ """Delete all WAN Links in a WIM.
+
+ This method is intended for debugging only, and should delete all the
+ connections controlled by the WIM/SDN, not only the connections that
+ a specific RO is aware of.
+
+ Raises:
+ SdnConnectorException: In case of error.
+ """
+ raise NotImplementedError
+
+ def get_all_active_connectivity_services(self):
+ """Provide information about all active connections provisioned by a
+ WIM.
+
+ Raises:
+ SdnConnectorException: In case of error.
+ """
+ raise NotImplementedError
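+
+
+# Illustrative usage sketch, for documentation only (wim/account values are
+# placeholders; see tests/test_wimconn_tapi.py for a complete flow):
+#
+#   connector = WimconnectorTAPI(
+#       {"wim_url": "http://wim:8080"},
+#       {"user": "admin", "password": "admin"},
+#       config={"service_endpoint_mapping": wim_port_mappings},
+#   )
+#   connector.check_credentials()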
--- /dev/null
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+requests
--- /dev/null
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rosdn_tapi"
+_version_command = ("git describe --match v* --tags --long --dirty", "pep440-git-full")
+_description = "OSM RO SDN plugin for TAPI"
+_author = "OSM Support"
+_author_email = "osmsupport@etsi.org"
+_maintainer = "OSM Support"
+_maintainer_email = "osmsupport@etsi.org"
+_license = "Apache 2.0"
+_url = "https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary"
+
+_readme = """
+==============
+osm_rosdn_tapi
+==============
+
+osm-ro plugin for TAPI SDN
+"""
+
+setup(
+ name=_name,
+ description=_description,
+ long_description=_readme,
+ version_command=_version_command,
+ author=_author,
+ author_email=_author_email,
+ maintainer=_maintainer,
+ maintainer_email=_maintainer_email,
+ url=_url,
+ license=_license,
+ packages=[_name],
+ include_package_data=True,
+ setup_requires=["setuptools-version-command"],
+ entry_points={
+ "osm_rosdn.plugins": [
+ "rosdn_tapi = osm_rosdn_tapi.wimconn_tapi:WimconnectorTAPI"
+ ],
+ },
+)
--- /dev/null
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+ interface.private_ip_address
)
else:
- interface_dict[
- "ip_address"
- ] = interface.private_ip_address
+ interface_dict["ip_address"] = (
+ interface.private_ip_address
+ )
instance_dict["interfaces"].append(interface_dict)
except Exception as e:
# Variable that indicates if client must be reloaded or initialized
self.reload_client = True
- self.vnet_address_space = None
+ self.vnet_address_space = []
# LOGGER
self.logger = logging.getLogger("ro.vim.azure")
vnet = self.conn_vnet.virtual_networks.get(
self.vnet_resource_group or self.resource_group, self.vnet_name
)
- self.vnet_address_space = vnet.address_space.address_prefixes[0]
+ self.vnet_address_space = vnet.address_space.address_prefixes
self.vnet_id = vnet.id
return
"location": self.region,
"address_space": {"address_prefixes": ["10.0.0.0/8"]},
}
- self.vnet_address_space = "10.0.0.0/8"
+ self.vnet_address_space = ["10.0.0.0/8"]
self.logger.debug("create base vnet: %s", self.vnet_name)
self.conn_vnet.virtual_networks.begin_create_or_update(
if ip_profile is None:
# get a non used vnet ip range /24 and allocate automatically inside the range self.vnet_address_space
used_subnets = self.get_network_list()
- for ip_range in netaddr.IPNetwork(self.vnet_address_space).subnet(24):
- for used_subnet in used_subnets:
- subnet_range = netaddr.IPNetwork(used_subnet["cidr_block"])
-
- if subnet_range in ip_range or ip_range in subnet_range:
- # this range overlaps with an existing subnet ip range. Breaks and look for another
+ for space in self.vnet_address_space:
+ for ip_range in netaddr.IPNetwork(space).subnet(24):
+ for used_subnet in used_subnets:
+ subnet_range = netaddr.IPNetwork(used_subnet["cidr_block"])
+
+ if subnet_range in ip_range or ip_range in subnet_range:
+                            # this range overlaps with an existing subnet ip range. Break and look for another
+ break
+ else:
+ ip_profile = {"subnet_address": str(ip_range)}
+ self.logger.debug(
+                            "dynamically obtained ip_profile: %s", ip_range
+ )
break
- else:
- ip_profile = {"subnet_address": str(ip_range)}
- self.logger.debug("dinamically obtained ip_profile: %s", ip_range)
+ if ip_profile is not None:
break
else:
raise vimconn.VimConnException(
vm_sizes_list = [
vm_size.as_dict()
for vm_size in self.conn_compute.resource_skus.list(
- "location eq '{}'".format(self.region)
+ filter="location eq '{}'".format(self.region)
)
]
vm_sizes_list = [
vm_size.as_dict()
for vm_size in self.conn_compute.resource_skus.list(
- "location eq '{}'".format(self.region)
+ filter="location eq '{}'".format(self.region)
)
]
vm = self.conn_compute.virtual_machines.get(
self.resource_group, res_name
)
- out_vm["vim_info"] = str(vm)
+ img = vm.storage_profile.image_reference
+ images = self._get_version_image_list(
+ img.publisher, img.offer, img.sku, img.version
+ )
+ vim_info = {
+ "id": vm.id,
+ "name": vm.name,
+ "location": vm.location,
+ "provisioning_state": vm.provisioning_state,
+ "vm_id": vm.vm_id,
+ "type": vm.type,
+ "flavor": {"id": vm.hardware_profile.vm_size},
+ "image": images[0],
+ }
+ out_vm["vim_info"] = str(vim_info)
out_vm["status"] = self.provision_state2osm.get(
vm.provisioning_state, "OTHER"
)
self.logger.debug("Public ip address is: %s", public_ip.ip_address)
ips.append(public_ip.ip_address)
+ subnet = nic_data.ip_configurations[0].subnet.id
+ if subnet:
+ interface_dict["vim_net_id"] = subnet
+
private_ip = nic_data.ip_configurations[0].private_ip_address
ips.append(private_ip)
try:
# Set to client created
- self.conn_compute = googleapiclient.discovery.build("compute", "v1")
+ self.conn_compute = googleapiclient.discovery.build(
+ "compute", "v1", credentials=self.credentials
+ )
except Exception as e:
self._format_vimconn_exception(e)
if not net.get("name"):
continue
else:
- net_iface[
- "subnetwork"
- ] = "regions/%s/subnetworks/" % self.region + net.get("name")
+ net_iface["subnetwork"] = (
+ "regions/%s/subnetworks/" % self.region + net.get("name")
+ )
else:
net_iface["subnetwork"] = net.get("net_id")
+ if net.get("ip_address"):
+ net_iface["networkIP"] = net.get("ip_address")
+
# In order to get an external IP address, the key "accessConfigs" must be used
         # in the interface. It has to be of type "ONE_TO_ONE_NAT" and name "External NAT"
if net.get("floating_ip", False) or (
)
)
+ def _get_id_from_image(self, image):
+ """
+        Obtains image_id from the complete Google Cloud image identifier: image_id is formed by the last five path segments
+ """
+ self.logger.debug(f"_get_id_from_image begin: image {image}")
+ try:
+ image_id = "/".join(image.split("/")[-5:])
+ self.logger.debug(f"_get_id_from_image Return: image_id {image_id}")
+ return image_id
+ except Exception as e:
+ raise vimconn.VimConnException(
+ f"Unable to get image_id from image '{image}' Error: '{e}'"
+ )
+
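+    # Illustrative example of the truncation in _get_id_from_image above (the
+    # image URL is a representative, made-up value):
+    #   ".../compute/v1/projects/debian-cloud/global/images/debian-11-bullseye-v20220519"
+    #   -> "projects/debian-cloud/global/images/debian-11-bullseye-v20220519"
+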
def refresh_nets_status(self, net_list):
"""Get the status of the networks
Params: the list of network identifiers
.execute()
)
- out_vm["vim_info"] = str(vm["name"])
+ disk_source = vm["disks"][0]["source"]
+ self.logger.debug("getting disk information")
+ disk = (
+ self.conn_compute.disks()
+ .get(
+ project=self.project,
+ zone=self.zone,
+ disk=self._get_resource_name_from_resource_id(disk_source),
+ )
+ .execute()
+ )
+ image = {}
+ if disk is not None:
+ self.logger.debug(f"disk: {disk}")
+ image = {
+ "id": self._get_id_from_image(disk["sourceImage"]),
+ "source": disk_source,
+ }
+
+ vim_info = {
+ "id": vm_id,
+ "name": vm["name"],
+ "creationTimestamp": vm["creationTimestamp"],
+ "lastStartTimestamp": vm["lastStartTimestamp"],
+ "vm_id": vm["id"],
+ "kind": vm["kind"],
+ "cpuPlatform": vm["cpuPlatform"],
+ "zone": self._get_resource_name_from_resource_id(vm["zone"]),
+ "machineType": vm["machineType"],
+ "flavor": {
+ "id": self._get_resource_name_from_resource_id(
+ vm["machineType"]
+ )
+ },
+ "image": image,
+ }
+ out_vm["vim_info"] = str(vim_info)
out_vm["status"] = self.provision_state2osm.get(vm["status"], "OTHER")
# In Google Cloud there is no difference between provision or power status,
for network_interface in interfaces:
interface_dict = {}
interface_dict["vim_interface_id"] = network_interface["name"]
+ interface_dict["vim_net_id"] = network_interface["subnetwork"]
ips = []
ips.append(network_interface["networkIP"])
##
netaddr
-google-api-python-client
-google-auth
+google-api-python-client==2.51.0
+google-auth==2.8.0
+google-api-core==2.8.2
+googleapis-common-protos==1.56.3
google-cloud
paramiko
import logging
import unittest
+import cinderclient.exceptions as cExceptions
from mock import MagicMock, patch
+from neutronclient.common import exceptions as neExceptions
from novaclient import exceptions as nvExceptions
from novaclient.exceptions import ClientException, Conflict
from osm_ro_plugin.vimconn import (
VimConnConnectionException,
VimConnException,
VimConnNotFoundException,
+ VimConnUnexpectedResponse,
)
from osm_rovim_openstack.vimconn_openstack import vimconnector
+from requests.exceptions import ConnectionError
__author__ = "Igor D.C."
__date__ = "$23-aug-2017 23:59:59$"
volume_id = "ac408b73-b9cc-4a6a-a270-82cc4811bd4a"
volume_id2 = "o4e0e83-b9uu-4akk-a234-89cc4811bd4a"
volume_id3 = "44e0e83-t9uu-4akk-a234-p9cc4811bd4a"
+volume_id4 = "91bf5674-5b85-41d1-aa3b-4848e2691088"
virtual_mac_id = "64e0e83-t9uu-4akk-a234-p9cc4811bd4a"
created_items_all_true = {
f"floating_ip:{floating_network_vim_id}": True,
mocking.assert_not_called()
-class Status:
- def __init__(self, s):
+class Volume:
+ def __init__(self, s, type="__DEFAULT__", name="", id=""):
self.status = s
+ self.volume_type = type
+ self.name = name
+ self.id = id
- def __str__(self):
- return self.status
+
+class Server:
+ def __init__(self, name="", status="", flavor="", id=""):
+ self.id = id
+ self.name = name
+ self.status = status
+ self.flavor = flavor
class CopyingMock(MagicMock):
self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
self.assertDictEqual(port_dict, result_dict)
- def test_prepare_port_dict_mac_ip_addr_no_mac_and_ip(self):
+ def test_prepare_port_dict_mac_ip_addr_empty_net(self):
"""mac address and ip address does not exist."""
net = {}
port_dict = {}
self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
self.assertDictEqual(port_dict, result_dict)
+ def test_prepare_port_dict_mac_ip_addr_dual(self):
+ """mac address, ipv4 and ipv6 addresses exist."""
+ net = {
+ "mac_address": mac_address,
+ "ip_address": ["10.0.1.5", "2345:0425:2CA1:0000:0000:0567:5673:23b5"],
+ }
+ port_dict = {}
+ result_dict = {
+ "mac_address": mac_address,
+ "fixed_ips": [
+ {"ip_address": "10.0.1.5"},
+ {"ip_address": "2345:0425:2CA1:0000:0000:0567:5673:23b5"},
+ ],
+ }
+ self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+ self.assertDictEqual(port_dict, result_dict)
+
+ def test_prepare_port_dict_mac_ip_addr_dual_ip_addr_is_not_list(self):
+ """mac address, ipv4 and ipv6 addresses exist."""
+ net = {
+ "mac_address": mac_address,
+ "ip_address": "10.0.1.5",
+ }
+ port_dict = {}
+ result_dict = {
+ "mac_address": mac_address,
+ "fixed_ips": [
+ {"ip_address": "10.0.1.5"},
+ ],
+ }
+ self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+ self.assertDictEqual(port_dict, result_dict)
+
+ def test_prepare_port_dict_mac_ip_addr_dual_net_without_ip_addr(self):
+ """mac address, ipv4 and ipv6 addresses exist."""
+ net = {
+ "mac_address": mac_address,
+ "ip_address": [],
+ }
+ port_dict = {}
+ result_dict = {
+ "mac_address": mac_address,
+ }
+ self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+ self.assertDictEqual(port_dict, result_dict)
+
+ def test_prepare_port_dict_mac_ip_addr_dual_net_without_mac_addr(self):
+ """mac address, ipv4 and ipv6 addresses exist."""
+ net = {
+ "ip_address": ["10.0.1.5", "2345:0425:2CA1:0000:0000:0567:5673:23b5"],
+ }
+ port_dict = {}
+ result_dict = {
+ "fixed_ips": [
+ {"ip_address": "10.0.1.5"},
+ {"ip_address": "2345:0425:2CA1:0000:0000:0567:5673:23b5"},
+ ],
+ }
+ self.vimconn._prepare_port_dict_mac_ip_addr(net, port_dict)
+ self.assertDictEqual(port_dict, result_dict)
+
def test_create_new_port(self):
"""new port has id and mac address."""
new_port = {
net, port_dict, created_items = {}, {}, {}
expected_result = new_port
expected_net = {
- "mac_adress": mac_address,
+ "mac_address": mac_address,
"vim_id": port_id,
}
expected_created_items = {f"port:{port_id}": True}
def test_prepare_persistent_root_volumes_vim_using_volume_id(self):
"""Existing persistent root volume with vim_volume_id."""
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("a")
disk = {"vim_volume_id": volume_id}
block_device_mapping = {}
expected_existing_vim_volumes = [{"id": volume_id}]
boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
name,
- vm_av_zone,
+ storage_av_zone,
disk,
base_disk_index,
block_device_mapping,
self.assertEqual(existing_vim_volumes, expected_existing_vim_volumes)
self.vimconn.cinder.volumes.create.assert_not_called()
+ @patch.object(vimconnector, "update_block_device_mapping")
+ def test__prepare_shared_volumes_vim_using_volume_id(
+ self, mock_update_block_device_mapping
+ ):
+ """Existing persistent non root volume with vim_volume_id.
+ class Volume:
+ def __init__(self, s, type="__DEFAULT__", name="", id=""):
+ self.status = s
+ self.volume_type = type
+ self.name = name
+ self.id = id
+ volumes = {"shared-volume": volume_id4}
+
+ The device mappeing BEFORE is: {}
+ The device mappeing AFTER is: {'vdb': '8ca50cc6-a779-4513-a1f3-900b8b3987d2'}
+ """
+ base_disk_index = ord("b")
+ disk = {"name": "shared-volume"}
+ block_device_mapping = {}
+ existing_vim_volumes = []
+ created_items = {}
+ expected_block_device_mapping = {}
+ self.vimconn.cinder.volumes.list.return_value = [
+ Volume("available", "multiattach", "shared-volume", volume_id4)
+ ]
+ self.vimconn.cinder.volumes.get.return_value.id = volume_id4
+ self.vimconn.cinder.volumes.get.return_value.status = "available"
+ self.vimconn._prepare_shared_volumes(
+ name,
+ disk,
+ base_disk_index,
+ block_device_mapping,
+ existing_vim_volumes,
+ created_items,
+ )
+ self.vimconn.cinder.volumes.get.assert_called_with(volume_id4)
+ self.assertDictEqual(block_device_mapping, expected_block_device_mapping)
+
@patch.object(vimconnector, "update_block_device_mapping")
def test_prepare_persistent_non_root_volumes_vim_using_volume_id(
self, mock_update_block_device_mapping
):
"""Existing persistent non root volume with vim_volume_id."""
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("b")
disk = {"vim_volume_id": volume_id}
block_device_mapping = {}
self.vimconn._prepare_non_root_persistent_volumes(
name,
disk,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
base_disk_index,
existing_vim_volumes,
self, mock_update_block_device_mapping
):
"""Existing persistent root volume with vim_id."""
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("a")
disk = {"vim_id": volume_id}
block_device_mapping = {}
expected_existing_vim_volumes = [{"id": volume_id}]
boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
name,
- vm_av_zone,
+ storage_av_zone,
disk,
base_disk_index,
block_device_mapping,
self, mock_update_block_device_mapping
):
"""Existing persistent root volume with vim_id."""
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("b")
disk = {"vim_id": volume_id}
block_device_mapping = {}
self.vimconn._prepare_non_root_persistent_volumes(
name,
disk,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
base_disk_index,
existing_vim_volumes,
):
"""Create persistent root volume."""
self.vimconn.cinder.volumes.create.return_value.id = volume_id2
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("a")
disk = {"size": 10, "image_id": image_id}
block_device_mapping = {}
expected_boot_vol_id = volume_id2
boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
name,
- vm_av_zone,
+ storage_av_zone,
disk,
base_disk_index,
block_device_mapping,
):
"""Create persistent root volume, disk has keep parameter."""
self.vimconn.cinder.volumes.create.return_value.id = volume_id2
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("a")
disk = {"size": 10, "image_id": image_id, "keep": True}
block_device_mapping = {}
expected_existing_vim_volumes = []
boot_volume_id = self.vimconn._prepare_persistent_root_volumes(
name,
- vm_av_zone,
+ storage_av_zone,
disk,
base_disk_index,
block_device_mapping,
"""Create persistent non-root volume."""
self.vimconn.cinder = CopyingMock()
self.vimconn.cinder.volumes.create.return_value.id = volume_id2
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("a")
disk = {"size": 10}
block_device_mapping = {}
self.vimconn._prepare_non_root_persistent_volumes(
name,
disk,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
base_disk_index,
existing_vim_volumes,
"""Create persistent non-root volume."""
self.vimconn.cinder = CopyingMock()
self.vimconn.cinder.volumes.create.return_value.id = volume_id2
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("a")
disk = {"size": 10, "keep": True}
block_device_mapping = {}
self.vimconn._prepare_non_root_persistent_volumes(
name,
disk,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
base_disk_index,
existing_vim_volumes,
_call_mock_update_block_device_mapping[0].kwargs["created_items"], {}
)
+ @patch.object(vimconnector, "update_block_device_mapping")
+ def test_new_shared_volumes(self, mock_update_block_device_mapping):
+ """Create shared volume."""
+
+ class MyVolume:
+ name = "my-shared-volume"
+ id = volume_id4
+ availability_zone = ["nova"]
+
+ self.vimconn.storage_availability_zone = ["nova"]
+ self.vimconn.cinder.volumes.create.return_value = MyVolume()
+ shared_volume_data = {"size": 10, "name": "my-shared-volume"}
+ result = self.vimconn.new_shared_volumes(shared_volume_data)
+ self.vimconn.cinder.volumes.create.assert_called_once_with(
+ size=10,
+ name="my-shared-volume",
+ volume_type="multiattach",
+ availability_zone=["nova"],
+ )
+ self.assertEqual(result[0], "my-shared-volume")
+ self.assertEqual(result[1], volume_id4)
+
@patch.object(vimconnector, "update_block_device_mapping")
def test_prepare_persistent_root_volumes_create_raise_exception(
self, mock_update_block_device_mapping
):
"""Create persistent root volume raise exception."""
self.vimconn.cinder.volumes.create.side_effect = Exception
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("a")
disk = {"size": 10, "image_id": image_id}
block_device_mapping = {}
with self.assertRaises(Exception):
result = self.vimconn._prepare_persistent_root_volumes(
name,
- vm_av_zone,
+ storage_av_zone,
disk,
base_disk_index,
block_device_mapping,
):
"""Create persistent non-root volume raise exception."""
self.vimconn.cinder.volumes.create.side_effect = Exception
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
base_disk_index = ord("b")
disk = {"size": 10}
block_device_mapping = {}
self.vimconn._prepare_non_root_persistent_volumes(
name,
disk,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
base_disk_index,
existing_vim_volumes,
f"volume:{volume_id3}": True,
}
self.vimconn.cinder.volumes.get.side_effect = [
- Status("processing"),
- Status("available"),
- Status("available"),
+ Volume("processing"),
+ Volume("available"),
+ Volume("available"),
]
result = self.vimconn._wait_for_created_volumes_availability(
{"id": "44e0e83-b9uu-4akk-t234-p9cc4811bd4a"},
]
self.vimconn.cinder.volumes.get.side_effect = [
- Status("processing"),
- Status("available"),
- Status("available"),
+ Volume("processing"),
+ Volume("available", "multiattach"),
+ Volume("available"),
]
result = self.vimconn._wait_for_existing_volumes_availability(
elapsed_time = 1805
created_items = {f"volume:{volume_id2}": True}
self.vimconn.cinder.volumes.get.side_effect = [
- Status("processing"),
- Status("processing"),
+ Volume("processing"),
+ Volume("processing"),
]
with patch("time.sleep", mock_sleep):
result = self.vimconn._wait_for_created_volumes_availability(
elapsed_time = 1805
existing_vim_volumes = [{"id": volume_id2}]
self.vimconn.cinder.volumes.get.side_effect = [
- Status("processing"),
- Status("processing"),
+ Volume("processing"),
+ Volume("processing"),
]
result = self.vimconn._wait_for_existing_volumes_availability(
existing_vim_volumes = []
created_items = {}
block_device_mapping = {}
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
mock_root_volumes.return_value = root_vol_id
mock_created_vol_availability.return_value = 10
name,
existing_vim_volumes,
created_items,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
disk_list2,
)
self.assertEqual(mock_non_root_volumes.call_count, 1)
mock_root_volumes.assert_called_once_with(
name="basicvm",
- vm_av_zone=["nova"],
+ storage_av_zone=["nova"],
disk={"size": 10, "image_id": image_id},
base_disk_index=97,
block_device_mapping={},
mock_non_root_volumes.assert_called_once_with(
name="basicvm",
disk={"size": 20},
- vm_av_zone=["nova"],
+ storage_av_zone=["nova"],
base_disk_index=98,
block_device_mapping={},
existing_vim_volumes=[],
"""Timeout exceeded while waiting for disks."""
existing_vim_volumes = []
created_items = {}
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
block_device_mapping = {}
mock_root_volumes.return_value = root_vol_id
name,
existing_vim_volumes,
created_items,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
disk_list2,
)
self.assertEqual(mock_non_root_volumes.call_count, 1)
mock_root_volumes.assert_called_once_with(
name="basicvm",
- vm_av_zone=["nova"],
+ storage_av_zone=["nova"],
disk={"size": 10, "image_id": image_id},
base_disk_index=97,
block_device_mapping={},
mock_non_root_volumes.assert_called_once_with(
name="basicvm",
disk={"size": 20},
- vm_av_zone=["nova"],
+ storage_av_zone=["nova"],
base_disk_index=98,
block_device_mapping={},
existing_vim_volumes=[],
existing_vim_volumes = []
created_items = {}
block_device_mapping = {}
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
mock_created_vol_availability.return_value = 2
mock_existing_vol_availability.return_value = 3
name,
existing_vim_volumes,
created_items,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
disk_list,
)
"""Persistent root volumes preparation raises error."""
existing_vim_volumes = []
created_items = {}
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
block_device_mapping = {}
mock_root_volumes.side_effect = Exception()
name,
existing_vim_volumes,
created_items,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
disk_list2,
)
mock_existing_vol_availability.assert_not_called()
mock_root_volumes.assert_called_once_with(
name="basicvm",
- vm_av_zone=["nova"],
+ storage_av_zone=["nova"],
disk={"size": 10, "image_id": image_id},
base_disk_index=97,
block_device_mapping={},
"""Non-root volumes preparation raises error."""
existing_vim_volumes = []
created_items = {}
- vm_av_zone = ["nova"]
+ storage_av_zone = ["nova"]
block_device_mapping = {}
mock_root_volumes.return_value = root_vol_id
name,
existing_vim_volumes,
created_items,
- vm_av_zone,
+ storage_av_zone,
block_device_mapping,
disk_list2,
)
self.assertEqual(mock_non_root_volumes.call_count, 1)
mock_root_volumes.assert_called_once_with(
name="basicvm",
- vm_av_zone=["nova"],
+ storage_av_zone=["nova"],
disk={"size": 10, "image_id": image_id},
base_disk_index=97,
block_device_mapping={},
mock_non_root_volumes.assert_called_once_with(
name="basicvm",
disk={"size": 20},
- vm_av_zone=["nova"],
+ storage_av_zone=["nova"],
base_disk_index=98,
block_device_mapping={},
existing_vim_volumes=[],
name=name,
existing_vim_volumes=[],
created_items={},
- vm_av_zone="nova",
+ storage_av_zone="nova",
block_device_mapping={},
disk_list=disk_list2,
)
name=name,
existing_vim_volumes=[],
created_items={},
- vm_av_zone="nova",
+ storage_av_zone="nova",
block_device_mapping={},
disk_list=disk_list2,
)
name=name,
existing_vim_volumes=[],
created_items={},
- vm_av_zone="nova",
+ storage_av_zone="nova",
block_device_mapping={},
disk_list=disk_list2,
)
name=name,
existing_vim_volumes=[],
created_items={},
- vm_av_zone="nova",
+ storage_av_zone="nova",
block_device_mapping={},
disk_list=disk_list2,
)
},
)
- def test_delete_floating_ip_by_id_floating_ip_raises_nvexception(self):
+ def test_delete_floating_ip_by_id__delete_floating_ip_raises_client_exception__operation_is_successful(
+ self,
+ ):
"""netron delete floating ip raises nvExceptions.ClientException."""
created_items = {
f"floating_ip:{floating_network_vim_id}": True,
"Error deleting floating ip: ClientException: Unknown Error (HTTP Client exception occurred.)"
)
- def test_delete_floating_ip_by_id_floating_ip_raises_vimconnexception(self):
+ def test_delete_floating_ip_by_id__delete_floating_ip_raises_connection_error__operation_fails(
+ self,
+ ):
+ """netron delete floating ip raises nvExceptions.ClientException."""
+ created_items = {
+ f"floating_ip:{floating_network_vim_id}": True,
+ f"port:{port_id}": True,
+ }
+ k_id = floating_network_vim_id
+ k = f"floating_ip:{floating_network_vim_id}"
+ self.vimconn.neutron.delete_floatingip.side_effect = ConnectionError(
+ "Connection exception occurred."
+ )
+ with self.assertRaises(VimConnConnectionException):
+ self.vimconn._delete_floating_ip_by_id(k, k_id, created_items)
+ self.vimconn.neutron.delete_floatingip.assert_called_once_with(k_id)
+ self.assertEqual(
+ created_items,
+ {
+ f"floating_ip:{floating_network_vim_id}": True,
+ f"port:{port_id}": True,
+ },
+ )
+ self.vimconn.logger.error.assert_called_once_with(
+ "Error deleting floating ip: ConnectionError: Connection exception occurred."
+ )
+
+ def test_delete_floating_ip_by_id_floating_ip_raises_vimconn_not_found_exception__operation_is_successful(
+ self,
+ ):
"""netron delete floating ip raises VimConnNotFoundException."""
created_items = {
f"floating_ip:{floating_network_vim_id}": True,
self.vimconn.logger.error.assert_not_called()
self.assertEqual(created_items, expected_created_items)
+ def test_delete_shared_volumes(self):
+ """cinder delete shared volumes"""
+ shared_volume_vim_id = volume_id4
+ self.vimconn.cinder.volumes.get.return_value.status = "available"
+ self.vimconn.delete_shared_volumes(shared_volume_vim_id)
+ self.vimconn.cinder.volumes.get.assert_called_once_with(shared_volume_vim_id)
+ self.vimconn.cinder.volumes.delete.assert_called_once_with(shared_volume_vim_id)
+ self.vimconn.logger.error.assert_not_called()
+
def test_delete_volumes_by_id_with_cinder_get_volume_raise_exception(self):
"""cinder get volume raises exception."""
created_items = {
)
self.assertEqual(created_items, expected_created_items)
- def test_delete_volumes_by_id_with_cinder_delete_volume_raise_exception(self):
+ def test_delete_volumes_by_id_with_cinder__delete_volume_raise_client_exception__exception_is_not_raised(
+ self,
+ ):
"""cinder delete volume raises exception."""
created_items = {
f"floating_ip:{floating_network_vim_id}": True,
k = f"volume:{volume_id}"
k_id = volume_id
self.vimconn.cinder.volumes.get.return_value.status = "available"
- self.vimconn.cinder.volumes.delete.side_effect = nvExceptions.ClientException(
- "Connection aborted."
+ self.vimconn.cinder.volumes.delete.side_effect = cExceptions.ClientException(
+ 403, "Connection aborted."
)
result = self.vimconn._delete_volumes_by_id_wth_cinder(
k, k_id, volumes_to_hold, created_items
self.vimconn.cinder.volumes.get.assert_called_once_with(k_id)
self.vimconn.cinder.volumes.delete.assert_called_once_with(k_id)
self.vimconn.logger.error.assert_called_once_with(
- "Error deleting volume: ClientException: Unknown Error (HTTP Connection aborted.)"
+ "Error deleting volume: ClientException: Connection aborted. (HTTP 403)"
+ )
+ self.assertEqual(created_items, expected_created_items)
+
+ def test_delete_volumes_by_id_with_cinder__delete_volume_raise_connection_exception__exception_is_raised(
+ self,
+ ):
+ """cinder delete volume raises exception."""
+ created_items = {
+ f"floating_ip:{floating_network_vim_id}": True,
+ f"volume:{volume_id2}": True,
+ f"volume:{volume_id}": True,
+ f"port:{port_id}": None,
+ }
+ expected_created_items = {
+ f"floating_ip:{floating_network_vim_id}": True,
+ f"volume:{volume_id2}": True,
+ f"volume:{volume_id}": True,
+ f"port:{port_id}": None,
+ }
+ volumes_to_hold = []
+ k = f"volume:{volume_id}"
+ k_id = volume_id
+ self.vimconn.cinder.volumes.get.return_value.status = "available"
+ self.vimconn.cinder.volumes.delete.side_effect = cExceptions.ConnectionError(
+ "Connection failed."
+ )
+ with self.assertRaises(VimConnConnectionException):
+ result = self.vimconn._delete_volumes_by_id_wth_cinder(
+ k, k_id, volumes_to_hold, created_items
+ )
+ self.assertEqual(result, None)
+ self.vimconn.cinder.volumes.get.assert_called_once_with(k_id)
+ self.vimconn.cinder.volumes.delete.assert_called_once_with(k_id)
+ self.vimconn.logger.error.assert_called_once_with(
+ "Error deleting volume: ConnectionError: Connection failed."
)
self.assertEqual(created_items, expected_created_items)
result = self.vimconn._delete_volumes_by_id_wth_cinder(
k, k_id, volumes_to_hold, created_items
)
- self.assertEqual(result, None)
+ self.assertEqual(result, False)
self.vimconn.cinder.volumes.get.assert_not_called()
self.vimconn.cinder.volumes.delete.assert_not_called()
self.vimconn.logger.error.assert_not_called()
def test_delete_ports_by_id_by_neutron(self):
"""neutron delete ports."""
k_id = port_id
- self.vimconn.neutron.list_ports.return_value = {
- "ports": [{"id": port_id}, {"id": port2_id}]
- }
-
self.vimconn._delete_ports_by_id_wth_neutron(k_id)
- self.vimconn.neutron.list_ports.assert_called_once()
self.vimconn.neutron.delete_port.assert_called_once_with(k_id)
self.vimconn.logger.error.assert_not_called()
- def test_delete_ports_by_id_by_neutron_id_not_in_port_list(self):
- """port id not in the port list."""
- k_id = volume_id
- self.vimconn.neutron.list_ports.return_value = {
- "ports": [{"id": port_id}, {"id": port2_id}]
- }
-
- self.vimconn._delete_ports_by_id_wth_neutron(k_id)
- self.vimconn.neutron.list_ports.assert_called_once()
- self.vimconn.neutron.delete_port.assert_not_called()
- self.vimconn.logger.error.assert_not_called()
-
- def test_delete_ports_by_id_by_neutron_list_port_raise_exception(self):
- """neutron list port raises exception."""
- k_id = port_id
- self.vimconn.neutron.list_ports.side_effect = nvExceptions.ClientException(
- "Connection aborted."
- )
- self.vimconn._delete_ports_by_id_wth_neutron(k_id)
- self.vimconn.neutron.list_ports.assert_called_once()
- self.vimconn.neutron.delete_port.assert_not_called()
- self.vimconn.logger.error.assert_called_once_with(
- "Error deleting port: ClientException: Unknown Error (HTTP Connection aborted.)"
- )
-
def test_delete_ports_by_id_by_neutron_delete_port_raise_exception(self):
"""neutron delete port raises exception."""
k_id = port_id
- self.vimconn.neutron.list_ports.return_value = {
- "ports": [{"id": port_id}, {"id": port2_id}]
- }
self.vimconn.neutron.delete_port.side_effect = nvExceptions.ClientException(
"Connection aborted."
)
self.vimconn._delete_ports_by_id_wth_neutron(k_id)
- self.vimconn.neutron.list_ports.assert_called_once()
self.vimconn.neutron.delete_port.assert_called_once_with(k_id)
self.vimconn.logger.error.assert_called_once_with(
"Error deleting port: ClientException: Unknown Error (HTTP Connection aborted.)"
@patch.object(vimconnector, "_get_item_name_id")
@patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
@patch.object(vimconnector, "_delete_floating_ip_by_id")
- def test_delete_created_items_delete_vol_raises(
+ def test_delete_created_items__delete_vol_raises_connection_error__operation_fails(
self,
mock_delete_floating_ip_by_id,
mock_delete_volumes_by_id_wth_cinder,
("floating_ip", f"{floating_network_vim_id}"),
("volume", f"{volume_id}"),
]
- mock_delete_volumes_by_id_wth_cinder.side_effect = ConnectionError(
- "Connection failed."
+ mock_delete_volumes_by_id_wth_cinder.side_effect = (
+ neExceptions.ConnectionFailed("Connection failed.")
)
volumes_to_hold = []
keep_waiting = False
- result = self.vimconn._delete_created_items(
- created_items, volumes_to_hold, keep_waiting
- )
- self.assertEqual(result, False)
+ with self.assertRaises(VimConnConnectionException):
+ result = self.vimconn._delete_created_items(
+ created_items, volumes_to_hold, keep_waiting
+ )
+ self.assertEqual(result, None)
self.assertEqual(mock_get_item_name_id.call_count, 2)
mock_delete_volumes_by_id_wth_cinder.assert_called_once_with(
f"volume:{volume_id}", f"{volume_id}", [], created_items
@patch.object(vimconnector, "_get_item_name_id")
@patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
@patch.object(vimconnector, "_delete_floating_ip_by_id")
- def test_delete_created_items_delete_fip_raises(
+ def test_delete_created_items__delete_fip_raises_connection_error__operation_fails(
self,
mock_delete_floating_ip_by_id,
mock_delete_volumes_by_id_wth_cinder,
)
volumes_to_hold = []
keep_waiting = True
- result = self.vimconn._delete_created_items(
- created_items, volumes_to_hold, keep_waiting
- )
- self.assertEqual(result, True)
- self.assertEqual(mock_get_item_name_id.call_count, 2)
- mock_delete_volumes_by_id_wth_cinder.assert_called_once_with(
- f"volume:{volume_id}", f"{volume_id}", [], created_items
- )
+ with self.assertRaises(VimConnConnectionException):
+ result = self.vimconn._delete_created_items(
+ created_items, volumes_to_hold, keep_waiting
+ )
+ self.assertEqual(result, None)
+ self.assertEqual(mock_get_item_name_id.call_count, 1)
+ mock_delete_volumes_by_id_wth_cinder.assert_not_called()
mock_delete_floating_ip_by_id.assert_called_once_with(
f"floating_ip:{floating_network_vim_id}",
f"{floating_network_vim_id}",
@patch.object(vimconnector, "_get_item_name_id")
@patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
@patch.object(vimconnector, "_delete_floating_ip_by_id")
- def test_delete_created_items_get_item_name_raises(
+ def test_delete_created_items_get_item_name_raises_type_error__operation_fails(
self,
mock_delete_floating_ip_by_id,
mock_delete_volumes_by_id_wth_cinder,
]
volumes_to_hold = []
keep_waiting = False
- result = self.vimconn._delete_created_items(
- created_items, volumes_to_hold, keep_waiting
- )
- self.assertEqual(result, False)
- self.assertEqual(mock_get_item_name_id.call_count, 2)
+ with self.assertRaises(VimConnException):
+ result = self.vimconn._delete_created_items(
+ created_items, volumes_to_hold, keep_waiting
+ )
+ self.assertEqual(result, None)
+ self.assertEqual(mock_get_item_name_id.call_count, 1)
mock_delete_volumes_by_id_wth_cinder.assert_not_called()
mock_delete_floating_ip_by_id.assert_not_called()
_call_logger = self.vimconn.logger.error.call_args_list
self.assertEqual(_call_logger[0][0], ("Error deleting 3: Invalid Type",))
- self.assertEqual(
- _call_logger[1][0],
- (f"Error deleting volume{volume_id}: Invalid attribute",),
- )
@patch.object(vimconnector, "_get_item_name_id")
@patch.object(vimconnector, "_delete_volumes_by_id_wth_cinder")
@patch("time.sleep")
@patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
- @patch.object(vimconnector, "_format_exception")
@patch.object(vimconnector, "_reload_connection")
@patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
@patch.object(vimconnector, "_delete_created_items")
- def test_delete_vminstance_extract_items_wth_keep_raises(
+ def test_delete_vminstance__extract_items_wth_keep_raises_attributeerror__raise_vimconnexception(
self,
mock_delete_created_items,
mock_delete_vm_ports_attached_to_network,
mock_reload_connection,
- mock_format_exception,
mock_extract_items_wth_keep_flag_from_created_items,
mock_sleep,
):
mock_extract_items_wth_keep_flag_from_created_items.side_effect = AttributeError
volumes_to_hold = []
mock_delete_created_items.return_value = False
- with self.assertRaises(AttributeError):
+ with self.assertRaises(VimConnException):
self.vimconn.delete_vminstance(
vm_id, initial_created_items, volumes_to_hold
)
self.vimconn.nova.servers.delete.assert_not_called()
mock_delete_created_items.assert_not_called()
mock_sleep.assert_not_called()
- mock_format_exception.assert_not_called()
mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
initial_created_items
)
@patch.object(vimconnector, "_reload_connection")
@patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
@patch.object(vimconnector, "_delete_created_items")
- def test_delete_vminstance_delete_created_items_raises(
+ def test_delete_vminstance__delete_created_items_returns_true__delete_created_items_called_several_times(
self,
mock_delete_created_items,
mock_delete_vm_ports_attached_to_network,
mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
mock_sleep = MagicMock()
volumes_to_hold = []
- err = ConnectionError("ClientException occurred.")
- mock_delete_created_items.side_effect = err
- with self.assertRaises(ConnectionError) as err:
- self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
- self.assertEqual(str(err), "ClientException occurred.")
+ mock_delete_created_items.side_effect = [True, False]
+ self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
mock_reload_connection.assert_called_once()
mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
self.vimconn.nova.servers.delete.assert_called_once_with(vm_id)
- mock_delete_created_items.assert_called_once()
+ self.assertEqual(mock_delete_created_items.call_count, 2)
mock_sleep.assert_not_called()
mock_extract_items_wth_keep_flag_from_created_items.assert_called_once_with(
created_items
@patch("time.sleep")
@patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
- @patch.object(vimconnector, "_format_exception")
@patch.object(vimconnector, "_reload_connection")
@patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
@patch.object(vimconnector, "_delete_created_items")
- def test_delete_vminstance_delete_vm_ports_raises(
+ def test_delete_vminstance__delete_vm_ports_raises_connection_error__raise_vimconnconnectionexception(
self,
mock_delete_created_items,
mock_delete_vm_ports_attached_to_network,
mock_reload_connection,
- mock_format_exception,
mock_extract_items_wth_keep_flag_from_created_items,
mock_sleep,
):
volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
err = ConnectionError("ClientException occurred.")
mock_delete_vm_ports_attached_to_network.side_effect = err
- mock_delete_created_items.side_effect = err
- with self.assertRaises(ConnectionError) as err:
+ mock_delete_created_items.return_value = False
+ with self.assertRaises(VimConnConnectionException):
self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
- self.assertEqual(str(err), "ClientException occurred.")
mock_reload_connection.assert_called_once()
mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
self.vimconn.nova.servers.delete.assert_not_called()
@patch("time.sleep")
@patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
- @patch.object(vimconnector, "_format_exception")
@patch.object(vimconnector, "_reload_connection")
@patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
@patch.object(vimconnector, "_delete_created_items")
- def test_delete_vminstance_nova_server_delete_raises(
+ def test_delete_vminstance__nova_server_delete_raises_clientexception__raise_vimconn_unexpected_response(
self,
mock_delete_created_items,
mock_delete_vm_ports_attached_to_network,
mock_reload_connection,
- mock_format_exception,
mock_extract_items_wth_keep_flag_from_created_items,
mock_sleep,
):
created_items = deepcopy(created_items_all_true)
mock_extract_items_wth_keep_flag_from_created_items.return_value = created_items
volumes_to_hold = [f"{volume_id}", f"{volume_id2}"]
- err = VimConnConnectionException("ClientException occurred.")
+ err = nvExceptions.ClientException("ClientException occurred.")
self.vimconn.nova.servers.delete.side_effect = err
mock_delete_created_items.side_effect = err
- with self.assertRaises(VimConnConnectionException) as err:
+ with self.assertRaises(VimConnUnexpectedResponse):
self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
- self.assertEqual(str(err), "ClientException occurred.")
mock_reload_connection.assert_called_once()
mock_delete_vm_ports_attached_to_network.assert_called_once_with(created_items)
self.vimconn.nova.servers.delete.assert_called_once_with(vm_id)
@patch("time.sleep")
@patch.object(vimconnector, "_extract_items_wth_keep_flag_from_created_items")
- @patch.object(vimconnector, "_format_exception")
@patch.object(vimconnector, "_reload_connection")
@patch.object(vimconnector, "_delete_vm_ports_attached_to_network")
@patch.object(vimconnector, "_delete_created_items")
- def test_delete_vminstance_reload_connection_raises(
+ def test_delete_vminstance__reload_connection_raises_connection_error__raises_vimconnconnection_exception(
self,
mock_delete_created_items,
mock_delete_vm_ports_attached_to_network,
mock_reload_connection,
- mock_format_exception,
mock_extract_items_wth_keep_flag_from_created_items,
mock_sleep,
):
err = ConnectionError("ClientException occurred.")
mock_delete_created_items.return_value = False
mock_reload_connection.side_effect = err
- with self.assertRaises(ConnectionError) as err:
+ with self.assertRaises(VimConnConnectionException):
self.vimconn.delete_vminstance(vm_id, created_items, volumes_to_hold)
- self.assertEqual(str(err), "ClientException occurred.")
mock_reload_connection.assert_called_once()
mock_delete_vm_ports_attached_to_network.assert_not_called()
self.vimconn.nova.servers.delete.assert_not_called()
@patch.object(vimconnector, "_reload_connection", new_callable=CopyingMock())
def test_get_monitoring_data(self, mock_reload_conection):
- servers = ["server1", "server2"]
+ flavors = [
+ {"original_name": "flavor1", "id": "367fc1eb-bd22-40f8-a519-ed2fb4e5976b"},
+ {"original_name": "flavor2", "id": "5dcf9732-d17d-40b3-910d-37fc4c5aacc0"},
+ ]
+ servers = [
+ Server(
+ "server1", "ACTIVE", flavors[0], "312200db-42e3-4772-9518-d5db85468392"
+ ),
+ Server(
+ "server2", "ACTIVE", flavors[1], "39a166cf-e4e6-479c-b88c-9ad558cf2cbf"
+ ),
+ ]
ports = {"ports": ["port1", "port2"]}
self.vimconn.nova.servers.list.return_value = servers
self.vimconn.neutron.list_ports.return_value = ports
from typing import Dict, List, Optional, Tuple
from cinderclient import client as cClient
+import cinderclient.exceptions as cExceptions
from glanceclient import client as glClient
import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
server_timeout = 1800
+def catch_any_exception(func):
+ def format_exception(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception as e:
+ vimconnector._format_exception(e)
+
+ return format_exception
+
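# Illustrative usage (not part of the patch): the decorator funnels any exception of
# the wrapped method into vimconnector._format_exception, which re-raises it as the
# matching vimconn.VimConn* exception:
#
#   @catch_any_exception
#   def delete_flavor(self, flavor_id):
#       ...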
+
class SafeDumper(yaml.SafeDumper):
def represent_data(self, data):
# Openstack APIs use custom subclasses of dict and YAML safe dumper
self.persistent_info = persistent_info
self.availability_zone = persistent_info.get("availability_zone", None)
+ self.storage_availability_zone = None
+ self.vm_av_zone = None
self.session = persistent_info.get("session", {"reload_client": True})
self.my_tenant_id = self.session.get("my_tenant_id")
self.nova = self.session.get("nova")
version = self.config.get("microversion")
if not version:
- version = "2.1"
+ version = "2.60"
# added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
# Titanium cloud and StarlingX
# Types. Also, abstract vimconnector should call the validation
# method before the implemented VIM connectors are called.
- def _format_exception(self, exception):
+ @staticmethod
+ def _format_exception(exception):
"""Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause"""
message_error = str(exception)
tip = ""
(
neExceptions.NetworkNotFoundClient,
nvExceptions.NotFound,
+ nvExceptions.ResourceNotFound,
ksExceptions.NotFound,
gl1Exceptions.HTTPNotFound,
+ cExceptions.NotFound,
),
):
raise vimconn.VimConnNotFoundException(
ConnectionError,
ksExceptions.ConnectionError,
neExceptions.ConnectionFailed,
+ cExceptions.ConnectionError,
),
):
if type(exception).__name__ == "SSLError":
KeyError,
nvExceptions.BadRequest,
ksExceptions.BadRequest,
+ gl1Exceptions.BadRequest,
+ cExceptions.BadRequest,
),
):
+ if message_error == "OS-EXT-SRV-ATTR:host":
+ tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
+ raise vimconn.VimConnInsufficientCredentials(
+ type(exception).__name__ + ": " + message_error + tip
+ )
raise vimconn.VimConnException(
type(exception).__name__ + ": " + message_error
)
+
elif isinstance(
exception,
(
nvExceptions.ClientException,
ksExceptions.ClientException,
neExceptions.NeutronException,
+ cExceptions.ClientException,
),
):
raise vimconn.VimConnUnexpectedResponse(
elif isinstance(exception, vimconn.VimConnException):
raise exception
else: # ()
- self.logger.error("General Exception " + message_error, exc_info=True)
+ logger = logging.getLogger("ro.vim.openstack")
+ logger.error("General Exception " + message_error, exc_info=True)
- raise vimconn.VimConnConnectionException(
+ raise vimconn.VimConnException(
type(exception).__name__ + ": " + message_error
)
"Not found security group {} for this tenant".format(sg)
)
+ def _find_nova_server(self, vm_id):
+ """
+ Returns the VM instance from Openstack, completed with its flavor ID.
+ Callers should use this helper instead of nova.servers.find, because with
+ microversion>=2.47 the server's flavor field no longer includes the flavor ID.
+ """
+ try:
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
+ # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+ server_dict = server.to_dict()
+ try:
+ if server_dict["flavor"].get("original_name"):
+ server_dict["flavor"]["id"] = self.nova.flavors.find(
+ name=server_dict["flavor"]["original_name"]
+ ).id
+ except nClient.exceptions.NotFound as e:
+ self.logger.warning(str(e.message))
+ return server_dict
+ except (
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
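# For reference (an assumption based on Nova API behavior, not this patch): with
# microversion>=2.47, server.to_dict()["flavor"] resembles
#   {"original_name": "m1.small", "ram": 2048, "vcpus": 1, ...}
# with no "id" key, which is why _find_nova_server resolves the ID by flavor name.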
def check_vim_connectivity(self):
# just get network list to check connectivity and credentials
self.get_network_list(filter_dict={})
Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
"""
self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
-
try:
self._reload_connection()
def new_tenant(self, tenant_name, tenant_description):
"""Adds a new tenant to openstack VIM. Returns the tenant identifier"""
self.logger.debug("Adding a new tenant name: %s", tenant_name)
-
try:
self._reload_connection()
def delete_tenant(self, tenant_id):
"""Delete a tenant from openstack VIM. Returns the old tenant identifier"""
self.logger.debug("Deleting tenant %s from VIM", tenant_id)
-
try:
self._reload_connection()
self.keystone.tenants.delete(tenant_id)
return tenant_id
+
except (
ksExceptions.ConnectionError,
ksExceptions.ClientException,
"dataplane_physical_net"
)
- # if it is non empty list, use the first value. If it is a string use the value directly
+ # if it is non-empty list, use the first value. If it is a string use the value directly
if (
isinstance(provider_physical_network, (tuple, list))
and provider_physical_network
)
if not self.config.get("multisegment_support"):
- network_dict[
- "provider:physical_network"
- ] = provider_physical_network
+ network_dict["provider:physical_network"] = (
+ provider_physical_network
+ )
if (
provider_network_profile
and "network-type" in provider_network_profile
):
- network_dict[
- "provider:network_type"
- ] = provider_network_profile["network-type"]
+ network_dict["provider:network_type"] = (
+ provider_network_profile["network-type"]
+ )
else:
network_dict["provider:network_type"] = self.config.get(
"dataplane_network_type", "vlan"
if not ip_profile.get("subnet_address"):
# Fake subnet is required
- subnet_rand = random.randint(0, 255)
+ subnet_rand = random.SystemRandom().randint(0, 255)
ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
if "ip_version" not in ip_profile:
ip_str = str(netaddr.IPAddress(ip_int))
subnet["allocation_pools"][0]["end"] = ip_str
+ if (
+ ip_profile.get("ipv6_address_mode")
+ and ip_profile["ip_version"] != "IPv4"
+ ):
+ subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+ # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+ # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+ subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
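# Illustrative only: a hypothetical ip_profile such as
#   {"ip_version": "IPv6", "subnet_address": "2001:db8::/64",
#    "ipv6_address_mode": "dhcpv6-stateful"}
# yields a subnet created with both ipv6_address_mode and ipv6_ra_mode set to
# "dhcpv6-stateful".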
# self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
self.neutron.create_subnet({"subnet": subnet})
if k_item == "l2gwconn":
self.neutron.delete_l2_gateway_connection(k_id)
+
+ except (neExceptions.ConnectionFailed, ConnectionError) as e2:
+ self.logger.error(
+ "Error deleting l2 gateway connection: {}: {}".format(
+ type(e2).__name__, e2
+ )
+ )
+ self._format_exception(e2)
except Exception as e2:
self.logger.error(
"Error deleting l2 gateway connection: {}: {}".format(
Returns the network list of dictionaries
"""
self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
-
try:
self._reload_connection()
filter_dict_os = filter_dict.copy()
return net
+ @catch_any_exception
def delete_network(self, net_id, created_items=None):
"""
Removes a tenant network from VIM and its associated elements
k_item, _, k_id = k.partition(":")
if k_item == "l2gwconn":
self.neutron.delete_l2_gateway_connection(k_id)
+
+ except (neExceptions.ConnectionFailed, ConnectionError) as e:
+ self.logger.error(
+ "Error deleting l2 gateway connection: {}: {}".format(
+ type(e).__name__, e
+ )
+ )
+ self._format_exception(e)
except Exception as e:
self.logger.error(
"Error deleting l2 gateway connection: {}: {}".format(
for p in ports["ports"]:
try:
self.neutron.delete_port(p["id"])
+
+ except (neExceptions.ConnectionFailed, ConnectionError) as e:
+ self.logger.error("Error deleting port %s: %s", p["id"], str(e))
+ # If there is connection error, it raises.
+ self._format_exception(e)
except Exception as e:
self.logger.error("Error deleting port %s: %s", p["id"], str(e))
self.neutron.delete_network(net_id)
return net_id
- except (
- neExceptions.ConnectionFailed,
- neExceptions.NetworkNotFoundClient,
- neExceptions.NeutronException,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ except (neExceptions.NetworkNotFoundClient, neExceptions.NotFound) as e:
+ # If network to be deleted is not found, it does not raise.
+ self.logger.warning(
+ f"Error deleting network: {net_id} is not found, {str(e)}"
+ )
def refresh_nets_status(self, net_list):
"""Get the status of the networks
def get_flavor(self, flavor_id):
"""Obtain flavor details from the VIM. Returns the flavor dict details"""
self.logger.debug("Getting flavor '%s'", flavor_id)
-
try:
self._reload_connection()
flavor = self.nova.flavors.find(id=flavor_id)
- # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
return flavor.to_dict()
+
except (
nvExceptions.NotFound,
nvExceptions.ClientException,
)
except (
nvExceptions.NotFound,
+ nvExceptions.BadRequest,
nvExceptions.ClientException,
ksExceptions.ClientException,
ConnectionError,
flavor_data.get("extended"),
)
+ @catch_any_exception
def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
"""Adds a tenant flavor to openstack VIM.
if change_name_if_used is True, it will change name in case of conflict,
retry = 0
max_retries = 3
name_suffix = 0
+ name = flavor_data["name"]
+ while retry < max_retries:
+ retry += 1
+ try:
+ self._reload_connection()
- try:
- name = flavor_data["name"]
- while retry < max_retries:
- retry += 1
- try:
- self._reload_connection()
+ if change_name_if_used:
+ name = self._change_flavor_name(name, name_suffix, flavor_data)
- if change_name_if_used:
- name = self._change_flavor_name(name, name_suffix, flavor_data)
+ ram, vcpus, extra_specs, extended = self._get_flavor_details(
+ flavor_data
+ )
+ if extended:
+ self._process_extended_config_of_flavor(extended, extra_specs)
- ram, vcpus, extra_specs, extended = self._get_flavor_details(
- flavor_data
- )
- if extended:
- self._process_extended_config_of_flavor(extended, extra_specs)
-
- # Create flavor
-
- new_flavor = self.nova.flavors.create(
- name=name,
- ram=ram,
- vcpus=vcpus,
- disk=flavor_data.get("disk", 0),
- ephemeral=flavor_data.get("ephemeral", 0),
- swap=flavor_data.get("swap", 0),
- is_public=flavor_data.get("is_public", True),
- )
+ # Create flavor
- # Add metadata
- if extra_specs:
- new_flavor.set_keys(extra_specs)
+ new_flavor = self.nova.flavors.create(
+ name=name,
+ ram=ram,
+ vcpus=vcpus,
+ disk=flavor_data.get("disk", 0),
+ ephemeral=flavor_data.get("ephemeral", 0),
+ swap=flavor_data.get("swap", 0),
+ is_public=flavor_data.get("is_public", True),
+ )
- return new_flavor.id
+ # Add metadata
+ if extra_specs:
+ new_flavor.set_keys(extra_specs)
- except nvExceptions.Conflict as e:
- if change_name_if_used and retry < max_retries:
- continue
+ return new_flavor.id
- self._format_exception(e)
+ except nvExceptions.Conflict as e:
+ if change_name_if_used and retry < max_retries:
+ continue
- except (
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- ConnectionError,
- KeyError,
- ) as e:
- self._format_exception(e)
+ self._format_exception(e)
+ @catch_any_exception
def delete_flavor(self, flavor_id):
"""Deletes a tenant flavor from openstack VIM. Returns the old flavor_id"""
try:
self._reload_connection()
self.nova.flavors.delete(flavor_id)
-
return flavor_id
- # except nvExceptions.BadRequest as e:
- except (
- nvExceptions.NotFound,
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+
+ except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
+ # If flavor is not found, it does not raise.
+ self.logger.warning(
+ f"Error deleting flavor: {flavor_id} is not found, {str(e.message)}"
+ )
def new_image(self, image_dict):
"""
self.glance.images.update(new_image.id, **metadata_to_load)
return new_image.id
- except (
- nvExceptions.Conflict,
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- ) as e:
- self._format_exception(e)
except (
HTTPException,
gl1Exceptions.HTTPException,
"{}: {} for {}".format(type(e).__name__, e, image_dict["location"]),
http_code=vimconn.HTTP_Bad_Request,
)
+ except Exception as e:
+ self._format_exception(e)
+ @catch_any_exception
def delete_image(self, image_id):
"""Deletes a tenant image from openstack VIM. Returns the old id"""
try:
self.glance.images.delete(image_id)
return image_id
- except (
- nvExceptions.NotFound,
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- gl1Exceptions.CommunicationError,
- gl1Exceptions.HTTPNotFound,
- ConnectionError,
- ) as e: # TODO remove
- self._format_exception(e)
+ except gl1Exceptions.NotFound as e:
+ # If image is not found, it does not raise.
+ self.logger.warning(
+ f"Error deleting image: {image_id} is not found, {str(e)}"
+ )
+ @catch_any_exception
def get_image_id_from_path(self, path):
"""Get the image id from image path in the VIM database. Returns the image_id"""
- try:
- self._reload_connection()
- images = self.glance.images.list()
+ self._reload_connection()
+ images = self.glance.images.list()
- for image in images:
- if image.metadata.get("location") == path:
- return image.id
+ for image in images:
+ if image.metadata.get("location") == path:
+ return image.id
- raise vimconn.VimConnNotFoundException(
- "image with location '{}' not found".format(path)
- )
- except (
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- gl1Exceptions.CommunicationError,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ raise vimconn.VimConnNotFoundException(
+ "image with location '{}' not found".format(path)
+ )
def get_image_list(self, filter_dict={}):
"""Obtain tenant images from VIM
List can be empty
"""
self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
-
try:
self._reload_connection()
# filter_dict_os = filter_dict.copy()
pass
return filtered_list
+
except (
ksExceptions.ClientException,
nvExceptions.ClientException,
self.availability_zone = vim_availability_zones
else:
self.availability_zone = self._get_openstack_availablity_zones()
+ if "storage_availability_zone" in self.config:
+ self.storage_availability_zone = self.config.get(
+ "storage_availability_zone"
+ )
def _get_vm_availability_zone(
self, availability_zone_index, availability_zone_list
if net.get("mac_address"):
port_dict["mac_address"] = net["mac_address"]
- if net.get("ip_address"):
- port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+ ip_dual_list = []
+ if ip_list := net.get("ip_address"):
+ if not isinstance(ip_list, list):
+ ip_list = [ip_list]
+ for ip in ip_list:
+ ip_dict = {"ip_address": ip}
+ ip_dual_list.append(ip_dict)
+ port_dict["fixed_ips"] = ip_dual_list
# TODO add "subnet_id": <subnet_id>
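# Illustrative only: net = {"ip_address": ["10.0.1.5", "2001:db8::5"]} produces
# port_dict["fixed_ips"] = [{"ip_address": "10.0.1.5"}, {"ip_address": "2001:db8::5"}];
# a plain string "10.0.1.5" is wrapped into a single-entry list first.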
def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
"""
new_port = self.neutron.create_port({"port": port_dict})
created_items["port:" + str(new_port["port"]["id"])] = True
- net["mac_adress"] = new_port["port"]["mac_address"]
+ net["mac_address"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
return new_port
def _prepare_persistent_root_volumes(
self,
name: str,
- vm_av_zone: list,
+ storage_av_zone: list,
disk: dict,
base_disk_index: int,
block_device_mapping: dict,
Args:
name (str): Name of VM instance
- vm_av_zone (list): List of availability zones
+ storage_av_zone (list): Storage availability zones
disk (dict): Disk details
base_disk_index (int): Disk index
block_device_mapping (dict): Block device details
# Disk may include only vim_volume_id or only vim_id."
# Use existing persistent root volume finding with volume_id or vim_id
key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
if disk.get(key_id):
block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
existing_vim_volumes.append({"id": disk[key_id]})
-
else:
# Create persistent root volume
volume = self.cinder.volumes.create(
name=name + "vd" + chr(base_disk_index),
imageRef=disk["image_id"],
# Make sure volume is in the same AZ as the VM to be attached to
- availability_zone=vm_av_zone,
+ availability_zone=storage_av_zone,
)
boot_volume_id = volume.id
self.update_block_device_mapping(
"Created volume is not valid, does not have id attribute."
)
+ block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+ if disk.get("multiattach"): # multiattach volumes do not belong to VDUs
+ return
volume_txt = "volume:" + str(volume.id)
if disk.get("keep"):
volume_txt += ":keep"
created_items[volume_txt] = True
- block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+
+ @catch_any_exception
+ def new_shared_volumes(self, shared_volume_data) -> Tuple[str, str]:
+ availability_zone = (
+ self.storage_availability_zone
+ if self.storage_availability_zone
+ else self.vm_av_zone
+ )
+ volume = self.cinder.volumes.create(
+ size=shared_volume_data["size"],
+ name=shared_volume_data["name"],
+ volume_type="multiattach",
+ availability_zone=availability_zone,
+ )
+ return volume.name, volume.id
+
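# Note (an assumption, not stated in the patch): volume_type="multiattach" presumes a
# Cinder volume type with that name and the multiattach property enabled on the VIM.
# test_new_shared_volumes earlier in this change exercises the call with a mocked
# cinder client.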
+ def _prepare_shared_volumes(
+ self,
+ name: str,
+ disk: dict,
+ base_disk_index: int,
+ block_device_mapping: dict,
+ existing_vim_volumes: list,
+ created_items: dict,
+ ):
+ volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
+ if volumes.get(disk["name"]):
+ sv_id = volumes[disk["name"]]
+ max_retries = 3
+ vol_status = ""
+ # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
+ while max_retries:
+ max_retries -= 1
+ volume = self.cinder.volumes.get(sv_id)
+ vol_status = volume.status
+ if volume.status not in ("in-use", "available"):
+ time.sleep(5)
+ continue
+ self.update_block_device_mapping(
+ volume=volume,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ disk=disk,
+ created_items=created_items,
+ )
+ return
+ raise vimconn.VimConnException(
+ "Shared volume is not prepared, status is: {}".format(vol_status),
+ http_code=vimconn.HTTP_Internal_Server_Error,
+ )
def _prepare_non_root_persistent_volumes(
self,
name: str,
disk: dict,
- vm_av_zone: list,
+ storage_av_zone: list,
block_device_mapping: dict,
base_disk_index: int,
existing_vim_volumes: list,
Args:
name (str): Name of VM instance
disk (dict): Disk details
- vm_av_zone (list): List of availability zones
+ storage_av_zone (list): Storage availability zones
block_device_mapping (dict): Block device details
base_disk_index (int): Disk index
existing_vim_volumes (list): Existing disk details
# Non-root persistent volumes
# Disk may include only vim_volume_id or only vim_id."
key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
if disk.get(key_id):
# Use existing persistent volume
block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
existing_vim_volumes.append({"id": disk[key_id]})
-
else:
- # Create persistent volume
+ volume_name = f"{name}vd{chr(base_disk_index)}"
volume = self.cinder.volumes.create(
size=disk["size"],
- name=name + "vd" + chr(base_disk_index),
+ name=volume_name,
# Make sure volume is in the same AZ as the VM to be attached to
- availability_zone=vm_av_zone,
+ availability_zone=storage_av_zone,
)
self.update_block_device_mapping(
volume=volume,
elapsed_time (int): Time spent while waiting
"""
-
while elapsed_time < volume_timeout:
for created_item in created_items:
v, volume_id = (
created_item.split(":")[1],
)
if v == "volume":
- if self.cinder.volumes.get(volume_id).status != "available":
+ volume = self.cinder.volumes.get(volume_id)
+ if (
+ volume.volume_type == "multiattach"
+ and volume.status == "in-use"
+ ):
+ return elapsed_time
+ elif volume.status != "available":
break
else:
# All ready: break from while
while elapsed_time < volume_timeout:
for volume in existing_vim_volumes:
- if self.cinder.volumes.get(volume["id"]).status != "available":
+ v = self.cinder.volumes.get(volume["id"])
+ if v.volume_type == "multiattach" and v.status == "in-use":
+ return elapsed_time
+ elif v.status != "available":
break
else: # all ready: break from while
break
name: str,
existing_vim_volumes: list,
created_items: dict,
- vm_av_zone: list,
+ storage_av_zone: list,
block_device_mapping: dict,
disk_list: list = None,
) -> None:
name (str): Name of Instance
existing_vim_volumes (list): List of existing volumes
created_items (dict): All created items belongs to VM
- vm_av_zone (list): VM availability zone
+ storage_av_zone (list): Storage availability zone
block_device_mapping (dict): Block devices to be attached to VM
disk_list (list): List of disks
base_disk_index = ord("b")
boot_volume_id = None
elapsed_time = 0
-
for disk in disk_list:
if "image_id" in disk:
# Root persistent volume
base_disk_index = ord("a")
boot_volume_id = self._prepare_persistent_root_volumes(
name=name,
- vm_av_zone=vm_av_zone,
+ storage_av_zone=storage_av_zone,
+ disk=disk,
+ base_disk_index=base_disk_index,
+ block_device_mapping=block_device_mapping,
+ existing_vim_volumes=existing_vim_volumes,
+ created_items=created_items,
+ )
+ elif disk.get("multiattach"):
+ self._prepare_shared_volumes(
+ name=name,
disk=disk,
base_disk_index=base_disk_index,
block_device_mapping=block_device_mapping,
self._prepare_non_root_persistent_volumes(
name=name,
disk=disk,
- vm_av_zone=vm_av_zone,
+ storage_av_zone=storage_av_zone,
block_device_mapping=block_device_mapping,
base_disk_index=base_disk_index,
existing_vim_volumes=existing_vim_volumes,
flavor_id,
str(net_list),
)
+ server = None
+ created_items = {}
+ net_list_vim = []
+ # list of external networks to be connected to instance, later on used to create floating_ip
+ external_network = []
+ # List of ports with port-security disabled
+ no_secured_ports = []
+ block_device_mapping = {}
+ existing_vim_volumes = []
+ server_group_id = None
+ scheduller_hints = {}
try:
- server = None
- created_items = {}
- net_list_vim = []
- # list of external networks to be connected to instance, later on used to create floating_ip
- external_network = []
- # List of ports with port-security disabled
- no_secured_ports = []
- block_device_mapping = {}
- existing_vim_volumes = []
- server_group_id = None
- scheduller_hints = {}
-
# Check the Openstack Connection
self._reload_connection()
config_drive, userdata = self._create_user_data(cloud_config)
# Get availability Zone
- vm_av_zone = self._get_vm_availability_zone(
+ self.vm_av_zone = self._get_vm_availability_zone(
availability_zone_index, availability_zone_list
)
+ storage_av_zone = (
+ self.storage_availability_zone
+ if self.storage_availability_zone
+ else self.vm_av_zone
+ )
+
if disk_list:
# Prepare disks
self._prepare_disk_for_vminstance(
name=name,
existing_vim_volumes=existing_vim_volumes,
created_items=created_items,
- vm_av_zone=vm_av_zone,
+ storage_av_zone=storage_av_zone,
block_device_mapping=block_device_mapping,
disk_list=disk_list,
)
flavor_id,
net_list_vim,
self.config.get("security_groups"),
- vm_av_zone,
+ self.vm_av_zone,
self.config.get("keypair"),
userdata,
config_drive,
server_group_id,
)
)
-
# Create VM
server = self.nova.servers.create(
name=name,
nics=net_list_vim,
security_groups=self.config.get("security_groups"),
# TODO remove security_groups in future versions. Already at neutron port
- availability_zone=vm_av_zone,
+ availability_zone=self.vm_av_zone,
key_name=self.config.get("keypair"),
userdata=userdata,
config_drive=config_drive,
def get_vminstance(self, vm_id):
"""Returns the VM instance information from VIM"""
- # self.logger.debug("Getting VM from VIM")
- try:
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
- # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
- return server.to_dict()
- except (
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- nvExceptions.NotFound,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ return self._find_nova_server(vm_id)
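
The patch repeatedly swaps per-method try/except blocks for a catch_any_exception decorator defined elsewhere in the module; a minimal sketch of what it plausibly does, assuming it delegates to self._format_exception exactly as the removed handlers did:

import functools

def catch_any_exception(func):
    """Route any exception raised by the wrapped connector method
    through the connector's common exception translator."""

    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        try:
            return func(self, *args, **kwargs)
        except Exception as e:
            self._format_exception(e)

    return wrapper
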
+ @catch_any_exception
def get_vminstance_console(self, vm_id, console_type="vnc"):
"""
Get a console for the virtual machine
suffix: extra text, e.g. the http path and query string
"""
self.logger.debug("Getting VM CONSOLE from VIM")
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
- try:
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
+ if console_type is None or console_type == "novnc":
+ console_dict = server.get_vnc_console("novnc")
+ elif console_type == "xvpvnc":
+ console_dict = server.get_vnc_console(console_type)
+ elif console_type == "rdp-html5":
+ console_dict = server.get_rdp_console(console_type)
+ elif console_type == "spice-html5":
+ console_dict = server.get_spice_console(console_type)
+ else:
+ raise vimconn.VimConnException(
+ "console type '{}' not allowed".format(console_type),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
- if console_type is None or console_type == "novnc":
- console_dict = server.get_vnc_console("novnc")
- elif console_type == "xvpvnc":
- console_dict = server.get_vnc_console(console_type)
- elif console_type == "rdp-html5":
- console_dict = server.get_rdp_console(console_type)
- elif console_type == "spice-html5":
- console_dict = server.get_spice_console(console_type)
- else:
- raise vimconn.VimConnException(
- "console type '{}' not allowed".format(console_type),
- http_code=vimconn.HTTP_Bad_Request,
- )
+ console_dict1 = console_dict.get("console")
- console_dict1 = console_dict.get("console")
+ if console_dict1:
+ console_url = console_dict1.get("url")
- if console_dict1:
- console_url = console_dict1.get("url")
+ if console_url:
+ # parse console_url
+ protocol_index = console_url.find("//")
+ suffix_index = (
+ console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+ )
+ port_index = (
+ console_url[protocol_index + 2 : suffix_index].find(":")
+ + protocol_index
+ + 2
+ )
- if console_url:
- # parse console_url
- protocol_index = console_url.find("//")
- suffix_index = (
- console_url[protocol_index + 2 :].find("/") + protocol_index + 2
- )
- port_index = (
- console_url[protocol_index + 2 : suffix_index].find(":")
- + protocol_index
- + 2
+ if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+ return (
+ -vimconn.HTTP_Internal_Server_Error,
+ "Unexpected response from VIM",
)
- if protocol_index < 0 or port_index < 0 or suffix_index < 0:
- return (
- -vimconn.HTTP_Internal_Server_Error,
- "Unexpected response from VIM",
- )
-
- console_dict = {
- "protocol": console_url[0:protocol_index],
- "server": console_url[protocol_index + 2 : port_index],
- "port": console_url[port_index:suffix_index],
- "suffix": console_url[suffix_index + 1 :],
- }
- protocol_index += 2
+ console_dict = {
+ "protocol": console_url[0:protocol_index],
+ "server": console_url[protocol_index + 2 : port_index],
+ "port": console_url[port_index:suffix_index],
+ "suffix": console_url[suffix_index + 1 :],
+ }
+ protocol_index += 2
- return console_dict
- raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
- except (
- nvExceptions.NotFound,
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- nvExceptions.BadRequest,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ return console_dict
+ raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
"""Neutron delete ports by id.
k_id (str): Port id in the VIM
"""
try:
- port_dict = self.neutron.list_ports()
- existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
-
- if k_id in existing_ports:
- self.neutron.delete_port(k_id)
+ self.neutron.delete_port(k_id)
+ except (neExceptions.ConnectionFailed, ConnectionError) as e:
+ self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+ # If there is connection error, raise.
+ self._format_exception(e)
except Exception as e:
self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+ def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
+ """Cinder delete volume by id.
+ Args:
+ shared_volume_vim_id (str): ID of shared volume in VIM
+ """
+ elapsed_time = 0
+ try:
+ while elapsed_time < server_timeout:
+ vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
+ if vol_status == "available":
+ self.cinder.volumes.delete(shared_volume_vim_id)
+ return True
+
+ time.sleep(5)
+ elapsed_time += 5
+
+ if elapsed_time >= server_timeout:
+ raise vimconn.VimConnException(
+ "Timeout waiting for volume "
+ + shared_volume_vim_id
+ + " to be available",
+ http_code=vimconn.HTTP_Request_Timeout,
+ )
+
+ except Exception as e:
+ self.logger.error(
+ "Error deleting volume: {}: {}".format(type(e).__name__, e)
+ )
+ self._format_exception(e)
+
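A hedged usage sketch for delete_shared_volumes: it returns True once the volume reaches "available" and is deleted, and raises VimConnException with HTTP_Request_Timeout otherwise ("conn" and the volume ID below are illustrative):

if conn.delete_shared_volumes(shared_volume_vim_id="shared-vol-uuid"):
    print("Shared volume deleted")
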
def _delete_volumes_by_id_wth_cinder(
self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
) -> bool:
"""
try:
if k_id in volumes_to_hold:
- return
+ return False
if self.cinder.volumes.get(k_id).status != "available":
return True
self.cinder.volumes.delete(k_id)
created_items[k] = None
+ except (cExceptions.ConnectionError, ConnectionError) as e:
+ self.logger.error(
+ "Error deleting volume: {}: {}".format(type(e).__name__, e)
+ )
+ self._format_exception(e)
except Exception as e:
self.logger.error(
"Error deleting volume: {}: {}".format(type(e).__name__, e)
self.neutron.delete_floatingip(k_id)
created_items[k] = None
+ except (neExceptions.ConnectionFailed, ConnectionError) as e:
+ self.logger.error(
+ "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+ )
+ self._format_exception(e)
except Exception as e:
self.logger.error(
"Error deleting floating ip: {}: {}".format(type(e).__name__, e)
if k_item == "port":
self._delete_ports_by_id_wth_neutron(k_id)
+ except (neExceptions.ConnectionFailed, ConnectionError) as e:
+ self.logger.error(
+ "Error deleting port: {}: {}".format(type(e).__name__, e)
+ )
+ self._format_exception(e)
except Exception as e:
self.logger.error(
"Error deleting port: {}: {}".format(type(e).__name__, e)
try:
k_item, k_id = self._get_item_name_id(k)
-
if k_item == "volume":
unavailable_vol = self._delete_volumes_by_id_wth_cinder(
k, k_id, volumes_to_hold, created_items
elif k_item == "floating_ip":
self._delete_floating_ip_by_id(k, k_id, created_items)
- except Exception as e:
- self.logger.error("Error deleting {}: {}".format(k, e))
+ except (
+ cExceptions.ConnectionError,
+ neExceptions.ConnectionFailed,
+ ConnectionError,
+ AttributeError,
+ TypeError,
+ ) as e:
+ self.logger.error("Error deleting {}: {}".format(k, e))
+ self._format_exception(e)
+
+ except Exception as e:
+ self.logger.error("Error deleting {}: {}".format(k, e))
return keep_waiting
if len(key.split(":")) == 2
}
+ @catch_any_exception
def delete_vminstance(
self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
) -> None:
if keep_waiting:
time.sleep(1)
elapsed_time += 1
-
- except (
- nvExceptions.NotFound,
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ except (nvExceptions.NotFound, nvExceptions.ResourceNotFound) as e:
+ # If VM does not exist, it does not raise
+ self.logger.warning(f"Error deleting VM: {vm_id} is not found, {str(e)}")
def refresh_vms_status(self, vm_list):
"""Get the status of the virtual machines and their interfaces/ports
self.logger.debug(
"refresh_vms status: Getting tenant VM instance information from VIM"
)
-
for vm_id in vm_list:
vm = {}
return vm_dict
+ @catch_any_exception
def action_vminstance(self, vm_id, action_dict, created_items={}):
"""Send and action over a VM instance from VIM
Returns None or the console dict if the action was successfully sent to the VIM
"""
self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
-
- try:
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
-
- if "start" in action_dict:
- if action_dict["start"] == "rebuild":
- server.rebuild()
- else:
- if server.status == "PAUSED":
- server.unpause()
- elif server.status == "SUSPENDED":
- server.resume()
- elif server.status == "SHUTOFF":
- server.start()
- else:
- self.logger.debug(
- "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
- )
- raise vimconn.VimConnException(
- "Cannot 'start' instance while it is in active state",
- http_code=vimconn.HTTP_Bad_Request,
- )
-
- elif "pause" in action_dict:
- server.pause()
- elif "resume" in action_dict:
- server.resume()
- elif "shutoff" in action_dict or "shutdown" in action_dict:
- self.logger.debug("server status %s", server.status)
- if server.status == "ACTIVE":
- server.stop()
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
+ if "start" in action_dict:
+ if action_dict["start"] == "rebuild":
+ server.rebuild()
+ else:
+ if server.status == "PAUSED":
+ server.unpause()
+ elif server.status == "SUSPENDED":
+ server.resume()
+ elif server.status == "SHUTOFF":
+ server.start()
else:
- self.logger.debug("ERROR: VM is not in Active state")
- raise vimconn.VimConnException(
- "VM is not in active state, stop operation is not allowed",
- http_code=vimconn.HTTP_Bad_Request,
+ self.logger.debug(
+ "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
)
- elif "forceOff" in action_dict:
- server.stop() # TODO
- elif "terminate" in action_dict:
- server.delete()
- elif "createImage" in action_dict:
- server.create_image()
- # "path":path_schema,
- # "description":description_schema,
- # "name":name_schema,
- # "metadata":metadata_schema,
- # "imageRef": id_schema,
- # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
- elif "rebuild" in action_dict:
- server.rebuild(server.image["id"])
- elif "reboot" in action_dict:
- server.reboot() # reboot_type="SOFT"
- elif "console" in action_dict:
- console_type = action_dict["console"]
-
- if console_type is None or console_type == "novnc":
- console_dict = server.get_vnc_console("novnc")
- elif console_type == "xvpvnc":
- console_dict = server.get_vnc_console(console_type)
- elif console_type == "rdp-html5":
- console_dict = server.get_rdp_console(console_type)
- elif console_type == "spice-html5":
- console_dict = server.get_spice_console(console_type)
- else:
raise vimconn.VimConnException(
- "console type '{}' not allowed".format(console_type),
+ "Cannot 'start' instance while it is in active state",
http_code=vimconn.HTTP_Bad_Request,
)
+ elif "pause" in action_dict:
+ server.pause()
+ elif "resume" in action_dict:
+ server.resume()
+ elif "shutoff" in action_dict or "shutdown" in action_dict:
+ self.logger.debug("server status %s", server.status)
+ if server.status == "ACTIVE":
+ server.stop()
+ else:
+ self.logger.debug("ERROR: VM is not in Active state")
+ raise vimconn.VimConnException(
+ "VM is not in active state, stop operation is not allowed",
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ elif "forceOff" in action_dict:
+ server.stop() # TODO
+ elif "terminate" in action_dict:
+ server.delete()
+ elif "createImage" in action_dict:
+ server.create_image()
+ # "path":path_schema,
+ # "description":description_schema,
+ # "name":name_schema,
+ # "metadata":metadata_schema,
+ # "imageRef": id_schema,
+ # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
+ elif "rebuild" in action_dict:
+ server.rebuild(server.image["id"])
+ elif "reboot" in action_dict:
+ server.reboot() # reboot_type="SOFT"
+ elif "console" in action_dict:
+ console_type = action_dict["console"]
- try:
- console_url = console_dict["console"]["url"]
- # parse console_url
- protocol_index = console_url.find("//")
- suffix_index = (
- console_url[protocol_index + 2 :].find("/") + protocol_index + 2
- )
- port_index = (
- console_url[protocol_index + 2 : suffix_index].find(":")
- + protocol_index
- + 2
- )
-
- if protocol_index < 0 or port_index < 0 or suffix_index < 0:
- raise vimconn.VimConnException(
- "Unexpected response from VIM " + str(console_dict)
- )
+ if console_type is None or console_type == "novnc":
+ console_dict = server.get_vnc_console("novnc")
+ elif console_type == "xvpvnc":
+ console_dict = server.get_vnc_console(console_type)
+ elif console_type == "rdp-html5":
+ console_dict = server.get_rdp_console(console_type)
+ elif console_type == "spice-html5":
+ console_dict = server.get_spice_console(console_type)
+ else:
+ raise vimconn.VimConnException(
+ "console type '{}' not allowed".format(console_type),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
- console_dict2 = {
- "protocol": console_url[0:protocol_index],
- "server": console_url[protocol_index + 2 : port_index],
- "port": int(console_url[port_index + 1 : suffix_index]),
- "suffix": console_url[suffix_index + 1 :],
- }
+ try:
+ console_url = console_dict["console"]["url"]
+ # parse console_url
+ protocol_index = console_url.find("//")
+ suffix_index = (
+ console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+ )
+ port_index = (
+ console_url[protocol_index + 2 : suffix_index].find(":")
+ + protocol_index
+ + 2
+ )
- return console_dict2
- except Exception:
+ if protocol_index < 0 or port_index < 0 or suffix_index < 0:
raise vimconn.VimConnException(
"Unexpected response from VIM " + str(console_dict)
)
- return None
- except (
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- nvExceptions.NotFound,
- ConnectionError,
- ) as e:
- self._format_exception(e)
- # TODO insert exception vimconn.HTTP_Unauthorized
+ console_dict2 = {
+ "protocol": console_url[0:protocol_index],
+ "server": console_url[protocol_index + 2 : port_index],
+ "port": int(console_url[port_index + 1 : suffix_index]),
+ "suffix": console_url[suffix_index + 1 :],
+ }
+
+ return console_dict2
+ except Exception:
+ raise vimconn.VimConnException(
+ "Unexpected response from VIM " + str(console_dict)
+ )
+
+ return None
# ###### VIO Specific Changes #########
def _generate_vlanID(self):
return error_value, error_text
- def new_affinity_group(self, affinity_group_data):
- """Adds a server group to VIM
- affinity_group_data contains a dictionary with information, keys:
- name: name in VIM for the server group
- type: affinity or anti-affinity
- scope: Only nfvi-node allowed
- Returns the server group identifier"""
- self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+ def new_classification(self, name, ctype, definition):
+ self.logger.debug(
+ "Adding a new (Traffic) Classification to VIM, named %s", name
+ )
try:
- name = affinity_group_data["name"]
- policy = affinity_group_data["type"]
+ new_class = None
+ self._reload_connection()
+ if ctype not in supportedClassificationTypes:
+ raise vimconn.VimConnNotSupportedException(
+ "OpenStack VIM connector does not support provided "
+ "Classification Type {}, supported ones are: {}".format(
+ ctype, supportedClassificationTypes
+ )
+ )
+
+ if not self._validate_classification(ctype, definition):
+ raise vimconn.VimConnException(
+ "Incorrect Classification definition for the type specified."
+ )
+
+ classification_dict = definition
+ classification_dict["name"] = name
+
+ self.logger.info(
+ "Adding a new (Traffic) Classification to VIM, named {} and {}.".format(
+ name, classification_dict
+ )
+ )
+ new_class = self.neutron.create_sfc_flow_classifier(
+ {"flow_classifier": classification_dict}
+ )
+
+ return new_class["flow_classifier"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self.logger.error("Creation of Classification failed.")
+ self._format_exception(e)
+
+ def get_classification(self, class_id):
+ self.logger.debug(" Getting Classification %s from VIM", class_id)
+ filter_dict = {"id": class_id}
+ class_list = self.get_classification_list(filter_dict)
+
+ if len(class_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Classification '{}' not found".format(class_id)
+ )
+ elif len(class_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Classification with this criteria"
+ )
+
+ classification = class_list[0]
+
+ return classification
+
+ def get_classification_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Classifications from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ filter_dict_os = filter_dict.copy()
self._reload_connection()
- new_server_group = self.nova.server_groups.create(name, policy)
- return new_server_group.id
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ classification_dict = self.neutron.list_sfc_flow_classifiers(
+ **filter_dict_os
+ )
+ classification_list = classification_dict["flow_classifiers"]
+ self.__classification_os2mano(classification_list)
+
+ return classification_list
except (
+ neExceptions.ConnectionFailed,
ksExceptions.ClientException,
- nvExceptions.ClientException,
+ neExceptions.NeutronException,
ConnectionError,
- KeyError,
) as e:
self._format_exception(e)
- def get_affinity_group(self, affinity_group_id):
- """Obtain server group details from the VIM. Returns the server group detais as a dict"""
- self.logger.debug("Getting flavor '%s'", affinity_group_id)
+ def delete_classification(self, class_id):
+ self.logger.debug("Deleting Classification '%s' from VIM", class_id)
+
try:
self._reload_connection()
- server_group = self.nova.server_groups.find(id=affinity_group_id)
+ self.neutron.delete_sfc_flow_classifier(class_id)
- return server_group.to_dict()
+ return class_id
except (
- nvExceptions.NotFound,
- nvExceptions.ClientException,
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
ksExceptions.ClientException,
ConnectionError,
) as e:
self._format_exception(e)
- def delete_affinity_group(self, affinity_group_id):
- """Deletes a server group from the VIM. Returns the old affinity_group_id"""
- self.logger.debug("Getting server group '%s'", affinity_group_id)
+ def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
+ self.logger.debug(
+ "Adding a new Service Function Instance to VIM, named '%s'", name
+ )
+
try:
+ new_sfi = None
self._reload_connection()
- self.nova.server_groups.delete(affinity_group_id)
+ correlation = None
+
+ if sfc_encap:
+ correlation = "nsh"
+
+ if len(ingress_ports) != 1:
+ raise vimconn.VimConnNotSupportedException(
+ "OpenStack VIM connector can only have 1 ingress port per SFI"
+ )
+
+ if len(egress_ports) != 1:
+ raise vimconn.VimConnNotSupportedException(
+ "OpenStack VIM connector can only have 1 egress port per SFI"
+ )
+
+ sfi_dict = {
+ "name": name,
+ "ingress": ingress_ports[0],
+ "egress": egress_ports[0],
+ "service_function_parameters": {"correlation": correlation},
+ }
+ self.logger.info("Adding a new SFI to VIM, {}.".format(sfi_dict))
+ new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
- return affinity_group_id
+ return new_sfi["port_pair"]["id"]
except (
- nvExceptions.NotFound,
+ neExceptions.ConnectionFailed,
ksExceptions.ClientException,
- nvExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ if new_sfi:
+ try:
+ self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
+ except Exception:
+ self.logger.error(
+ "Creation of Service Function Instance failed, with "
+ "subsequent deletion failure as well."
+ )
+
+ self._format_exception(e)
+
+ def get_sfi(self, sfi_id):
+ self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
+ filter_dict = {"id": sfi_id}
+ sfi_list = self.get_sfi_list(filter_dict)
+
+ if len(sfi_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Service Function Instance '{}' not found".format(sfi_id)
+ )
+ elif len(sfi_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Service Function Instance with this criteria"
+ )
+
+ sfi = sfi_list[0]
+
+ return sfi
+
+ def get_sfi_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ self._reload_connection()
+ filter_dict_os = filter_dict.copy()
+
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
+ sfi_list = sfi_dict["port_pairs"]
+ self.__sfi_os2mano(sfi_list)
+
+ return sfi_list
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_sfi(self, sfi_id):
+ self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
+
+ try:
+ self._reload_connection()
+ self.neutron.delete_sfc_port_pair(sfi_id)
+
+ return sfi_id
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def new_sf(self, name, sfis, sfc_encap=True):
+ self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
+
+ new_sf = None
+
+ try:
+ self._reload_connection()
+
+ for instance in sfis:
+ sfi = self.get_sfi(instance)
+
+ if sfi.get("sfc_encap") != sfc_encap:
+ raise vimconn.VimConnNotSupportedException(
+ "OpenStack VIM connector requires all SFIs of the "
+ "same SF to share the same SFC Encapsulation"
+ )
+
+ sf_dict = {"name": name, "port_pairs": sfis}
+
+ self.logger.info("Adding a new SF to VIM, {}.".format(sf_dict))
+ new_sf = self.neutron.create_sfc_port_pair_group(
+ {"port_pair_group": sf_dict}
+ )
+
+ return new_sf["port_pair_group"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ if new_sf:
+ try:
+ new_sf_id = new_sf.get("port_pair_group").get("id")
+ self.neutron.delete_sfc_port_pair_group(new_sf_id)
+ except Exception:
+ self.logger.error(
+ "Creation of Service Function failed, with "
+ "subsequent deletion failure as well."
+ )
+
+ self._format_exception(e)
+
+ def get_sf(self, sf_id):
+ self.logger.debug("Getting Service Function %s from VIM", sf_id)
+ filter_dict = {"id": sf_id}
+ sf_list = self.get_sf_list(filter_dict)
+
+ if len(sf_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Service Function '{}' not found".format(sf_id)
+ )
+ elif len(sf_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Service Function with this criteria"
+ )
+
+ sf = sf_list[0]
+
+ return sf
+
+ def get_sf_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Service Function from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ self._reload_connection()
+ filter_dict_os = filter_dict.copy()
+
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
+ sf_list = sf_dict["port_pair_groups"]
+ self.__sf_os2mano(sf_list)
+
+ return sf_list
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_sf(self, sf_id):
+ self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+
+ try:
+ self._reload_connection()
+ self.neutron.delete_sfc_port_pair_group(sf_id)
+
+ return sf_id
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
+ self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
+
+ new_sfp = None
+
+ try:
+ self._reload_connection()
+ # In networking-sfc the MPLS encapsulation is legacy
+ # should be used when no full SFC Encapsulation is intended
+ correlation = "mpls"
+
+ if sfc_encap:
+ correlation = "nsh"
+
+ sfp_dict = {
+ "name": name,
+ "flow_classifiers": classifications,
+ "port_pair_groups": sfs,
+ "chain_parameters": {"correlation": correlation},
+ }
+
+ if spi:
+ sfp_dict["chain_id"] = spi
+
+ self.logger.info("Adding a new SFP to VIM, {}.".format(sfp_dict))
+ new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
+
+ return new_sfp["port_chain"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ if new_sfp:
+ try:
+ new_sfp_id = new_sfp.get("port_chain").get("id")
+ self.neutron.delete_sfc_port_chain(new_sfp_id)
+ except Exception:
+ self.logger.error(
+ "Creation of Service Function Path failed, with "
+ "subsequent deletion failure as well."
+ )
+
+ self._format_exception(e)
+
+ def get_sfp(self, sfp_id):
+ self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
+
+ filter_dict = {"id": sfp_id}
+ sfp_list = self.get_sfp_list(filter_dict)
+
+ if len(sfp_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Service Function Path '{}' not found".format(sfp_id)
+ )
+ elif len(sfp_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Service Function Path with this criteria"
+ )
+
+ sfp = sfp_list[0]
+
+ return sfp
+
+ def get_sfp_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ self._reload_connection()
+ filter_dict_os = filter_dict.copy()
+
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
+ sfp_list = sfp_dict["port_chains"]
+ self.__sfp_os2mano(sfp_list)
+
+ return sfp_list
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_sfp(self, sfp_id):
+ self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
+
+ try:
+ self._reload_connection()
+ self.neutron.delete_sfc_port_chain(sfp_id)
+
+ return sfp_id
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
ConnectionError,
) as e:
self._format_exception(e)
- def get_vdu_state(self, vm_id):
+ def refresh_sfps_status(self, sfp_list):
+ """Get the status of the service function path
+ Params: the list of sfp identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function path
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
"""
- Getting the state of a vdu
- param:
- vm_id: ID of an instance
+ sfp_dict = {}
+ self.logger.debug(
+ "refresh_sfps status: Getting tenant SFP information from VIM"
+ )
+
+ for sfp_id in sfp_list:
+ sfp = {}
+
+ try:
+ sfp_vim = self.get_sfp(sfp_id)
+
+ if sfp_vim["spi"]:
+ sfp["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sfp["status"] = "OTHER"
+ sfp["error_msg"] = "VIM status reported " + sfp["status"]
+
+ sfp["vim_info"] = self.serialize(sfp_vim)
+
+ if sfp_vim.get("fault"):
+ sfp["error_msg"] = str(sfp_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sfp status: %s", str(e))
+ sfp["status"] = "DELETED"
+ sfp["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sfp status: %s", str(e))
+ sfp["status"] = "VIM_ERROR"
+ sfp["error_msg"] = str(e)
+
+ sfp_dict[sfp_id] = sfp
+
+ return sfp_dict
+
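An illustrative sketch of the mapping refresh_sfps_status builds (IDs and messages are made up; refresh_sfis_status, refresh_sfs_status and refresh_classifications_status below follow the same shape):

{
    "sfp-id-1": {"status": "ACTIVE", "vim_info": "<yaml dump of the VIM record>"},
    "sfp-id-2": {
        "status": "DELETED",
        "error_msg": "Service Function Path 'sfp-id-2' not found",
    },
}
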
+ def refresh_sfis_status(self, sfi_list):
+ """Get the status of the service function instances
+ Params: the list of sfi identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function instance
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sfi_dict = {}
+ self.logger.debug(
+ "refresh_sfis status: Getting tenant sfi information from VIM"
+ )
+
+ for sfi_id in sfi_list:
+ sfi = {}
+
+ try:
+ sfi_vim = self.get_sfi(sfi_id)
+
+ if sfi_vim:
+ sfi["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sfi["status"] = "OTHER"
+ sfi["error_msg"] = "VIM status reported " + sfi["status"]
+
+ sfi["vim_info"] = self.serialize(sfi_vim)
+
+ if sfi_vim.get("fault"):
+ sfi["error_msg"] = str(sfi_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sfi status: %s", str(e))
+ sfi["status"] = "DELETED"
+ sfi["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sfi status: %s", str(e))
+ sfi["status"] = "VIM_ERROR"
+ sfi["error_msg"] = str(e)
+
+ sfi_dict[sfi_id] = sfi
+
+ return sfi_dict
+
+ def refresh_sfs_status(self, sf_list):
+ """Get the status of the service functions
+ Params: the list of sf identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sf_dict = {}
+ self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
+
+ for sf_id in sf_list:
+ sf = {}
+
+ try:
+ sf_vim = self.get_sf(sf_id)
+
+ if sf_vim:
+ sf["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sf["status"] = "OTHER"
+ sf["error_msg"] = "VIM status reported " + sf_vim["status"]
+
+ sf["vim_info"] = self.serialize(sf_vim)
+
+ if sf_vim.get("fault"):
+ sf["error_msg"] = str(sf_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sf status: %s", str(e))
+ sf["status"] = "DELETED"
+ sf["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sf status: %s", str(e))
+ sf["status"] = "VIM_ERROR"
+ sf["error_msg"] = str(e)
+
+ sf_dict[sf_id] = sf
+
+ return sf_dict
+
+ def refresh_classifications_status(self, classification_list):
+ """Get the status of the classifications
+ Params: the list of classification identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this classifier
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ classification_dict = {}
+ self.logger.debug(
+ "refresh_classifications status: Getting tenant classification information from VIM"
+ )
+
+ for classification_id in classification_list:
+ classification = {}
+
+ try:
+ classification_vim = self.get_classification(classification_id)
+
+ if classification_vim:
+ classification["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ classification["status"] = "OTHER"
+ classification["error_msg"] = (
+ "VIM status reported " + classification["status"]
+ )
+
+ classification["vim_info"] = self.serialize(classification_vim)
+
+ if classification_vim.get("fault"):
+ classification["error_msg"] = str(classification_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting classification status: %s", str(e))
+ classification["status"] = "DELETED"
+ classification["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting classification status: %s", str(e))
+ classification["status"] = "VIM_ERROR"
+ classification["error_msg"] = str(e)
+
+ classification_dict[classification_id] = classification
+
+ return classification_dict
+
+ @catch_any_exception
+ def new_affinity_group(self, affinity_group_data):
+ """Adds a server group to VIM
+ affinity_group_data contains a dictionary with information, keys:
+ name: name in VIM for the server group
+ type: affinity or anti-affinity
+ scope: Only nfvi-node allowed
+ Returns the server group identifier"""
+ self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+ name = affinity_group_data["name"]
+ policy = affinity_group_data["type"]
+ self._reload_connection()
+ new_server_group = self.nova.server_groups.create(name, policy)
+ return new_server_group.id
+
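An illustrative affinity_group_data payload, following the docstring below (values are examples only):

affinity_group_data = {
    "name": "vnf-placement-group",  # server group name in the VIM
    "type": "anti-affinity",  # or "affinity"
    "scope": "nfvi-node",  # only nfvi-node is allowed
}
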
+ @catch_any_exception
+ def get_affinity_group(self, affinity_group_id):
+ """Obtain server group details from the VIM. Returns the server group detais as a dict"""
+ self.logger.debug("Getting flavor '%s'", affinity_group_id)
+ self._reload_connection()
+ server_group = self.nova.server_groups.find(id=affinity_group_id)
+ return server_group.to_dict()
+
+ @catch_any_exception
+ def delete_affinity_group(self, affinity_group_id):
+ """Deletes a server group from the VIM. Returns the old affinity_group_id"""
+ self.logger.debug("Getting server group '%s'", affinity_group_id)
+ self._reload_connection()
+ self.nova.server_groups.delete(affinity_group_id)
+ return affinity_group_id
+
+ @catch_any_exception
+ def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+ """Getting the state of a VDU.
+ Args:
+ vm_id (str): ID of an instance
+ host_is_required (Boolean): If True, a KeyError is raised when host info is missing from
+ server_dict, which happens when the VIM account is non-admin.
+ Returns:
+ vdu_data (list): VDU details including state, flavor, host_info, AZ
"""
self.logger.debug("Getting the status of VM")
self.logger.debug("VIM VM ID %s", vm_id)
self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
- server_dict = server.to_dict()
+ server_dict = self._find_nova_server(vm_id)
+ srv_attr = "OS-EXT-SRV-ATTR:host"
+ host_info = (
+ server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+ )
vdu_data = [
server_dict["status"],
server_dict["flavor"]["id"],
- server_dict["OS-EXT-SRV-ATTR:host"],
+ host_info,
server_dict["OS-EXT-AZ:availability_zone"],
]
self.logger.debug("vdu_data %s", vdu_data)
az_check["zone_check"] = True
return az_check
+ @catch_any_exception
def migrate_instance(self, vm_id, compute_host=None):
"""
Migrate a vdu
"""
self._reload_connection()
vm_state = False
- instance_state = self.get_vdu_state(vm_id)
+ instance_state = self.get_vdu_state(vm_id, host_is_required=True)
server_flavor_id = instance_state[1]
server_hypervisor_name = instance_state[2]
server_availability_zone = instance_state[3]
- try:
- server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
- server_flavor_details = [
- server_flavor["ram"],
- server_flavor["disk"],
- server_flavor["vcpus"],
- ]
- if compute_host == server_hypervisor_name:
- raise vimconn.VimConnException(
- "Unable to migrate instance '{}' to the same host '{}'".format(
- vm_id, compute_host
- ),
- http_code=vimconn.HTTP_Bad_Request,
- )
- az_status = self.check_availability_zone(
- server_availability_zone,
- server_flavor_details,
- server_hypervisor_name,
- compute_host,
+ server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+ server_flavor_details = [
+ server_flavor["ram"],
+ server_flavor["disk"],
+ server_flavor["vcpus"],
+ ]
+ if compute_host == server_hypervisor_name:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to the same host '{}'".format(
+ vm_id, compute_host
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
)
- availability_zone_check = az_status["zone_check"]
- available_compute_id = az_status.get("compute_availability")
+ az_status = self.check_availability_zone(
+ server_availability_zone,
+ server_flavor_details,
+ server_hypervisor_name,
+ compute_host,
+ )
+ availability_zone_check = az_status["zone_check"]
+ available_compute_id = az_status.get("compute_availability")
- if availability_zone_check is False:
- raise vimconn.VimConnException(
- "Unable to migrate instance '{}' to a different availability zone".format(
- vm_id
- ),
- http_code=vimconn.HTTP_Bad_Request,
- )
- if available_compute_id is not None:
- self.nova.servers.live_migrate(
- server=vm_id,
- host=available_compute_id,
- block_migration=True,
- disk_over_commit=False,
- )
- state = "MIGRATING"
- changed_compute_host = ""
- if state == "MIGRATING":
- vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
- changed_compute_host = self.get_vdu_state(vm_id)[2]
- if vm_state and changed_compute_host == available_compute_id:
- self.logger.debug(
- "Instance '{}' migrated to the new compute host '{}'".format(
- vm_id, changed_compute_host
- )
- )
- return state, available_compute_id
- else:
- raise vimconn.VimConnException(
- "Migration Failed. Instance '{}' not moved to the new host {}".format(
- vm_id, available_compute_id
- ),
- http_code=vimconn.HTTP_Bad_Request,
+ if availability_zone_check is False:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to a different availability zone".format(
+ vm_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ if available_compute_id is not None:
+ # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
+ self.nova.servers.live_migrate(
+ server=vm_id,
+ host=available_compute_id,
+ block_migration=True,
+ )
+ state = "MIGRATING"
+ changed_compute_host = ""
+ if state == "MIGRATING":
+ vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+ changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
+ 2
+ ]
+ if vm_state and changed_compute_host == available_compute_id:
+ self.logger.debug(
+ "Instance '{}' migrated to the new compute host '{}'".format(
+ vm_id, changed_compute_host
)
+ )
+ return state, available_compute_id
else:
raise vimconn.VimConnException(
- "Compute '{}' not available or does not have enough resources to migrate the instance".format(
- available_compute_id
+ "Migration Failed. Instance '{}' not moved to the new host {}".format(
+ vm_id, available_compute_id
),
http_code=vimconn.HTTP_Bad_Request,
)
- except (
- nvExceptions.BadRequest,
- nvExceptions.ClientException,
- nvExceptions.NotFound,
- ) as e:
- self._format_exception(e)
+ else:
+ raise vimconn.VimConnException(
+ "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+ available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
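
A hedged usage sketch for migrate_instance ("conn" and the host name are illustrative): on success it returns the tuple ("MIGRATING", <target compute host>) once the VM is ACTIVE on the new host, and raises VimConnException otherwise:

state, target_host = conn.migrate_instance(vm_id, compute_host="compute-02")
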
+ @catch_any_exception
def resize_instance(self, vm_id, new_flavor_id):
"""
For resizing the vm based on the given
instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
- try:
- if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
- if old_flavor_disk > new_flavor_disk:
+ if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
+ if old_flavor_disk > new_flavor_disk:
+ raise nvExceptions.BadRequest(
+ 400,
+ message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+ )
+ else:
+ self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+ vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+ if vm_state:
+ instance_resized_status = self.confirm_resize(vm_id)
+ return instance_resized_status
+ else:
raise nvExceptions.BadRequest(
- 400,
- message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+ 409,
+ message="Cannot 'resize' vm_state is in ERROR",
)
- else:
- self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
- vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
- if vm_state:
- instance_resized_status = self.confirm_resize(vm_id)
- return instance_resized_status
- else:
- raise nvExceptions.BadRequest(
- 409,
- message="Cannot 'resize' vm_state is in ERROR",
- )
- else:
- self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
- raise nvExceptions.BadRequest(
- 409,
- message="Cannot 'resize' instance while it is in vm_state resized",
- )
- except (
- nvExceptions.BadRequest,
- nvExceptions.ClientException,
- nvExceptions.NotFound,
- ) as e:
- self._format_exception(e)
+ else:
+ self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
+ raise nvExceptions.BadRequest(
+ 409,
+ message="Cannot 'resize' instance while it is in vm_state resized",
+ )
def confirm_resize(self, vm_id):
"""
self.logger.debug("Getting servers and ports data from Openstack VIMs.")
self._reload_connection()
all_servers = self.nova.servers.list(detailed=True)
+ try:
+ for server in all_servers:
+ if server.flavor.get("original_name"):
+ server.flavor["id"] = self.nova.flavors.find(
+ name=server.flavor["original_name"]
+ ).id
+ except nClient.exceptions.NotFound as e:
+ self.logger.warning(str(e.message))
all_ports = self.neutron.list_ports()
return all_servers, all_ports
- except (
- vimconn.VimConnException,
- vimconn.VimConnNotFoundException,
- vimconn.VimConnConnectionException,
- ) as e:
+ except Exception as e:
raise vimconn.VimConnException(
f"Exception in monitoring while getting VMs and ports status: {str(e)}"
)
"x-vcloud-authorization"
],
}
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"
+ )
response = self.perform_request(
req_type="POST", url=catalog_href, headers=headers, data=data
).group(1)
# cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"
+ )
vdc_id = vdc.get("id").split(":")[-1]
instantiate_vapp_href = (
"{}/api/vdc/{}/action/instantiateVAppTemplate".format(
child.attrib["type"]
== "application/vnd.vmware.vcloud.orgNetwork+xml"
):
- network_list[
- child.attrib["href"].split("/")[-1:][0]
- ] = child.attrib["name"]
+ network_list[child.attrib["href"].split("/")[-1:][0]] = (
+ child.attrib["name"]
+ )
org_dict["networks"] = network_list
if child.attrib["type"] == "application/vnd.vmware.vcloud.catalog+xml":
- catalog_list[
- child.attrib["href"].split("/")[-1:][0]
- ] = child.attrib["name"]
+ catalog_list[child.attrib["href"].split("/")[-1:][0]] = (
+ child.attrib["name"]
+ )
org_dict["catalogs"] = catalog_list
except Exception:
pass
for configuration in child.iter():
tagKey = configuration.tag.split("}")[1].strip()
if tagKey != "":
- network_configuration[
- tagKey
- ] = configuration.text.strip()
+ network_configuration[tagKey] = (
+ configuration.text.strip()
+ )
except Exception as exp:
self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
</InstantiateVdcTemplateParams>""".format(
vdc_name, vdc_template_ref
)
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"
+ )
response = self.perform_request(
req_type="POST", url=vm_list_rest_call, headers=headers, data=data
)
ip_ranges = scope.getchildren()
for ipblock in ip_ranges:
for block in ipblock:
- parsed_respond[
- block.tag.split("}")[1]
- ] = block.text
+ parsed_respond[block.tag.split("}")[1]] = (
+ block.text
+ )
else:
parsed_respond[tag_key] = scope.text
if link.attrib["rel"] == "edit" and link.attrib[
"href"
].endswith("/disks"):
- vm_virtual_hardware_info[
- "disk_edit_href"
- ] = link.attrib["href"]
+ vm_virtual_hardware_info["disk_edit_href"] = (
+ link.attrib["href"]
+ )
break
parsed_respond["vm_virtual_hardware"] = vm_virtual_hardware_info
)
# Send PUT request to modify disk size
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
+ )
response = self.perform_request(
req_type="PUT", url=disk_href, headers=headers, data=data
].spec.ip.ipAddress
for device in vm_obj.config.hardware.device:
- if type(device) == vim.vm.device.VirtualPCIPassthrough:
+ if isinstance(device, vim.vm.device.VirtualPCIPassthrough):
device_details = {
"devide_id": device.backing.id,
"pciSlotNumber": device.slotInfo.pciSlotNumber,
}
- vm_pci_devices_info[
- device.deviceInfo.label
- ] = device_details
+ vm_pci_devices_info[device.deviceInfo.label] = (
+ device_details
+ )
else:
self.logger.error(
"Can not connect to vCenter while getting "
)
data = response.text
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.networkConfigSection+xml"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.networkConfigSection+xml"
+ )
net_id = self.get_network_id_by_name(net_name)
if not net_id:
raise vimconn.VimConnException(
data = response.text
data = data.split('<Link rel="edit"')[0]
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.networkConnectionSection+xml"
+ )
newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
data = data + new_item + "</NetworkConnectionSection>"
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.networkConnectionSection+xml"
+ )
response = self.perform_request(
req_type="PUT", url=url_rest_call, headers=headers, data=data
data = data + new_item + "</NetworkConnectionSection>"
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.networkConnectionSection+xml"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.networkConnectionSection+xml"
+ )
response = self.perform_request(
req_type="PUT", url=url_rest_call, headers=headers, data=data
)
# Send PUT request to modify virtual hardware section with new disk
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1"
+ )
response = self.perform_request(
req_type="PUT", url=disk_href, data=new_data, headers=headers
for vms in vapp.get_all_vms():
vm_id = vms.get("id").split(":")[-1]
- headers[
- "Content-Type"
- ] = "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
+ headers["Content-Type"] = (
+ "application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"
+ )
rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(
self.url, vm_id
)
from copy import deepcopy
import logging
-from random import randrange
+from random import SystemRandom
from uuid import uuid4
from osm_ro_plugin import vimconn
if iface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH") and self.config.get(
"sdn-port-mapping"
):
- compute_index = randrange(len(self.config["sdn-port-mapping"]))
- port_index = randrange(
+ compute_index = SystemRandom().randrange(
+ len(self.config["sdn-port-mapping"])
+ )
+ port_index = SystemRandom().randrange(
len(self.config["sdn-port-mapping"][compute_index]["ports"])
)
interface["compute_node"] = self.config["sdn-port-mapping"][
class VimConnNotImplemented(VimConnException):
- """The method is not implemented by the connected"""
+ """The method is not implemented by the connector"""
def __init__(self, message, http_code=HTTP_Not_Implemented):
VimConnException.__init__(self, message, http_code)
+class VimConnInsufficientCredentials(VimConnException):
+ """The VIM account does not have efficient permissions to perform the requested operation."""
+
+ def __init__(self, message, http_code=HTTP_Unauthorized):
+ VimConnException.__init__(self, message, http_code)
+
+
class VimConnector:
"""Abstract base class for all the VIM connector plugins
These plugins must implement a VimConnector class derived from this
rm -rf dists
mkdir -p pool/RO
mv deb_dist/*.deb pool/RO/
-mkdir -p dists/unstable/RO/binary-amd64/
-apt-ftparchive packages pool/RO > dists/unstable/RO/binary-amd64/Packages
-gzip -9fk dists/unstable/RO/binary-amd64/Packages
+
dist_ro_sdn_odl_of
dist_ro_sdn_onos_of
dist_ro_sdn_onos_vpls
+dist_ro_sdn_tapi
dist_ro_vim_aws
dist_ro_vim_azure
dist_ro_vim_openstack
dist_ro_vim_gcp"
TOX_ENV_LIST="$(echo $PACKAGES | sed "s/ /,/g")"
-PROCESSES=$(expr `nproc --a` / 2)
TOX_PARALLEL_NO_SPINNER=1 tox -e $TOX_ENV_LIST
cp ${vim_plugin}/deb_dist/python3-osm-rovim*.deb deb_dist/
done
-# SDN plugins: DynPac, Ietfl2vpn, Onosof Floodlightof
+# SDN plugins: DynPac, Ietfl2vpn, Onosof, Floodlightof, Transport API (TAPI)
for sdn_plugin in RO-SDN-*
do
cp ${sdn_plugin}/deb_dist/python3-osm-rosdn*.deb deb_dist/
# limitations under the License.
set -e
+echo "Skipping the check of the release notes ..."
+exit 0
+
echo "Checking the presence of release notes ..."
nb_rn=$(git diff --diff-filter=A --name-only HEAD~1 |grep "releasenotes\/notes" |wc -l)
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+security:
+ - |
+ Coverity fix for CWE 330: Use of Insufficiently Random Values
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+security:
+ - |
+ Coverity fix for CWE 688: Function Call With Incorrect Variable or Reference as Argument
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+security:
+ - |
+ Coverity fix for issue CWE 476: NULL Pointer Dereference
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix Bug 2098 - Get VDUs from VNFR when Heal op has no additionalParameters.
+ With this fix, when Heal is requested without vdu or count-index parameters,
+ RO will recreate all VDUs from the VNFR.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix Bug 2272 - NS instantiation fails in some VIMs with a KeyError 'original_name', depending on the Nova microversion.
+ With this fix, the key `original_name` is only used when working with the newer version of the
+ Nova API.
\ No newline at end of file
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+ - |
+ Adding rockcraft.yaml for RO.
+    Runs with Python 3.10 using the Ubuntu 22.04 base image.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    Fix Bug 2304 - Flavor not getting updated in MongoDB nsrs after vertical scaling,
+    because the vertical scale feature did not create an RO task for the flavor.
+    This fix adds the new flavor to nsrs after scaling and adds its reference
+    in vnfrs.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+issues:
+ - |
+    Change Nova API microversion from 2.63 back to 2.1.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10937: Transport API (TAPI) WIM connector for RO
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10972: Support of volume multi-attach
+    Support of volume multi-attach for OpenStack-based VIMs (server groups)
\ No newline at end of file
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10975: Get flavor-id from additionalParams if specified.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+    Feature 10978: Add support for ipv6_address_mode and ipv6_ra_mode in the OpenStack connector.
\ No newline at end of file
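For illustration, a minimal python-neutronclient sketch of how these two subnet attributes reach Neutron; the client handle, session and network_id are assumptions, not code from this change:

    import neutronclient.v2_0.client as neutron_client

    neutron = neutron_client.Client(session=keystone_session)  # assumed session
    # Create an IPv6 subnet with explicit address assignment modes; valid
    # values include slaac, dhcpv6-stateful and dhcpv6-stateless.
    neutron.create_subnet(
        body={
            "subnet": {
                "network_id": network_id,  # assumed to exist
                "ip_version": 6,
                "cidr": "2001:db8::/64",
                "ipv6_address_mode": "slaac",
                "ipv6_ra_mode": "slaac",
            }
        }
    )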
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10979 - Static IPv6 Dual Stack IP Assignment for Openstack VIM
+    This feature enables static IPv6 assignment to VNFs, allowing dual
+    stack IP assignment.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 10980: Service Function Chaining
+    This feature adds the functionality to create service function chains in OpenStack from OSM.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+features:
+ - |
+ Feature 11003 - AZ for Cinder
+    Currently the creation of volumes with Cinder uses the same
+    availability zone as Nova. In some cases, a different AZ may
+    be required in order to select a different storage domain.
\ No newline at end of file
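A hedged python-cinderclient sketch of what a separate Cinder AZ looks like; the `cinder` handle and the AZ name are illustrative assumptions:

    # Create a volume in a storage AZ that differs from the Nova AZ of the VM.
    volume = cinder.volumes.create(
        size=10,                         # size in GiB
        name="data-volume",              # illustrative name
        availability_zone="storage-az",  # Cinder AZ, independent of Nova's
    )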
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    Fixing Bug 2019: This patch deletes the hosts in ONOS associated with a VPLS service.
+    Hosts are identified by their MAC and VLAN.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix Bug 2109
+
+ Fix VIM info DB update after vertical scaling, migrate and operate VNF.
+ Fix the path of the VDU record to update.
+    Fix unexpected keyword argument 'disk_over_commit' in the live_migrate method due to a Nova microversion change (Bug 2262)
+    Handle the OS-EXT-SRV-ATTR:host KeyError, which is raised if the VIM account does not have admin credentials
+ Expand unit tests for vertical_scale, migrate and operate task creations
+
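For context on the disk_over_commit part, a sketch of the microversion-sensitive call in python-novaclient; the session and server id are assumptions:

    from novaclient import client as nova_client

    nova = nova_client.Client("2.25", session=keystone_session)  # assumed session
    # From microversion 2.25 on, live_migrate() no longer accepts
    # disk_over_commit, and block_migration may simply be "auto".
    nova.servers.live_migrate(server_id, host=None, block_migration="auto")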
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    Fix Bug 2158: if the VIM is not reachable, the NS delete operation should fail. Also fix Bug 2254:
+    the VNF scale-up operation should fail if the operation is not completed. Both are addressed by rearranging exceptions.
+
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    Fix Bug 2258: set vim_status to ACTIVE if an operation is successful.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix bug 2268: Authentication error when instantiating NS on GCP.
+    There was a bug in the _reload_connection method of the GCP connector, which is
+    called when trying to reconnect after a previous error: the credentials
+    parameter was missing in the googleapiclient.discovery.build() call.
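A minimal sketch of the corrected call shape; the credential loading is illustrative and not the connector's actual code:

    import googleapiclient.discovery
    from google.oauth2 import service_account

    credentials = service_account.Credentials.from_service_account_file(
        "sa-key.json"  # assumed key file
    )
    # Passing credentials explicitly is what the reconnection path was missing.
    compute = googleapiclient.discovery.build("compute", "v1", credentials=credentials)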
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix Bug 2268
+
+    Revert to previous GCP libraries that don't present the GCP client
+    authentication issues that make the connection unstable.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix Bug 2275
+
+    Remove the pre-check before deleting a Neutron port. The existing request
+    had no filter and retrieved all the ports in the tenant, which takes a
+    large amount of execution time and causes issues in NS deletions.
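To illustrate the cost difference with python-neutronclient (handles and ids are assumptions; the actual fix simply drops the pre-check):

    # Expensive: retrieves every port in the tenant.
    all_ports = neutron.list_ports()["ports"]

    # Cheaper alternative: server-side filter on the port id.
    ports = neutron.list_ports(id=port_id)["ports"]

    # The fix deletes directly and lets Neutron report a missing port.
    neutron.delete_port(port_id)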
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix Bug 2282
+
+    Add the possibility for the `find_persistent_root_volumes()` method to return an empty
+    dictionary so that NS instantiation doesn't fail when using a pre-existing volume.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix bug 2290: Error in 'ResourceSkusOperations.list()' when instantiating on Azure.
+
+    Added the 'filter' parameter required by the 'resource_skus.list()' method.
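A sketch of the call with the added filter; the client construction and location are illustrative assumptions:

    from azure.identity import DefaultAzureCredential
    from azure.mgmt.compute import ComputeManagementClient

    client = ComputeManagementClient(DefaultAzureCredential(), subscription_id)
    # Restrict the SKU listing to a single location instead of the full catalogue.
    skus = client.resource_skus.list(filter="location eq 'westeurope'")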
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    Fix 2302: RO does not properly handle vnet IP ranges in Azure.
+    OSM uses a specific vnet to create NS or VNF subnets when instantiating on
+    Azure. The vnets allow more than one address prefix, but RO only tried to
+    allocate new subnets in the first defined range, ignoring the rest.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ This fixes bug 2354, disabling by default the periodic check of VM
+ status for all VIMs.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    Feature 11003 breaks volume multiattach due to how the AZ is handled in the case of shared
+    volumes. This change fixes that problem, allowing the basic31 daily test to work as intended.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix RO Dockerfile.local
+ Build RO images using Ubuntu 22.04.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix a healing bug when a NS is deployed on Azure.
+    Healing operation doesn't work with VMs deployed on Azure due to lack of
+    information in vnfrs required for the operation. That data must be
+    provided by the 'refresh_vms_status' method in the connector.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+ Fix a healing bug when a NS is deployed on GCP.
+ Healing operation doesn't work with VMs deployed on GCP due to lack of
+ information in vnfrs required for the operation. That data must be
+ provided by the 'refresh_vms_status' method in the connector.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    The run_coroutine_threadsafe() function schedules a coroutine onto an event loop from a
+    different thread and returns a concurrent.futures.Future. It is unnecessary for running
+    the main task, so it has been replaced with asyncio.run().
+
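A minimal sketch of the replacement; the coroutine name is a placeholder:

    import asyncio

    async def main_task():
        ...  # RO's main coroutine (illustrative placeholder)

    # Before: schedule onto an event loop owned by another thread.
    # future = asyncio.run_coroutine_threadsafe(main_task(), loop)
    # future.result()

    # After: run the main task in this thread's own event loop.
    asyncio.run(main_task())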
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+upgrade:
+ - |
+    Remove the pip-based installation of tox and pip from the Dockerfile.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+ - |
+    This change pins the flake8 version in tox.ini to 7.0.0, the black version to 24.1.1 and
+    the pylint version to 3.0.3. In addition, all the flake8 and black issues have been
+    properly fixed.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+ - |
+    Pin pylint version to 2.17.7 so that pylint tests do not fail with
+    the new version 3.0.0, which still has some bugs.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+upgrade:
+ - |
+ Upgrade Ubuntu from 20.04 to 22.04 and Python from 3.8 to 3.10.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+ - |
+    Add support for OpenStack Nova client microversion 2.60 by retrieving the flavor ID and
+    adding it to the VM information.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+ - |
+ Update pip requirements.
+
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+
+other:
+ - |
+    Update PyYAML to version > 6
\ No newline at end of file
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+other:
+ - |
+ Update stage-build to run tox sequentially.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    Fixes bug 2283: flavors are no longer unnecessarily created when `vim-flavor-id` is passed
+    as an instantiation parameter.
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+fixes:
+ - |
+    Feature 10972: Fixes blocking when deleting shared volumes and failure when attaching them to VDUs in OpenStack.
+    Enables the use of keep-volume and multiattach on the same volume.
\ No newline at end of file
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################
-aiokafka==0.8.0
+aiokafka==0.8.1
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-async-timeout==4.0.2
+async-timeout==4.0.3
# via
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
# aiokafka
dataclasses==0.6
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+dnspython==2.4.2
+ # via
+ # -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
+ # pymongo
kafka-python==2.0.2
# via
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
# aiokafka
-motor==1.3.1
+motor==3.3.1
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git@master
# via -r requirements-dev.in
-packaging==23.0
+packaging==23.1
# via
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
# aiokafka
-pycryptodome==3.17
+pycryptodome==3.19.0
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
-pymongo==3.13.0
+pymongo==4.5.0
# via
# -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
# motor
-pyyaml==5.4.1
+pyyaml==6.0.1
# via -r https://osm.etsi.org/gitweb/?p=osm/common.git;a=blob_plain;f=requirements.txt;hb=master
#######################################################################################
-e RO-plugin
# via -r requirements-test.in
-coverage==7.1.0
+coverage==7.3.1
# via -r requirements-test.in
-mock==5.0.1
+mock==5.1.0
# via -r requirements-test.in
-nose2==0.12.0
+nose2==0.13.0
# via -r requirements-test.in
-r RO-SDN-odl_openflow/requirements.in
-r RO-SDN-onos_openflow/requirements.in
-r RO-SDN-onos_vpls/requirements.in
+-r RO-SDN-tapi/requirements.in
-r RO-VIM-aws/requirements.in
-r RO-VIM-azure/requirements.in
-r RO-VIM-openstack/requirements.in
-r RO-VIM-vmware/requirements.in
-r RO-VIM-gcp/requirements.in
-pyyaml<6
+pyyaml>6
# via msrestazure
appdirs==1.4.4
# via openstacksdk
-atpublic==3.1.1
+atpublic==4.0
# via flufl-enum
-attrs==22.2.0
+attrs==23.1.0
# via
# cmd2
# jsonschema
+ # referencing
autopage==0.5.1
# via cliff
azure-common==1.1.28
# azure-mgmt-compute
# azure-mgmt-network
# azure-mgmt-resource
-azure-core==1.26.2
+azure-core==1.29.4
# via
# azure-identity
# azure-mgmt-core
# msrest
-azure-identity==1.12.0
+azure-identity==1.14.0
# via -r RO-VIM-azure/requirements.in
-azure-mgmt-compute==29.1.0
+azure-mgmt-compute==30.3.0
# via -r RO-VIM-azure/requirements.in
-azure-mgmt-core==1.3.2
+azure-mgmt-core==1.4.0
# via
# azure-mgmt-compute
# azure-mgmt-network
# azure-mgmt-resource
-azure-mgmt-network==22.2.0
+azure-mgmt-network==25.1.0
# via -r RO-VIM-azure/requirements.in
-azure-mgmt-resource==22.0.0
+azure-mgmt-resource==23.0.1
# via -r RO-VIM-azure/requirements.in
bcrypt==4.0.1
# via paramiko
boto==2.49.0
# via -r RO-VIM-aws/requirements.in
-cachetools==5.3.0
+cachetools==5.3.1
# via google-auth
-certifi==2022.12.7
+certifi==2023.7.22
# via
# msrest
# requests
-cffi==1.15.1
+cffi==1.16.0
# via
# cryptography
# pynacl
-charset-normalizer==3.0.1
+charset-normalizer==3.2.0
# via requests
-cheroot==9.0.0
+cheroot==10.0.0
# via cherrypy
cherrypy==18.1.2
# via -r NG-RO/requirements.in
-cliff==4.1.0
+cliff==4.3.0
# via
# osc-lib
# python-neutronclient
# python-openstackclient
-cmd2==2.4.2
+cmd2==2.4.3
# via cliff
-cryptography==39.0.0
+cryptography==41.0.4
# via
# -r NG-RO/requirements.in
# adal
# paramiko
# pyjwt
# pyopenssl
-cvprac==1.2.2
+ # python-openstackclient
+cvprac==1.3.1
# via -r RO-SDN-arista_cloudvision/requirements.in
debtcollector==2.5.0
# via
# via
# dogpile-cache
# openstacksdk
-dogpile-cache==1.1.8
+dogpile-cache==1.2.2
# via openstacksdk
-flufl-enum==5.0.1
+flufl-enum==6.0.2
# via pyvcloud
-google-api-core==2.11.0
- # via google-api-python-client
-google-api-python-client==2.74.0
+google-api-core==2.8.2
+ # via
+ # -r RO-VIM-gcp/requirements.in
+ # google-api-python-client
+google-api-python-client==2.51.0
# via -r RO-VIM-gcp/requirements.in
-google-auth==2.16.0
+google-auth==2.8.0
# via
# -r RO-VIM-gcp/requirements.in
# google-api-core
# google-api-python-client
# google-auth-httplib2
-google-auth-httplib2==0.1.0
+google-auth-httplib2==0.1.1
# via google-api-python-client
google-cloud==0.34.0
# via -r RO-VIM-gcp/requirements.in
-googleapis-common-protos==1.58.0
- # via google-api-core
-httplib2==0.21.0
+googleapis-common-protos==1.56.3
+ # via
+ # -r RO-VIM-gcp/requirements.in
+ # google-api-core
+httplib2==0.22.0
# via
# google-api-python-client
# google-auth-httplib2
# via pyvcloud
idna==3.4
# via requests
-importlib-metadata==6.0.0
+importlib-metadata==6.8.0
# via
# -r NG-RO/requirements.in
# cliff
-importlib-resources==5.10.2
- # via jsonschema
ipconflict==0.5.0
# via -r RO-VIM-aws/requirements.in
-iso8601==1.1.0
+iso8601==2.0.0
# via
# keystoneauth1
# openstacksdk
# python-novaclient
# python-openstackclient
isodate==0.6.1
- # via msrest
-jaraco-functools==3.5.2
+ # via
+ # azure-mgmt-compute
+ # azure-mgmt-network
+ # azure-mgmt-resource
+ # msrest
+jaraco-functools==3.9.0
# via
# cheroot
# tempora
# via -r NG-RO/requirements.in
jmespath==1.0.1
# via openstacksdk
-jsonpatch==1.32
+jsonpatch==1.33
# via
# openstacksdk
# warlock
-jsonpointer==2.3
+jsonpointer==2.4
# via jsonpatch
-jsonschema==4.17.3
+jsonschema==4.19.1
# via warlock
-keystoneauth1==5.1.1
+jsonschema-specifications==2023.7.1
+ # via jsonschema
+keystoneauth1==5.3.0
# via
# openstacksdk
# osc-lib
# python-novaclient
logutils==0.3.5
# via -r NG-RO/requirements.in
-lxml==4.9.2
+lxml==4.9.3
# via pyvcloud
-markupsafe==2.1.2
+markupsafe==2.1.3
# via jinja2
-more-itertools==9.0.0
+more-itertools==10.1.0
# via
# cheroot
# cherrypy
# jaraco-functools
-msal==1.20.0
+msal==1.24.0
# via
# azure-identity
# msal-extensions
msal-extensions==1.0.0
# via azure-identity
-msgpack==1.0.4
+msgpack==1.0.7
# via oslo-serialization
msrest==0.7.1
# via
# -r RO-VIM-azure/requirements.in
- # azure-mgmt-compute
- # azure-mgmt-network
- # azure-mgmt-resource
# msrestazure
msrestazure==0.6.4
# via -r RO-VIM-azure/requirements.in
-netaddr==0.8.0
+netaddr==0.9.0
# via
# -r RO-VIM-aws/requirements.in
- # -r RO-VIM-azure/requirements.in
# -r RO-VIM-gcp/requirements.in
- # -r RO-VIM-openstack/requirements.in
- # -r RO-VIM-openvim/requirements.in
- # -r RO-VIM-vmware/requirements.in
# ipconflict
# oslo-config
# oslo-utils
# oslo-utils
oauthlib==3.2.2
# via requests-oauthlib
-openstacksdk==0.103.0
+openstacksdk==1.5.0
# via
# os-client-config
# osc-lib
+ # python-neutronclient
# python-openstackclient
os-client-config==2.1.0
# via python-neutronclient
# via
# keystoneauth1
# openstacksdk
-osc-lib==2.6.2
+osc-lib==2.8.1
# via
# python-neutronclient
# python-openstackclient
-oslo-config==9.1.0
+oslo-config==9.2.0
# via
# oslo-log
# python-keystoneclient
-oslo-context==5.0.0
+oslo-context==5.2.0
# via oslo-log
-oslo-i18n==5.1.0
+oslo-i18n==6.1.0
# via
# osc-lib
# oslo-config
# python-neutronclient
# python-novaclient
# python-openstackclient
-oslo-log==5.0.2
+oslo-log==5.3.0
# via python-neutronclient
-oslo-serialization==5.0.0
+oslo-serialization==5.2.0
# via
# oslo-log
# python-keystoneclient
# python-neutronclient
# python-novaclient
-oslo-utils==6.1.0
+oslo-utils==6.2.1
# via
# osc-lib
# oslo-log
# python-neutronclient
# python-novaclient
# python-openstackclient
-packaging==23.0
+packaging==23.1
# via
# oslo-utils
# python-keystoneclient
-paramiko==3.0.0
+paramiko==3.3.1
# via
- # -r RO-SDN-dpb/requirements.in
# -r RO-VIM-gcp/requirements.in
# -r RO-plugin/requirements.in
pbr==5.11.1
# python-novaclient
# python-openstackclient
# stevedore
-pkgutil-resolve-name==1.3.10
- # via jsonschema
-portalocker==2.7.0
+portalocker==2.8.2
# via msal-extensions
-portend==3.1.0
+portend==3.2.0
# via cherrypy
-prettytable==3.6.0
+prettytable==3.9.0
# via
# -r RO-VIM-vmware/requirements.in
# cliff
# python-novaclient
progressbar==2.5
# via -r RO-VIM-vmware/requirements.in
-protobuf==4.21.12
+protobuf==4.24.3
# via
# google-api-core
# googleapis-common-protos
py-radix==0.10.0
# via ipconflict
-pyasn1==0.4.8
+pyasn1==0.5.0
# via
# pyasn1-modules
# rsa
-pyasn1-modules==0.2.8
+pyasn1-modules==0.3.0
# via google-auth
pycparser==2.21
# via cffi
-pygments==2.14.0
+pygments==2.16.1
# via pyvcloud
pyinotify==0.9.6
# via oslo-log
-pyjwt[crypto]==2.6.0
+pyjwt[crypto]==2.8.0
# via
# adal
# msal
pynacl==1.5.0
# via paramiko
-pyopenssl==23.0.0
+pyopenssl==23.2.0
# via python-glanceclient
-pyparsing==3.0.9
+pyparsing==3.1.1
# via
# httplib2
# oslo-utils
pyperclip==1.8.2
# via cmd2
-pyrsistent==0.19.3
- # via jsonschema
+pysocks==1.7.1
+ # via requests
python-cinderclient==7.4.1
# via
# -r RO-VIM-openstack/requirements.in
# via
# adal
# oslo-log
-python-glanceclient==4.2.0
+python-glanceclient==4.4.0
# via -r RO-VIM-openstack/requirements.in
-python-keystoneclient==5.0.1
+python-keystoneclient==5.2.0
# via
# -r RO-VIM-openstack/requirements.in
# python-neutronclient
# python-openstackclient
-python-neutronclient==8.2.1
+python-neutronclient==11.0.0
# via -r RO-VIM-openstack/requirements.in
-python-novaclient==18.2.0
+python-novaclient==18.4.0
# via
# -r NG-RO/requirements.in
# -r RO-VIM-openstack/requirements.in
# python-openstackclient
-python-openstackclient==6.0.0
+python-openstackclient==6.3.0
# via -r RO-VIM-openstack/requirements.in
-pytz==2022.7.1
+pytz==2023.3.post1
# via
# oslo-serialization
# oslo-utils
# tempora
pyvcloud==19.1.1
# via -r RO-VIM-vmware/requirements.in
-pyvmomi==8.0.0.1.2
+pyvmomi==8.0.2.0
# via -r RO-VIM-vmware/requirements.in
-pyyaml==5.4.1
+pyyaml==6.0.1
# via
# -r NG-RO/requirements.in
- # -r RO-VIM-aws/requirements.in
- # -r RO-VIM-azure/requirements.in
- # -r RO-VIM-openstack/requirements.in
- # -r RO-VIM-openvim/requirements.in
- # -r RO-VIM-vmware/requirements.in
- # -r RO-plugin/requirements.in
# -r requirements.in
# cliff
# openstacksdk
# oslo-config
# pyvcloud
-requests==2.28.2
+referencing==0.30.2
+ # via
+ # jsonschema
+ # jsonschema-specifications
+requests[socks]==2.31.0
# via
# -r NG-RO/requirements.in
- # -r RO-SDN-arista_cloudvision/requirements.in
- # -r RO-SDN-dpb/requirements.in
- # -r RO-SDN-dynpac/requirements.in
- # -r RO-SDN-floodlight_openflow/requirements.in
- # -r RO-SDN-ietfl2vpn/requirements.in
- # -r RO-SDN-juniper_contrail/requirements.in
- # -r RO-SDN-odl_openflow/requirements.in
- # -r RO-SDN-onos_openflow/requirements.in
- # -r RO-SDN-onos_vpls/requirements.in
- # -r RO-VIM-aws/requirements.in
- # -r RO-VIM-azure/requirements.in
- # -r RO-VIM-openstack/requirements.in
- # -r RO-VIM-openvim/requirements.in
# -r RO-VIM-vmware/requirements.in
- # -r RO-plugin/requirements.in
# adal
# azure-core
# cvprac
# via openstacksdk
rfc3986==2.0.0
# via oslo-config
+rpds-py==0.10.3
+ # via
+ # jsonschema
+ # referencing
rsa==4.9
# via google-auth
-simplejson==3.18.1
+simplejson==3.19.1
# via
# osc-lib
# python-cinderclient
six==1.16.0
# via
# azure-core
- # azure-identity
- # cheroot
# google-auth
- # google-auth-httplib2
# isodate
- # keystoneauth1
# msrestazure
# python-dateutil
# python-keystoneclient
# pyvmomi
-stevedore==4.1.1
+stevedore==5.1.0
# via
# cliff
# dogpile-cache
# python-keystoneclient
# python-novaclient
# python-openstackclient
-tempora==5.2.1
+tempora==5.5.0
# via portend
-tqdm==4.64.1
+tqdm==4.66.1
# via ipconflict
-typing-extensions==4.4.0
- # via azure-core
+typing-extensions==4.8.0
+ # via
+ # azure-core
+ # dogpile-cache
+ # jaraco-functools
+tzdata==2023.3
+ # via
+ # oslo-serialization
+ # oslo-utils
uritemplate==4.1.1
# via google-api-python-client
-urllib3==1.26.14
+urllib3==2.0.5
# via requests
uuid==1.30
# via -r RO-SDN-arista_cloudvision/requirements.in
# via
# cmd2
# prettytable
-wrapt==1.14.1
+wrapt==1.15.0
# via
# debtcollector
# python-glanceclient
-zc-lockfile==2.0
+zc-lockfile==3.0.post1
# via cherrypy
-zipp==3.11.0
- # via
- # importlib-metadata
- # importlib-resources
+zipp==3.17.0
+ # via importlib-metadata
# The following packages are considered to be unsafe in a requirements file:
# setuptools
--- /dev/null
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+########################################################################################
+name: resource-orchestrator
+summary: An image for OSM RO
+description: |
+ OSM Resource Orchestrator image.
+version: master_beta_1 # version format: <upstream-version>_<base-version>_<version>
+license: Apache-2.0
+base: ubuntu:22.04
+platforms:
+ amd64:
+
+parts:
+ non-root-user:
+ plugin: nil
+ overlay-script: |
+ chroot ${CRAFT_OVERLAY}
+ mkdir -p /app/osm_ro
+ mkdir -p /app/storage/kafka
+ mkdir -p /app/log
+ groupadd -g 1001 appuser
+ useradd -u 1001 -g 1001 -d /app appuser
+ chown -R appuser:appuser /app
+
+ common:
+ source: https://osm.etsi.org/gerrit/osm/common.git
+ source-branch: master
+ plugin: python
+ python-requirements:
+ - requirements.txt
+ stage-packages:
+ - python3.10-venv
+ build-environment:
+      - PYTHONDONTWRITEBYTECODE: "1" # This does not work as expected; there is a bug that is still not fixed.
+    # Workaround for the conflicts caused by the PYTHONDONTWRITEBYTECODE bug
+ override-build: |
+ craftctl default
+ find "$CRAFT_PART_INSTALL" -name '*.pyc' -exec rm {} \;
+ stage:
+ - -bin/activate
+ - -bin/activate.csh
+ - -bin/activate.fish
+ - -lib/python3.10/site-packages/pip-23.2.1.dist-info/RECORD
+ - -lib/python3.10/site-packages/wheel-0.41.0.dist-info/RECORD
+ - -lib/python3.10/site-packages/charset_normalizer-3.1.0.dist-info/RECORD
+
+ ro:
+ plugin: python
+ source: https://osm.etsi.org/gerrit/osm/RO.git
+ source-tag: master
+ python-requirements:
+ - requirements.txt
+ stage-packages:
+ - python3.10-venv
+ python-packages:
+ - pip
+ - setuptools
+ - wheel
+ - ./NG-RO
+ - ./RO-plugin
+ - ./RO-VIM-vmware
+ - ./RO-VIM-openstack
+ - ./RO-VIM-openvim
+ - ./RO-VIM-aws
+ - ./RO-VIM-azure
+ - ./RO-SDN-dynpac
+ - ./RO-SDN-ietfl2vpn
+ - ./RO-SDN-onos_vpls
+ - ./RO-SDN-onos_openflow
+ - ./RO-SDN-odl_openflow
+ - ./RO-SDN-floodlight_openflow
+ - ./RO-SDN-arista_cloudvision
+ - ./RO-SDN-juniper_contrail
+ - ./RO-VIM-gcp
+ - ./RO-SDN-tapi
+ build-packages:
+ - wget
+ - genisoimage
+ - netbase
+
[testenv]
usedevelop = True
-basepython = python3
+basepython = python3.10
setenv = VIRTUAL_ENV={envdir}
PYTHONDONTWRITEBYTECODE = 1
deps = -r{toxinidir}/requirements.txt
[testenv:release_notes]
deps = reno
skip_install = true
-whitelist_externals = bash
+allowlist_externals = bash
commands =
reno new {posargs:new_feature}
bash -c "sed -i -e '1 e head -16 tox.ini' releasenotes/notes/{posargs:new_feature}*.yaml"
#######################################################################################
[testenv:black]
-deps = black
+deps = black==24.1.1
skip_install = true
commands =
black --check --diff NG-RO
black --check --diff RO-SDN-odl_openflow
black --check --diff RO-SDN-onos_openflow
black --check --diff RO-SDN-onos_vpls
+ black --check --diff RO-SDN-tapi
black --check --diff RO-VIM-aws
black --check --diff RO-VIM-azure
black --check --diff RO-VIM-openstack
# RO-SDN-onos_vpls
# nose2 -C --coverage RO-SDN-onos_vpls/osm_rosdn_onos_vpls -s RO-SDN-onos_vpls/osm_rosdn_onos_vpls
# sh -c 'mv .coverage .coverage_rosdn_onos_vpls'
+ # RO-SDN-tapi
+ nose2 -C --coverage RO-SDN-tapi/osm_rosdn_tapi -s RO-SDN-tapi/osm_rosdn_tapi
+ sh -c 'mv .coverage .coverage_rosdn_tapi'
# RO-VIM-aws
# nose2 -C --coverage RO-VIM-aws/osm_rovim_aws -s RO-VIM-aws/osm_rovim_aws
# sh -c 'mv .coverage .coverage_rovim_aws'
# nose2 -C --coverage RO-VIM-gcp/osm_rovim_gcp -s RO-VIM-gcp/osm_rovim_gcp
# sh -c 'mv .coverage .coverage_rovim_gcp'
# Combine results and generate reports
- # coverage combine .coverage_ng_ro .coverage_ro_plugin .coverage_rosdn_arista_cloudvision .coverage_rosdn_dpb .coverage_rosdn_dynpac .coverage_rosdn_floodlightof .coverage_rosdn_ietfl2vpn .coverage_rosdn_juniper_contrail .coverage_rosdn_odlof .coverage_rosdn_onos_vpls .coverage_rosdn_onosof .coverage_rovim_aws .coverage_rovim_azure .coverage_rovim_openvim .coverage_rovim_gcp # .coverage_rovim_openstack .coverage_rovim_vmware
+ # coverage combine .coverage_ng_ro .coverage_ro_plugin .coverage_rosdn_arista_cloudvision .coverage_rosdn_dpb .coverage_rosdn_dynpac .coverage_rosdn_floodlightof .coverage_rosdn_ietfl2vpn .coverage_rosdn_juniper_contrail .coverage_rosdn_odlof .coverage_rosdn_onos_vpls .coverage_rosdn_onosof .coverage_rosdn_tapi .coverage_rovim_aws .coverage_rovim_azure .coverage_rovim_openvim .coverage_rovim_gcp # .coverage_rovim_openstack .coverage_rovim_vmware
coverage combine .coverage_ng_ro .coverage_rovim_openstack .coverage_rosdn_juniper_contrail
coverage report --omit='*tests*'
coverage html -d ./cover --omit='*tests*'
#######################################################################################
[testenv:flake8]
deps =
- flake8==5.0.4
+ flake8==7.0.0
flake8-import-order
skip_install = true
commands =
flake8 RO-SDN-odl_openflow/osm_rosdn_odlof/ RO-SDN-odl_openflow/setup.py
flake8 RO-SDN-onos_openflow/osm_rosdn_onosof/ RO-SDN-onos_openflow/setup.py
flake8 RO-SDN-onos_vpls/osm_rosdn_onos_vpls/ RO-SDN-onos_vpls/setup.py
+ flake8 RO-SDN-tapi/osm_rosdn_tapi/ RO-SDN-tapi/setup.py
flake8 RO-VIM-aws/osm_rovim_aws/ RO-VIM-aws/setup.py
flake8 RO-VIM-azure/osm_rovim_azure/ RO-VIM-azure/setup.py
flake8 RO-VIM-openstack/osm_rovim_openstack/ RO-VIM-openstack/setup.py
deps = {[testenv]deps}
-r{toxinidir}/requirements-dev.txt
-r{toxinidir}/requirements-test.txt
- pylint
+ pylint==3.0.3
skip_install = true
commands =
pylint -E NG-RO/osm_ng_ro --disable=E1101
pylint -E RO-plugin/osm_ro_plugin --disable=E1101
- pylint -E RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision --disable=all
+ - pylint -E RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision --disable=all
pylint -E RO-SDN-dpb/osm_rosdn_dpb
pylint -E RO-SDN-dynpac/osm_rosdn_dynpac --disable=E1101
pylint -E RO-SDN-floodlight_openflow/osm_rosdn_floodlightof
pylint -E RO-SDN-odl_openflow/osm_rosdn_odlof
pylint -E RO-SDN-onos_openflow/osm_rosdn_onosof
pylint -E RO-SDN-onos_vpls/osm_rosdn_onos_vpls --disable=E1101
+ pylint -E RO-SDN-tapi/osm_rosdn_tapi
pylint -E RO-VIM-aws/osm_rovim_aws
- pylint -E RO-VIM-azure/osm_rovim_azure --disable=all
+ - pylint -E RO-VIM-azure/osm_rovim_azure --disable=all
pylint -E RO-VIM-openstack/osm_rovim_openstack --disable=E1101
- pylint -E RO-VIM-openvim/osm_rovim_openvim --disable=all
- pylint -E RO-VIM-vmware/osm_rovim_vmware --disable=all
+ - pylint -E RO-VIM-openvim/osm_rovim_openvim --disable=all
+ - pylint -E RO-VIM-vmware/osm_rovim_vmware --disable=all
pylint -E RO-VIM-gcp/osm_rovim_gcp --disable=E1101
#######################################################################################
[testenv:pip-compile]
-deps = pip-tools==6.6.2
+deps = pip-tools==6.13.0
skip_install = true
-whitelist_externals = bash
+allowlist_externals = bash
commands =
- bash -c "for file in requirements*.in ; do \
UNSAFE="" ; \
if [[ $file =~ 'dist' ]] ; then UNSAFE='--allow-unsafe' ; fi ; \
- pip-compile -rU --no-header $UNSAFE $file ;\
+ pip-compile --resolver=backtracking -rU --no-header $UNSAFE $file ;\
out=`echo $file | sed 's/.in/.txt/'` ; \
sed -i -e '1 e head -16 tox.ini' $out ;\
done"
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/NG-RO
commands =
sh -c 'rm -rf osm_ng_ro/requirements.txt deb_dist dist osm_ng_ro.egg-info osm_ng_ro*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-plugin
commands =
sh -c 'rm -rf deb_dist dist osm_ro_plugin.egg-info osm_ro_plugin*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-arista_cloudvision
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_arista_cloudvision.egg-info osm_rosdn_arista_cloudvision*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-dpb
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_dpb.egg-info osm_rosdn_dpb*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-dynpac
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_dynpac.egg-info osm_rosdn_dynpac*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-floodlight_openflow
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_floodlightof.egg-info osm_rosdn_floodlightof*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-ietfl2vpn
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_ietfl2vpn.egg-info osm_rosdn_ietfl2vpn*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-juniper_contrail
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_juniper_contrail.egg-info osm_rosdn_juniper_contrail*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-odl_openflow
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_odlof.egg-info osm_rosdn_odlof*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-onos_openflow
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_onosof.egg-info osm_rosdn_onosof*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-SDN-onos_vpls
commands =
sh -c 'rm -rf deb_dist dist osm_rosdn_onos_vpls.egg-info osm_rosdn_onos_vpls*.tar.gz'
sh -c 'cd deb_dist/osm-rosdn-onos-vpls*/ && dpkg-buildpackage -rfakeroot -uc -us'
+#######################################################################################
+[testenv:dist_ro_sdn_tapi]
+deps = {[testenv]deps}
+ -r{toxinidir}/requirements-dist.txt
+skip_install = true
+allowlist_externals = sh
+changedir = {toxinidir}/RO-SDN-tapi
+commands =
+ sh -c 'rm -rf deb_dist dist osm_rosdn_tapi.egg-info osm_rosdn_tapi*.tar.gz'
+ python3 setup.py --command-packages=stdeb.command sdist_dsc
+ sh -c 'cd deb_dist/osm-rosdn-tapi*/ && dpkg-buildpackage -rfakeroot -uc -us'
+
+
#######################################################################################
[testenv:dist_ro_vim_aws]
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-aws
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_aws.egg-info osm_rovim_aws*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-azure
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_azure.egg-info osm_rovim_azure*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-openstack
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_openstack.egg-info osm_rovim_openstack*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-openvim
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_openvim.egg-info osm_rovim_openvim*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-vmware
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_vmware.egg-info osm_rovim_vmware*.tar.gz'
deps = {[testenv]deps}
-r{toxinidir}/requirements-dist.txt
skip_install = true
-whitelist_externals = sh
+allowlist_externals = sh
changedir = {toxinidir}/RO-VIM-gcp
commands =
sh -c 'rm -rf deb_dist dist osm_rovim_gcp.egg-info osm_rovim_gcp*.tar.gz'