import random
import re
import time
-from typing import Dict, Optional, Tuple
+from typing import Dict, List, Optional, Tuple
from cinderclient import client as cClient
from glanceclient import client as glClient
endpoint_type=self.endpoint_type,
region_name=region_name,
)
- self.cinder = self.session["cinder"] = cClient.Client(
- 2,
- session=sess,
- endpoint_type=self.endpoint_type,
- region_name=region_name,
- )
+
+ if sess.get_all_version_data(service_type="volumev2"):
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 2,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
+ else:
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 3,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
try:
self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
self._reload_connection()
network_dict = {"name": net_name, "admin_state_up": True}
- if net_type in ("data", "ptp"):
+ if net_type in ("data", "ptp") or provider_network_profile:
provider_physical_network = None
if provider_network_profile and provider_network_profile.get(
if not ip_profile.get("subnet_address"):
# Fake subnet is required
- subnet_rand = random.randint(0, 255)
+ subnet_rand = random.SystemRandom().randint(0, 255)
ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
if "ip_version" not in ip_profile:
ip_str = str(netaddr.IPAddress(ip_int))
subnet["allocation_pools"][0]["end"] = ip_str
+ if (
+ ip_profile.get("ipv6_address_mode")
+ and ip_profile["ip_version"] != "IPv4"
+ ):
+ subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+ # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+ # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+ subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
# self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
self.neutron.create_subnet({"subnet": subnet})
) as e:
self._format_exception(e)
- def process_resource_quota(self, quota, prefix, extra_specs):
- """
- :param prefix:
- :param extra_specs:
- :return:
+    @staticmethod
+    def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
+        """Process resource quota and fill up extra_specs.
+        Args:
+            quota (dict): Keeping the quota of resources
+            prefix (str): Prefix used to build the "quota:<prefix>_*" extra-spec keys
+            extra_specs (dict): Dict to be filled to be used during flavor creation
+
+        """
if "limit" in quota:
extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
extra_specs["quota:" + prefix + "_shares_level"] = "custom"
extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
- def new_flavor(self, flavor_data, change_name_if_used=True):
- """Adds a tenant flavor to openstack VIM
- if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
- repetition
- Returns the flavor identifier
+    @staticmethod
+    def process_numa_memory(
+        numa: dict, node_id: Optional[int], extra_specs: dict
+    ) -> None:
+        """Set the memory of a numa node in extra_specs.
+        Args:
+            numa (dict): A dictionary which includes numa information
+            node_id (Optional[int]): ID of numa node
+            extra_specs (dict): To be filled.
+
+        """
+        if not numa.get("memory"):
+            return
+        # numa["memory"] appears to be expressed in GB while Nova expects MB,
+        # hence the *1024 conversion — TODO confirm descriptor units.
+        memory_mb = numa["memory"] * 1024
+        memory = "hw:numa_mem.{}".format(node_id)
+        extra_specs[memory] = int(memory_mb)
+
+    @staticmethod
+    def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
+        """Set the cpu list of a numa node in extra_specs.
+        Args:
+            numa (dict): A dictionary which includes numa information
+            node_id (int): ID of numa node
+            extra_specs (dict): To be filled.
+
+        """
+        if not numa.get("vcpu"):
+            return
+        vcpu = numa["vcpu"]
+        cpu = "hw:numa_cpus.{}".format(node_id)
+        # The vcpu list is rendered as a comma-separated string, e.g. "0,1,2"
+        vcpu = ",".join(map(str, vcpu))
+        extra_specs[cpu] = vcpu
+
+    @staticmethod
+    def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has paired-threads.
+        Args:
+            numa (dict): A dictionary which includes numa information
+            extra_specs (dict): To be filled.
+
+        Returns:
+            threads (Optional[int]): Number of virtual cpus, or None when numa
+                has no (truthy) "paired-threads" entry.
+
+        """
+        if not numa.get("paired-threads"):
+            return
+
+        # cpu_thread_policy "require" implies that compute node must have an SMT architecture
+        threads = numa["paired-threads"] * 2
+        extra_specs["hw:cpu_thread_policy"] = "require"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    @staticmethod
+    def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has cores.
+        Args:
+            numa (dict): A dictionary which includes numa information
+            extra_specs (dict): To be filled.
+
+        Returns:
+            cores (Optional[int]): Number of virtual cpus, or None when numa
+                has no (truthy) "cores" entry.
+
+        """
+        # cpu_thread_policy "isolate" implies that the host must not have an SMT
+        # architecture, or a non-SMT architecture will be emulated
+        if not numa.get("cores"):
+            return
+        cores = numa["cores"]
+        extra_specs["hw:cpu_thread_policy"] = "isolate"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return cores
+
+    @staticmethod
+    def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+        """Fill up extra_specs if numa has threads.
+        Args:
+            numa (dict): A dictionary which includes numa information
+            extra_specs (dict): To be filled.
+
+        Returns:
+            threads (Optional[int]): Number of virtual cpus, or None when numa
+                has no (truthy) "threads" entry.
+
+        """
+        # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+        if not numa.get("threads"):
+            return
+        threads = numa["threads"]
+        extra_specs["hw:cpu_thread_policy"] = "prefer"
+        extra_specs["hw:cpu_policy"] = "dedicated"
+        return threads
+
+    def _process_numa_parameters_of_flavor(
+        self, numas: List, extra_specs: Dict
+    ) -> None:
+        """Process numa parameters and fill up extra_specs.
+
+        Args:
+            numas (list): List of dictionaries which include numa information
+            extra_specs (dict): To be filled.
+
+        """
+        numa_nodes = len(numas)
+        extra_specs["hw:numa_nodes"] = str(numa_nodes)
+        cpu_cores, cpu_threads = 0, 0
+
+        if self.vim_type == "VIO":
+            self.process_vio_numa_nodes(numa_nodes, extra_specs)
+
+        for numa in numas:
+            if "id" in numa:
+                node_id = numa["id"]
+                # overwrite ram and vcpus
+                # check if key "memory" is present in numa else use ram value at flavor
+                self.process_numa_memory(numa, node_id, extra_specs)
+                self.process_numa_vcpu(numa, node_id, extra_specs)
+
+            # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+            extra_specs["hw:cpu_sockets"] = str(numa_nodes)
+
+            if "paired-threads" in numa:
+                # The helpers return None when the entry is falsy (e.g. 0);
+                # guard the addition so we never add None to an int.
+                threads = self.process_numa_paired_threads(numa, extra_specs)
+                cpu_threads += threads or 0
+
+            elif "cores" in numa:
+                cores = self.process_numa_cores(numa, extra_specs)
+                cpu_cores += cores or 0
+
+            elif "threads" in numa:
+                threads = self.process_numa_threads(numa, extra_specs)
+                cpu_threads += threads or 0
+
+        if cpu_cores:
+            extra_specs["hw:cpu_cores"] = str(cpu_cores)
+        if cpu_threads:
+            extra_specs["hw:cpu_threads"] = str(cpu_threads)
+
+    @staticmethod
+    def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
+        """According to number of numa nodes, updates the extra_specs for VIO.
+
+        Args:
+
+            numa_nodes (int): Number of numa nodes
+            extra_specs (dict): Extra specs dict to be updated
+
+        """
+        # If there are several numas, we do not define specific affinity.
+        # NOTE(review): numa_nodes is currently unused and the latency level is
+        # set unconditionally regardless of node count — confirm intended.
+        extra_specs["vmware:latency_sensitivity_level"] = "high"
+
+    def _change_flavor_name(
+        self, name: str, name_suffix: int, flavor_data: dict
+    ) -> str:
+        """Return a flavor name that does not collide with existing flavors.
+
+        Args:
+            name (str): Flavor name to be checked
+            name_suffix (int): Suffix to be appended to name
+            flavor_data (dict): Flavor dict
+
+        Returns:
+            name (str): New flavor name to be used
+
+        """
+        # Collect the flavor names already present in the VIM
+        used_names = [flavor.name for flavor in self.nova.flavors.list()]
+
+        # Bump the suffix until the candidate name is unused
+        while name in used_names:
+            name_suffix += 1
+            name = "{}-{}".format(flavor_data["name"], name_suffix)
+
+        return name
+
+    def _process_extended_config_of_flavor(
+        self, extended: dict, extra_specs: dict
+    ) -> None:
+        """Process the extended dict to fill up extra_specs.
+        Args:
+
+            extended (dict): Keeping the extra specification of flavor
+            extra_specs (dict) Dict to be filled to be used during flavor creation
+
+        """
+        # Descriptor quota key -> resource prefix used in extra_specs
+        quotas = {
+            "cpu-quota": "cpu",
+            "mem-quota": "memory",
+            "vif-quota": "vif",
+            "disk-io-quota": "disk_io",
+        }
+
+        # Descriptor mempage-size value -> Nova hw:mem_page_size value
+        page_sizes = {
+            "LARGE": "large",
+            "SMALL": "small",
+            "SIZE_2MB": "2MB",
+            "SIZE_1GB": "1GB",
+            "PREFER_LARGE": "any",
+        }
+
+        # Descriptor policy key -> Nova extra-spec key
+        policies = {
+            "cpu-pinning-policy": "hw:cpu_policy",
+            "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
+            "mem-policy": "hw:numa_mempolicy",
+        }
+
+        numas = extended.get("numas")
+        if numas:
+            self._process_numa_parameters_of_flavor(numas, extra_specs)
+
+        for quota_key, resource in quotas.items():
+            if quota_key in extended:
+                self.process_resource_quota(extended.get(quota_key), resource, extra_specs)
+
+        # Set the mempage size as specified in the descriptor
+        mempage_size = extended.get("mempage-size")
+        if mempage_size:
+            if mempage_size in page_sizes:
+                extra_specs["hw:mem_page_size"] = page_sizes[mempage_size]
+            else:
+                # Normally, validations in NBI should not allow to this condition.
+                self.logger.debug(
+                    "Invalid mempage-size %s. Will be ignored",
+                    extended.get("mempage-size"),
+                )
+
+        for policy_key, hw_key in policies.items():
+            if extended.get(policy_key):
+                extra_specs[hw_key] = extended[policy_key].lower()
+
+    @staticmethod
+    def _get_flavor_details(flavor_data: dict) -> Tuple:
+        """Returns the details of flavor
+        Args:
+            flavor_data (dict): Dictionary that includes required flavor details
+
+        Returns:
+            ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
+
+        """
+        ram = flavor_data.get("ram", 64)
+        vcpus = flavor_data.get("vcpus", 1)
+        extra_specs = {}
+        extended = flavor_data.get("extended")
+
+        return ram, vcpus, extra_specs, extended
+
+ def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
+ """Adds a tenant flavor to openstack VIM.
+ if change_name_if_used is True, it will change name in case of conflict,
+ because it is not supported name repetition.
+
+ Args:
+ flavor_data (dict): Flavor details to be processed
+ change_name_if_used (bool): Change name in case of conflict
+
+ Returns:
+ flavor_id (str): flavor identifier
+
"""
self.logger.debug("Adding flavor '%s'", str(flavor_data))
retry = 0
self._reload_connection()
if change_name_if_used:
- # get used names
- fl_names = []
- fl = self.nova.flavors.list()
+ name = self._change_flavor_name(name, name_suffix, flavor_data)
- for f in fl:
- fl_names.append(f.name)
-
- while name in fl_names:
- name_suffix += 1
- name = flavor_data["name"] + "-" + str(name_suffix)
-
- ram = flavor_data.get("ram", 64)
- vcpus = flavor_data.get("vcpus", 1)
- extra_specs = {}
-
- extended = flavor_data.get("extended")
+ ram, vcpus, extra_specs, extended = self._get_flavor_details(
+ flavor_data
+ )
if extended:
- numas = extended.get("numas")
-
- if numas:
- numa_nodes = len(numas)
-
- extra_specs["hw:numa_nodes"] = str(numa_nodes)
-
- if self.vim_type == "VIO":
- extra_specs[
- "vmware:extra_config"
- ] = '{"numa.nodeAffinity":"0"}'
- extra_specs["vmware:latency_sensitivity_level"] = "high"
-
- for numa in numas:
- if "id" in numa:
- node_id = numa["id"]
-
- if "memory" in numa:
- memory_mb = numa["memory"] * 1024
- memory = "hw:numa_mem.{}".format(node_id)
- extra_specs[memory] = int(memory_mb)
-
- if "vcpu" in numa:
- vcpu = numa["vcpu"]
- cpu = "hw:numa_cpus.{}".format(node_id)
- vcpu = ",".join(map(str, vcpu))
- extra_specs[cpu] = vcpu
-
- # overwrite ram and vcpus
- # check if key "memory" is present in numa else use ram value at flavor
- # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
- # implemented/virt-driver-cpu-thread-pinning.html
- extra_specs["hw:cpu_sockets"] = str(numa_nodes)
-
- if "paired-threads" in numa:
- vcpus = numa["paired-threads"] * 2
- # cpu_thread_policy "require" implies that the compute node must have an
- # STM architecture
- extra_specs["hw:cpu_thread_policy"] = "require"
- extra_specs["hw:cpu_policy"] = "dedicated"
- elif "cores" in numa:
- vcpus = numa["cores"]
- # cpu_thread_policy "prefer" implies that the host must not have an SMT
- # architecture, or a non-SMT architecture will be emulated
- extra_specs["hw:cpu_thread_policy"] = "isolate"
- extra_specs["hw:cpu_policy"] = "dedicated"
- elif "threads" in numa:
- vcpus = numa["threads"]
- # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
- # architecture
- extra_specs["hw:cpu_thread_policy"] = "prefer"
- extra_specs["hw:cpu_policy"] = "dedicated"
- # for interface in numa.get("interfaces",() ):
- # if interface["dedicated"]=="yes":
- # raise vimconn.VimConnException("Passthrough interfaces are not supported
- # for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
- # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
- # when a way to connect it is available
- elif extended.get("cpu-quota"):
- self.process_resource_quota(
- extended.get("cpu-quota"), "cpu", extra_specs
- )
-
- if extended.get("mem-quota"):
- self.process_resource_quota(
- extended.get("mem-quota"), "memory", extra_specs
- )
-
- if extended.get("vif-quota"):
- self.process_resource_quota(
- extended.get("vif-quota"), "vif", extra_specs
- )
+ self._process_extended_config_of_flavor(extended, extra_specs)
- if extended.get("disk-io-quota"):
- self.process_resource_quota(
- extended.get("disk-io-quota"), "disk_io", extra_specs
- )
+ # Create flavor
- # Set the mempage size as specified in the descriptor
- if extended.get("mempage-size"):
- if extended.get("mempage-size") == "LARGE":
- extra_specs["hw:mem_page_size"] = "large"
- elif extended.get("mempage-size") == "SMALL":
- extra_specs["hw:mem_page_size"] = "small"
- elif extended.get("mempage-size") == "SIZE_2MB":
- extra_specs["hw:mem_page_size"] = "2MB"
- elif extended.get("mempage-size") == "SIZE_1GB":
- extra_specs["hw:mem_page_size"] = "1GB"
- elif extended.get("mempage-size") == "PREFER_LARGE":
- extra_specs["hw:mem_page_size"] = "any"
- else:
- # The validations in NBI should make reaching here not possible.
- # If this message is shown, check validations
- self.logger.debug(
- "Invalid mempage-size %s. Will be ignored",
- extended.get("mempage-size"),
- )
- if extended.get("cpu-pinning-policy"):
- extra_specs["hw:cpu_policy"] = extended.get(
- "cpu-pinning-policy"
- ).lower()
-
- # Set the cpu thread pinning policy as specified in the descriptor
- if extended.get("cpu-thread-pinning-policy"):
- extra_specs["hw:cpu_thread_policy"] = extended.get(
- "cpu-thread-pinning-policy"
- ).lower()
-
- # Set the mem policy as specified in the descriptor
- if extended.get("mem-policy"):
- extra_specs["hw:numa_mempolicy"] = extended.get(
- "mem-policy"
- ).lower()
-
- # create flavor
new_flavor = self.nova.flavors.create(
name=name,
ram=ram,
swap=flavor_data.get("swap", 0),
is_public=flavor_data.get("is_public", True),
)
- # add metadata
+
+ # Add metadata
if extra_specs:
new_flavor.set_keys(extra_specs)
return new_flavor.id
+
except nvExceptions.Conflict as e:
if change_name_if_used and retry < max_retries:
continue
self._format_exception(e)
- # except nvExceptions.BadRequest as e:
+
except (
ksExceptions.ClientException,
nvExceptions.ClientException,
# For VF
elif net["type"] == "VF" or net["type"] == "SR-IOV":
-
port_dict["binding:vnic_type"] = "direct"
# VIO specific Changes
if net.get("mac_address"):
port_dict["mac_address"] = net["mac_address"]
- if net.get("ip_address"):
- port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+ ip_dual_list = []
+ if ip_list := net.get("ip_address"):
+ if not isinstance(ip_list, list):
+ ip_list = [ip_list]
+ for ip in ip_list:
+ ip_dict = {"ip_address": ip}
+ ip_dual_list.append(ip_dict)
+ port_dict["fixed_ips"] = ip_dual_list
# TODO add "subnet_id": <subnet_id>
def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
"""
new_port = self.neutron.create_port({"port": port_dict})
created_items["port:" + str(new_port["port"]["id"])] = True
- net["mac_adress"] = new_port["port"]["mac_address"]
+ net["mac_address"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
return new_port
key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
if disk.get(key_id):
-
block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
existing_vim_volumes.append({"id": disk[key_id]})
availability_zone=vm_av_zone,
)
boot_volume_id = volume.id
- created_items["volume:" + str(volume.id)] = True
- block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+ self.update_block_device_mapping(
+ volume=volume,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ disk=disk,
+ created_items=created_items,
+ )
return boot_volume_id
+    @staticmethod
+    def update_block_device_mapping(
+        volume: object,
+        block_device_mapping: dict,
+        base_disk_index: int,
+        disk: dict,
+        created_items: dict,
+    ) -> None:
+        """Add volume information to block device mapping dict.
+        Args:
+            volume (object): Created volume object
+            block_device_mapping (dict): Block device details
+            base_disk_index (int): Disk index
+            disk (dict): Disk details
+            created_items (dict): All created items belongs to VM
+
+        Raises:
+            VimConnException: If volume is falsy or has no id attribute
+        """
+        if not volume:
+            raise vimconn.VimConnException("Volume is empty.")
+
+        if not hasattr(volume, "id"):
+            raise vimconn.VimConnException(
+                "Created volume is not valid, does not have id attribute."
+            )
+
+        # Volumes tagged ":keep" are preserved on VM deletion (see the
+        # keep-flag handling in delete_vminstance helpers).
+        volume_txt = "volume:" + str(volume.id)
+        if disk.get("keep"):
+            volume_txt += ":keep"
+        created_items[volume_txt] = True
+        # Device name is "vd" + chr(base_disk_index); the index meaning is
+        # set by the caller.
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+
def _prepare_non_root_persistent_volumes(
self,
name: str,
key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
if disk.get(key_id):
-
# Use existing persistent volume
block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
existing_vim_volumes.append({"id": disk[key_id]})
# Make sure volume is in the same AZ as the VM to be attached to
availability_zone=vm_av_zone,
)
- created_items["volume:" + str(volume.id)] = True
- block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+ self.update_block_device_mapping(
+ volume=volume,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ disk=disk,
+ created_items=created_items,
+ )
def _wait_for_created_volumes_availability(
self, elapsed_time: int, created_items: dict
while elapsed_time < volume_timeout:
for created_item in created_items:
- v, _, volume_id = created_item.partition(":")
+ v, volume_id = (
+ created_item.split(":")[0],
+ created_item.split(":")[1],
+ )
if v == "volume":
if self.cinder.volumes.get(volume_id).status != "available":
break
return self.neutron.show_floatingip(free_floating_ip)
def _get_free_floating_ip(
- self, server: object, floating_network: dict, created_items: dict
+ self, server: object, floating_network: dict
) -> Optional[str]:
"""Get the free floating IP address.
Args:
server (object): Server Object
floating_network (dict): Floating network details
- created_items (dict): All created items belongs to new VM instance
Returns:
free_floating_ip (str): Free floating ip addr
# Randomize
random.shuffle(floating_ips)
- return self._find_floating_ip(
- server, floating_ips, floating_network, created_items
- )
+ return self._find_floating_ip(server, floating_ips, floating_network)
def _prepare_external_network_for_vminstance(
self,
# In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
# several times
while not assigned:
-
free_floating_ip = self._get_free_floating_ip(
- server, floating_network, created_items
+ server, floating_network
)
if not free_floating_ip:
self.neutron.update_port(port[0], port_update)
except Exception:
-
raise vimconn.VimConnException(
"It was not possible to disable port security for port {}".format(
port[0]
the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
as not present.
+
"""
self.logger.debug(
"new_vminstance input: image='%s' flavor='%s' nics='%s'",
server_id = server.id
try:
+ created_items = self.remove_keep_tag_from_persistent_volumes(
+ created_items
+ )
+
self.delete_vminstance(server_id, created_items)
except Exception as e2:
self._format_exception(e)
+    @staticmethod
+    def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
+        """Removes the keep flag from persistent volumes. So, those volumes could be removed.
+
+        Args:
+            created_items (dict): All created items belongs to VM
+
+        Returns:
+            updated_created_items (dict): Dict which does not include keep flag for volumes.
+
+        """
+        updated_created_items = {}
+        for key, value in created_items.items():
+            updated_created_items[key.replace(":keep", "")] = value
+        return updated_created_items
+
def get_vminstance(self, vm_id):
"""Returns the VM instance information from VIM"""
# self.logger.debug("Getting VM from VIM")
) as e:
self._format_exception(e)
- def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
- """Removes a VM instance from VIM. Returns the old identifier"""
- # print "osconnector: Getting VM from VIM"
+    def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
+        """Neutron delete ports by id.
+        Args:
+            k_id (str): Port id in the VIM
+        """
+        try:
+            port_dict = self.neutron.list_ports()
+            # NOTE(review): the "if port_dict" filter tests the whole response
+            # dict, not each port, so it is effectively always true — confirm
+            # whether a per-port condition was intended.
+            existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
+
+            if k_id in existing_ports:
+                self.neutron.delete_port(k_id)
+
+        except Exception as e:
+            # Best-effort delete: log and continue so remaining items can be
+            # cleaned up by the caller.
+            self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+
+    def _delete_volumes_by_id_wth_cinder(
+        self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
+    ) -> Optional[bool]:
+        """Cinder delete volume by id.
+        Args:
+            k (str): Full item name in created_items
+            k_id (str): ID of volume in VIM
+            volumes_to_hold (list): Volumes not to delete
+            created_items (dict): All created items belongs to VM
+
+        Returns:
+            True when the volume is not yet "available" (caller should keep
+            waiting and retry); None when the volume is held or deleted.
+        """
+        try:
+            # Volumes listed in volumes_to_hold must be preserved.
+            if k_id in volumes_to_hold:
+                return
+
+            if self.cinder.volumes.get(k_id).status != "available":
+                return True
+
+            else:
+                self.cinder.volumes.delete(k_id)
+                # Mark the item as deleted so later iterations skip it.
+                created_items[k] = None
+
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+
+    def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
+        """Neutron delete floating ip by id.
+        Args:
+            k (str): Full item name in created_items
+            k_id (str): ID of floating ip in VIM
+            created_items (dict): All created items belongs to VM
+        """
+        try:
+            self.neutron.delete_floatingip(k_id)
+            # Mark the item as deleted so later iterations skip it.
+            created_items[k] = None
+
+        except Exception as e:
+            # Best-effort delete: log the error and let the caller continue.
+            self.logger.error(
+                "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+            )
+
+    @staticmethod
+    def _get_item_name_id(k: str) -> Tuple[str, str]:
+        """Split a created_items key "<type>:<id>" into its type and id parts."""
+        head, _sep, tail = k.partition(":")
+        return head, tail
+
+    def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
+        """Delete VM ports attached to the networks before deleting virtual machine.
+        Args:
+            created_items (dict): All created items belongs to VM
+        """
+
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
+
+            try:
+                # Keys have the form "<type>:<id>"; only "port" items are
+                # handled here, other item types are deleted elsewhere.
+                k_item, k_id = self._get_item_name_id(k)
+                if k_item == "port":
+                    self._delete_ports_by_id_wth_neutron(k_id)
+
+            except Exception as e:
+                self.logger.error(
+                    "Error deleting port: {}: {}".format(type(e).__name__, e)
+                )
+
+    def _delete_created_items(
+        self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
+    ) -> bool:
+        """Delete Volumes and floating ip if they exist in created_items.
+
+        Args:
+            created_items (dict): All created items belongs to VM
+            volumes_to_hold (list): Volumes not to delete
+            keep_waiting (bool): Current wait flag from the caller
+
+        Returns:
+            keep_waiting (bool): True if some volume is not yet available
+        """
+        for k, v in created_items.items():
+            if not v:  # skip already deleted
+                continue
+
+            try:
+                k_item, k_id = self._get_item_name_id(k)
+
+                if k_item == "volume":
+                    # Helper returns True while the volume is not yet
+                    # "available" and therefore cannot be deleted.
+                    unavailable_vol = self._delete_volumes_by_id_wth_cinder(
+                        k, k_id, volumes_to_hold, created_items
+                    )
+
+                    if unavailable_vol:
+                        keep_waiting = True
+
+                elif k_item == "floating_ip":
+                    self._delete_floating_ip_by_id(k, k_id, created_items)
+
+            except Exception as e:
+                self.logger.error("Error deleting {}: {}".format(k, e))
+
+        return keep_waiting
+
+    @staticmethod
+    def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
+        """Remove the volumes which have the keep flag from created_items.
+
+        Args:
+            created_items (dict): All created items belongs to VM
+
+        Returns:
+            created_items (dict): created_items without ":keep"-tagged volumes
+        """
+        # Keys have the form "<type>:<id>" (2 parts) or "volume:<id>:keep"
+        # (3 parts); keeping only 2-part keys drops the keep-tagged volumes.
+        return {
+            key: value
+            for (key, value) in created_items.items()
+            if len(key.split(":")) == 2
+        }
+
+ def delete_vminstance(
+ self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
+ ) -> None:
+ """Removes a VM instance from VIM. Returns the old identifier.
+ Args:
+ vm_id (str): Identifier of VM instance
+ created_items (dict): All created items belongs to VM
+ volumes_to_hold (list): Volumes_to_hold
+ """
if created_items is None:
created_items = {}
+ if volumes_to_hold is None:
+ volumes_to_hold = []
try:
- self._reload_connection()
- # delete VM ports attached to this networks before the virtual machine
- for k, v in created_items.items():
- if not v: # skip already deleted
- continue
+ created_items = self._extract_items_wth_keep_flag_from_created_items(
+ created_items
+ )
- try:
- k_item, _, k_id = k.partition(":")
- if k_item == "port":
- port_dict = self.neutron.list_ports()
- existing_ports = [
- port["id"] for port in port_dict["ports"] if port_dict
- ]
- if k_id in existing_ports:
- self.neutron.delete_port(k_id)
- except Exception as e:
- self.logger.error(
- "Error deleting port: {}: {}".format(type(e).__name__, e)
- )
+ self._reload_connection()
- # #commented because detaching the volumes makes the servers.delete not work properly ?!?
- # #dettach volumes attached
- # server = self.nova.servers.get(vm_id)
- # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"] #volume["id"]
- # #for volume in volumes_attached_dict:
- # # self.cinder.volumes.detach(volume["id"])
+ # Delete VM ports attached to the networks before the virtual machine
+ if created_items:
+ self._delete_vm_ports_attached_to_network(created_items)
if vm_id:
self.nova.servers.delete(vm_id)
- # delete volumes. Although having detached, they should have in active status before deleting
- # we ensure in this loop
+ # Although having detached, volumes should have in active status before deleting.
+ # We ensure in this loop
keep_waiting = True
elapsed_time = 0
while keep_waiting and elapsed_time < volume_timeout:
keep_waiting = False
- for k, v in created_items.items():
- if not v: # skip already deleted
- continue
-
- try:
- k_item, _, k_id = k.partition(":")
- if k_item == "volume":
- if self.cinder.volumes.get(k_id).status != "available":
- keep_waiting = True
- else:
- if k_id not in volumes_to_hold:
- self.cinder.volumes.delete(k_id)
- created_items[k] = None
- elif k_item == "floating_ip": # floating ip
- self.neutron.delete_floatingip(k_id)
- created_items[k] = None
-
- except Exception as e:
- self.logger.error("Error deleting {}: {}".format(k, e))
+ # Delete volumes and floating IP.
+ keep_waiting = self._delete_created_items(
+ created_items, volumes_to_hold, keep_waiting
+ )
if keep_waiting:
time.sleep(1)
elapsed_time += 1
- return None
except (
nvExceptions.NotFound,
ksExceptions.ClientException,
def action_vminstance(self, vm_id, action_dict, created_items={}):
"""Send and action over a VM instance from VIM
- Returns None or the console dict if the action was successfully sent to the VIM"""
+ Returns None or the console dict if the action was successfully sent to the VIM
+ """
self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
try:
)
)
- # NOT USED FUNCTIONS
-
- def new_external_port(self, port_data):
- """Adds a external port to VIM
- Returns the port identifier"""
- # TODO openstack if needed
- return (
- -vimconn.HTTP_Internal_Server_Error,
- "osconnector.new_external_port() not implemented",
- )
-
- def connect_port_network(self, port_id, network_id, admin=False):
- """Connects a external port to a network
- Returns status code of the VIM response"""
- # TODO openstack if needed
- return (
- -vimconn.HTTP_Internal_Server_Error,
- "osconnector.connect_port_network() not implemented",
- )
-
- def new_user(self, user_name, user_passwd, tenant_id=None):
- """Adds a new user to openstack VIM
- Returns the user identifier"""
- self.logger.debug("osconnector: Adding a new user to VIM")
-
- try:
- self._reload_connection()
- user = self.keystone.users.create(
- user_name, password=user_passwd, default_project=tenant_id
- )
- # self.keystone.tenants.add_user(self.k_creds["username"], #role)
-
- return user.id
- except ksExceptions.ConnectionError as e:
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
- except ksExceptions.ClientException as e: # TODO remove
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
-
- # TODO insert exception vimconn.HTTP_Unauthorized
- # if reaching here is because an exception
- self.logger.debug("new_user " + error_text)
-
- return error_value, error_text
-
- def delete_user(self, user_id):
- """Delete a user from openstack VIM
- Returns the user identifier"""
- if self.debug:
- print("osconnector: Deleting a user from VIM")
-
- try:
- self._reload_connection()
- self.keystone.users.delete(user_id)
-
- return 1, user_id
- except ksExceptions.ConnectionError as e:
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
- except ksExceptions.NotFound as e:
- error_value = -vimconn.HTTP_Not_Found
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
- except ksExceptions.ClientException as e: # TODO remove
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
-
- # TODO insert exception vimconn.HTTP_Unauthorized
- # if reaching here is because an exception
- self.logger.debug("delete_tenant " + error_text)
-
- return error_value, error_text
-
    def get_hosts_info(self):
        """Get the information of deployed hosts
        Returns the hosts content"""
        # NOTE(review): only a fragment of this method is visible here (diff
        # hunk context); error_value and error_text are not defined in the
        # visible lines — confirm the full method body in the real source.
        return error_value, error_text
- def new_classification(self, name, ctype, definition):
- self.logger.debug(
- "Adding a new (Traffic) Classification to VIM, named %s", name
- )
-
- try:
- new_class = None
- self._reload_connection()
-
- if ctype not in supportedClassificationTypes:
- raise vimconn.VimConnNotSupportedException(
- "OpenStack VIM connector does not support provided "
- "Classification Type {}, supported ones are: {}".format(
- ctype, supportedClassificationTypes
- )
- )
-
- if not self._validate_classification(ctype, definition):
- raise vimconn.VimConnException(
- "Incorrect Classification definition for the type specified."
- )
-
- classification_dict = definition
- classification_dict["name"] = name
- new_class = self.neutron.create_sfc_flow_classifier(
- {"flow_classifier": classification_dict}
- )
-
- return new_class["flow_classifier"]["id"]
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self.logger.error("Creation of Classification failed.")
- self._format_exception(e)
-
- def get_classification(self, class_id):
- self.logger.debug(" Getting Classification %s from VIM", class_id)
- filter_dict = {"id": class_id}
- class_list = self.get_classification_list(filter_dict)
-
- if len(class_list) == 0:
- raise vimconn.VimConnNotFoundException(
- "Classification '{}' not found".format(class_id)
- )
- elif len(class_list) > 1:
- raise vimconn.VimConnConflictException(
- "Found more than one Classification with this criteria"
- )
-
- classification = class_list[0]
-
- return classification
-
- def get_classification_list(self, filter_dict={}):
- self.logger.debug(
- "Getting Classifications from VIM filter: '%s'", str(filter_dict)
- )
-
- try:
- filter_dict_os = filter_dict.copy()
- self._reload_connection()
-
- if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
- classification_dict = self.neutron.list_sfc_flow_classifiers(
- **filter_dict_os
- )
- classification_list = classification_dict["flow_classifiers"]
- self.__classification_os2mano(classification_list)
-
- return classification_list
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
-
- def delete_classification(self, class_id):
- self.logger.debug("Deleting Classification '%s' from VIM", class_id)
-
- try:
- self._reload_connection()
- self.neutron.delete_sfc_flow_classifier(class_id)
-
- return class_id
- except (
- neExceptions.ConnectionFailed,
- neExceptions.NeutronException,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
-
    def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
        """Create a Service Function Instance (neutron SFC port pair).

        :param name: name for the new SFI
        :param ingress_ports: list containing exactly one ingress port id
        :param egress_ports: list containing exactly one egress port id
        :param sfc_encap: when True, NSH correlation is requested
        :return: VIM id of the created port pair
        :raises vimconn.VimConnNotSupportedException: more than one ingress
            or egress port was provided
        """
        self.logger.debug(
            "Adding a new Service Function Instance to VIM, named '%s'", name
        )

        try:
            new_sfi = None
            self._reload_connection()
            correlation = None

            if sfc_encap:
                correlation = "nsh"

            if len(ingress_ports) != 1:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector can only have 1 ingress port per SFI"
                )

            if len(egress_ports) != 1:
                raise vimconn.VimConnNotSupportedException(
                    "OpenStack VIM connector can only have 1 egress port per SFI"
                )

            sfi_dict = {
                "name": name,
                "ingress": ingress_ports[0],
                "egress": egress_ports[0],
                "service_function_parameters": {"correlation": correlation},
            }
            new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})

            return new_sfi["port_pair"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            # NOTE(review): new_sfi is still None when create_sfc_port_pair
            # itself raised, so this rollback only runs if the exception came
            # after a successful create — defensive, kept as-is.
            if new_sfi:
                try:
                    self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
                except Exception:
                    self.logger.error(
                        "Creation of Service Function Instance failed, with "
                        "subsequent deletion failure as well."
                    )

            self._format_exception(e)
-
- def get_sfi(self, sfi_id):
- self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
- filter_dict = {"id": sfi_id}
- sfi_list = self.get_sfi_list(filter_dict)
-
- if len(sfi_list) == 0:
- raise vimconn.VimConnNotFoundException(
- "Service Function Instance '{}' not found".format(sfi_id)
- )
- elif len(sfi_list) > 1:
- raise vimconn.VimConnConflictException(
- "Found more than one Service Function Instance with this criteria"
- )
-
- sfi = sfi_list[0]
-
- return sfi
-
- def get_sfi_list(self, filter_dict={}):
- self.logger.debug(
- "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
- )
-
- try:
- self._reload_connection()
- filter_dict_os = filter_dict.copy()
-
- if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
- sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
- sfi_list = sfi_dict["port_pairs"]
- self.__sfi_os2mano(sfi_list)
-
- return sfi_list
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
-
- def delete_sfi(self, sfi_id):
- self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
-
- try:
- self._reload_connection()
- self.neutron.delete_sfc_port_pair(sfi_id)
-
- return sfi_id
- except (
- neExceptions.ConnectionFailed,
- neExceptions.NeutronException,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
-
    def new_sf(self, name, sfis, sfc_encap=True):
        """Create a Service Function (neutron SFC port pair group).

        :param name: name for the new SF
        :param sfis: list of SFI (port pair) ids composing the SF
        :param sfc_encap: expected SFC encapsulation of every member SFI
        :return: VIM id of the created port pair group
        :raises vimconn.VimConnNotSupportedException: member SFIs do not all
            share the requested SFC encapsulation
        """
        self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)

        try:
            new_sf = None
            self._reload_connection()
            # correlation = None
            # if sfc_encap:
            #     correlation = "nsh"

            for instance in sfis:
                sfi = self.get_sfi(instance)

                # NOTE(review): assumes __sfi_os2mano sets a boolean
                # "sfc_encap" on each SFI — confirm against that helper.
                if sfi.get("sfc_encap") != sfc_encap:
                    raise vimconn.VimConnNotSupportedException(
                        "OpenStack VIM connector requires all SFIs of the "
                        "same SF to share the same SFC Encapsulation"
                    )

            sf_dict = {"name": name, "port_pairs": sfis}
            new_sf = self.neutron.create_sfc_port_pair_group(
                {"port_pair_group": sf_dict}
            )

            return new_sf["port_pair_group"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            # NOTE(review): new_sf is still None when the create call itself
            # raised; this rollback is defensive and rarely triggers.
            if new_sf:
                try:
                    self.neutron.delete_sfc_port_pair_group(
                        new_sf["port_pair_group"]["id"]
                    )
                except Exception:
                    self.logger.error(
                        "Creation of Service Function failed, with "
                        "subsequent deletion failure as well."
                    )

            self._format_exception(e)
-
- def get_sf(self, sf_id):
- self.logger.debug("Getting Service Function %s from VIM", sf_id)
- filter_dict = {"id": sf_id}
- sf_list = self.get_sf_list(filter_dict)
-
- if len(sf_list) == 0:
- raise vimconn.VimConnNotFoundException(
- "Service Function '{}' not found".format(sf_id)
- )
- elif len(sf_list) > 1:
- raise vimconn.VimConnConflictException(
- "Found more than one Service Function with this criteria"
- )
-
- sf = sf_list[0]
-
- return sf
-
- def get_sf_list(self, filter_dict={}):
- self.logger.debug(
- "Getting Service Function from VIM filter: '%s'", str(filter_dict)
- )
-
- try:
- self._reload_connection()
- filter_dict_os = filter_dict.copy()
-
- if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
- sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
- sf_list = sf_dict["port_pair_groups"]
- self.__sf_os2mano(sf_list)
-
- return sf_list
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
-
- def delete_sf(self, sf_id):
- self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
-
- try:
- self._reload_connection()
- self.neutron.delete_sfc_port_pair_group(sf_id)
-
- return sf_id
- except (
- neExceptions.ConnectionFailed,
- neExceptions.NeutronException,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
-
    def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
        """Create a Service Function Path (neutron SFC port chain).

        :param name: name for the new SFP
        :param classifications: list of flow classifier ids
        :param sfs: ordered list of SF (port pair group) ids
        :param sfc_encap: when True use NSH correlation, otherwise MPLS
        :param spi: optional service path identifier (neutron chain_id)
        :return: VIM id of the created port chain
        """
        self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)

        try:
            new_sfp = None
            self._reload_connection()
            # In networking-sfc the MPLS encapsulation is legacy
            # should be used when no full SFC Encapsulation is intended
            correlation = "mpls"

            if sfc_encap:
                correlation = "nsh"

            sfp_dict = {
                "name": name,
                "flow_classifiers": classifications,
                "port_pair_groups": sfs,
                "chain_parameters": {"correlation": correlation},
            }

            if spi:
                sfp_dict["chain_id"] = spi

            new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})

            return new_sfp["port_chain"]["id"]
        except (
            neExceptions.ConnectionFailed,
            ksExceptions.ClientException,
            neExceptions.NeutronException,
            ConnectionError,
        ) as e:
            # NOTE(review): new_sfp is still None when the create call itself
            # raised; this rollback is defensive and rarely triggers.
            if new_sfp:
                try:
                    self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
                except Exception:
                    self.logger.error(
                        "Creation of Service Function Path failed, with "
                        "subsequent deletion failure as well."
                    )

            self._format_exception(e)
-
- def get_sfp(self, sfp_id):
- self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
-
- filter_dict = {"id": sfp_id}
- sfp_list = self.get_sfp_list(filter_dict)
-
- if len(sfp_list) == 0:
- raise vimconn.VimConnNotFoundException(
- "Service Function Path '{}' not found".format(sfp_id)
- )
- elif len(sfp_list) > 1:
- raise vimconn.VimConnConflictException(
- "Found more than one Service Function Path with this criteria"
- )
-
- sfp = sfp_list[0]
-
- return sfp
-
- def get_sfp_list(self, filter_dict={}):
- self.logger.debug(
- "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
- )
-
- try:
- self._reload_connection()
- filter_dict_os = filter_dict.copy()
-
- if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
- sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
- sfp_list = sfp_dict["port_chains"]
- self.__sfp_os2mano(sfp_list)
-
- return sfp_list
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
-
- def delete_sfp(self, sfp_id):
- self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
-
- try:
- self._reload_connection()
- self.neutron.delete_sfc_port_chain(sfp_id)
-
- return sfp_id
- except (
- neExceptions.ConnectionFailed,
- neExceptions.NeutronException,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
-
    def refresh_sfps_status(self, sfp_list):
        """Get the status of the service function path
        Params: the list of sfp identifiers
        Returns a dictionary with:
            vm_id: #VIM id of this service function path
            status: #Mandatory. Text with one of:
                # DELETED (not found at vim)
                # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                # OTHER (Vim reported other status not understood)
                # ERROR (VIM indicates an ERROR status)
                # ACTIVE,
                # CREATING (on building process)
            error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
        """
        sfp_dict = {}
        self.logger.debug(
            "refresh_sfps status: Getting tenant SFP information from VIM"
        )

        for sfp_id in sfp_list:
            sfp = {}

            try:
                sfp_vim = self.get_sfp(sfp_id)

                # An assigned service path identifier (spi) is treated as
                # the SFP being operational.
                if sfp_vim["spi"]:
                    sfp["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    sfp["status"] = "OTHER"
                    sfp["error_msg"] = "VIM status reported " + sfp["status"]

                sfp["vim_info"] = self.serialize(sfp_vim)

                if sfp_vim.get("fault"):
                    sfp["error_msg"] = str(sfp_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting sfp status: %s", str(e))
                sfp["status"] = "DELETED"
                sfp["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                # Must come after the NotFound handler (its subclass).
                self.logger.error("Exception getting sfp status: %s", str(e))
                sfp["status"] = "VIM_ERROR"
                sfp["error_msg"] = str(e)

            sfp_dict[sfp_id] = sfp

        return sfp_dict
-
- def refresh_sfis_status(self, sfi_list):
- """Get the status of the service function instances
- Params: the list of sfi identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function instance
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- """
- sfi_dict = {}
- self.logger.debug(
- "refresh_sfis status: Getting tenant sfi information from VIM"
- )
-
- for sfi_id in sfi_list:
- sfi = {}
-
- try:
- sfi_vim = self.get_sfi(sfi_id)
-
- if sfi_vim:
- sfi["status"] = vmStatus2manoFormat["ACTIVE"]
- else:
- sfi["status"] = "OTHER"
- sfi["error_msg"] = "VIM status reported " + sfi["status"]
-
- sfi["vim_info"] = self.serialize(sfi_vim)
-
- if sfi_vim.get("fault"):
- sfi["error_msg"] = str(sfi_vim["fault"])
- except vimconn.VimConnNotFoundException as e:
- self.logger.error("Exception getting sfi status: %s", str(e))
- sfi["status"] = "DELETED"
- sfi["error_msg"] = str(e)
- except vimconn.VimConnException as e:
- self.logger.error("Exception getting sfi status: %s", str(e))
- sfi["status"] = "VIM_ERROR"
- sfi["error_msg"] = str(e)
-
- sfi_dict[sfi_id] = sfi
-
- return sfi_dict
-
- def refresh_sfs_status(self, sf_list):
- """Get the status of the service functions
- Params: the list of sf identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- """
- sf_dict = {}
- self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
-
- for sf_id in sf_list:
- sf = {}
-
- try:
- sf_vim = self.get_sf(sf_id)
-
- if sf_vim:
- sf["status"] = vmStatus2manoFormat["ACTIVE"]
- else:
- sf["status"] = "OTHER"
- sf["error_msg"] = "VIM status reported " + sf_vim["status"]
-
- sf["vim_info"] = self.serialize(sf_vim)
-
- if sf_vim.get("fault"):
- sf["error_msg"] = str(sf_vim["fault"])
- except vimconn.VimConnNotFoundException as e:
- self.logger.error("Exception getting sf status: %s", str(e))
- sf["status"] = "DELETED"
- sf["error_msg"] = str(e)
- except vimconn.VimConnException as e:
- self.logger.error("Exception getting sf status: %s", str(e))
- sf["status"] = "VIM_ERROR"
- sf["error_msg"] = str(e)
-
- sf_dict[sf_id] = sf
-
- return sf_dict
-
- def refresh_classifications_status(self, classification_list):
- """Get the status of the classifications
- Params: the list of classification identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this classifier
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- """
- classification_dict = {}
- self.logger.debug(
- "refresh_classifications status: Getting tenant classification information from VIM"
- )
-
- for classification_id in classification_list:
- classification = {}
-
- try:
- classification_vim = self.get_classification(classification_id)
-
- if classification_vim:
- classification["status"] = vmStatus2manoFormat["ACTIVE"]
- else:
- classification["status"] = "OTHER"
- classification["error_msg"] = (
- "VIM status reported " + classification["status"]
- )
-
- classification["vim_info"] = self.serialize(classification_vim)
-
- if classification_vim.get("fault"):
- classification["error_msg"] = str(classification_vim["fault"])
- except vimconn.VimConnNotFoundException as e:
- self.logger.error("Exception getting classification status: %s", str(e))
- classification["status"] = "DELETED"
- classification["error_msg"] = str(e)
- except vimconn.VimConnException as e:
- self.logger.error("Exception getting classification status: %s", str(e))
- classification["status"] = "VIM_ERROR"
- classification["error_msg"] = str(e)
-
- classification_dict[classification_id] = classification
-
- return classification_dict
-
def new_affinity_group(self, affinity_group_data):
"""Adds a server group to VIM
affinity_group_data contains a dictionary with information, keys:
self.__wait_for_vm(vm_id, "ACTIVE")
instance_status = self.get_vdu_state(vm_id)[0]
return instance_status
+
+ def get_monitoring_data(self):
+ try:
+ self.logger.debug("Getting servers and ports data from Openstack VIMs.")
+ self._reload_connection()
+ all_servers = self.nova.servers.list(detailed=True)
+ all_ports = self.neutron.list_ports()
+ return all_servers, all_ports
+ except (
+ vimconn.VimConnException,
+ vimconn.VimConnNotFoundException,
+ vimconn.VimConnConnectionException,
+ ) as e:
+ raise vimconn.VimConnException(
+ f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+ )