- Service Function Path (OSM) -> Port Chain (Neutron)
"""
-from osm_ro_plugin import vimconn
-
-# import json
+import copy
+from http.client import HTTPException
+import json
import logging
-import netaddr
-import time
-import yaml
+from pprint import pformat
import random
import re
-import copy
-from pprint import pformat
-from novaclient import client as nClient, exceptions as nvExceptions
-from keystoneauth1.identity import v2, v3
+import time
+from typing import Dict, List, Optional, Tuple
+
+from cinderclient import client as cClient
+from glanceclient import client as glClient
+import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
+from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
-import keystoneclient.v3.client as ksClient_v3
import keystoneclient.v2_0.client as ksClient_v2
-from glanceclient import client as glClient
-import glanceclient.exc as gl1Exceptions
-from cinderclient import client as cClient
-
-# TODO py3 check that this base exception matches python2 httplib.HTTPException
-from http.client import HTTPException
-from neutronclient.neutron import client as neClient
+import keystoneclient.v3.client as ksClient_v3
+import netaddr
from neutronclient.common import exceptions as neExceptions
+from neutronclient.neutron import client as neClient
+from novaclient import client as nClient, exceptions as nvExceptions
+from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
+import yaml
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"
endpoint_type=self.endpoint_type,
region_name=region_name,
)
- self.cinder = self.session["cinder"] = cClient.Client(
- 2,
- session=sess,
- endpoint_type=self.endpoint_type,
- region_name=region_name,
- )
+
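+ # Prefer the cinder v2 API when the catalog advertises a "volumev2"
+ # endpoint; otherwise fall back to the v3 client.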
+ if sess.get_all_version_data(service_type="volumev2"):
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 2,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
+ else:
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 3,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
try:
self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
self._reload_connection()
network_dict = {"name": net_name, "admin_state_up": True}
- if net_type in ("data", "ptp"):
+ if net_type in ("data", "ptp") or provider_network_profile:
provider_physical_network = None
if provider_network_profile and provider_network_profile.get(
if self.config.get("disable_network_port_security"):
network_dict["port_security_enabled"] = False
+ if self.config.get("neutron_availability_zone_hints"):
+ hints = self.config.get("neutron_availability_zone_hints")
+
+ if isinstance(hints, str):
+ hints = [hints]
+
+ network_dict["availability_zone_hints"] = hints
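+ # Example (illustrative): a config of neutron_availability_zone_hints: "az1"
+ # results in the network being created with availability_zone_hints=["az1"].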
+
new_net = self.neutron.create_network({"network": network_dict})
# print new_net
# create subnetwork, even if there is no profile
if not ip_profile.get("subnet_address"):
# Fake subnet is required
- subnet_rand = random.randint(0, 255)
+ subnet_rand = random.SystemRandom().randint(0, 255)
ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
if "ip_version" not in ip_profile:
ip_str = str(netaddr.IPAddress(ip_int))
subnet["allocation_pools"][0]["end"] = ip_str
+ if (
+ ip_profile.get("ipv6_address_mode")
+ and ip_profile["ip_version"] != "IPv4"
+ ):
+ subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+ # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+ # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+ subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
# self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
self.neutron.create_subnet({"subnet": subnet})
flavor_dict["ram"],
flavor_dict["vcpus"],
flavor_dict["disk"],
+ flavor_dict.get("ephemeral", 0),
+ flavor_dict.get("swap", 0),
)
# numa=None
extended = flavor_dict.get("extended", {})
continue
# TODO
- flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
+ flavor_data = (
+ flavor.ram,
+ flavor.vcpus,
+ flavor.disk,
+ flavor.ephemeral,
+ flavor.swap if isinstance(flavor.swap, int) else 0,
+ )
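+ # Nova can report swap as an empty string when unset, so the isinstance
+ # check above normalizes it to 0 before comparing against flavor_target.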
if flavor_data == flavor_target:
return flavor.id
elif (
) as e:
self._format_exception(e)
- def process_resource_quota(self, quota, prefix, extra_specs):
- """
- :param prefix:
- :param extra_specs:
- :return:
+ @staticmethod
+ def process_resource_quota(quota: dict, prefix: str, extra_specs: dict) -> None:
+ """Process resource quota and fill up extra_specs.
+ Args:
+ quota (dict): Keeping the quota of resources
+ prefix (str): Prefix to be used in the extra_specs keys
+ extra_specs (dict): Dict to be filled to be used during flavor creation
+
"""
if "limit" in quota:
extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
extra_specs["quota:" + prefix + "_shares_level"] = "custom"
extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]
- def new_flavor(self, flavor_data, change_name_if_used=True):
- """Adds a tenant flavor to openstack VIM
- if change_name_if_used is True, it will change name in case of conflict, because it is not supported name
- repetition
- Returns the flavor identifier
+ @staticmethod
+ def process_numa_memory(
+ numa: dict, node_id: Optional[int], extra_specs: dict
+ ) -> None:
+ """Set the memory in extra_specs.
+ Args:
+ numa (dict): A dictionary which includes numa information
+ node_id (int): ID of numa node
+ extra_specs (dict): To be filled.
+
+ """
+ if not numa.get("memory"):
+ return
+ memory_mb = numa["memory"] * 1024
+ memory = "hw:numa_mem.{}".format(node_id)
+ extra_specs[memory] = int(memory_mb)
+
+ @staticmethod
+ def process_numa_vcpu(numa: dict, node_id: int, extra_specs: dict) -> None:
+ """Set the cpu in extra_specs.
+ Args:
+ numa (dict): A dictionary which includes numa information
+ node_id (int): ID of numa node
+ extra_specs (dict): To be filled.
+
+ """
+ if not numa.get("vcpu"):
+ return
+ vcpu = numa["vcpu"]
+ cpu = "hw:numa_cpus.{}".format(node_id)
+ vcpu = ",".join(map(str, vcpu))
+ extra_specs[cpu] = vcpu
+
+ @staticmethod
+ def process_numa_paired_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+ """Fill up extra_specs if numa has paired-threads.
+ Args:
+ numa (dict): A dictionary which includes numa information
+ extra_specs (dict): To be filled.
+
+ Returns:
+ threads (int): Number of virtual CPUs
+
+ """
+ if not numa.get("paired-threads"):
+ return
+
+ # cpu_thread_policy "require" implies that the compute node must have an SMT architecture
+ threads = numa["paired-threads"] * 2
+ extra_specs["hw:cpu_thread_policy"] = "require"
+ extra_specs["hw:cpu_policy"] = "dedicated"
+ return threads
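+ # Example (illustrative): numa={"paired-threads": 2} yields 4 vCPUs and sets
+ # hw:cpu_thread_policy=require, hw:cpu_policy=dedicated.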
+
+ @staticmethod
+ def process_numa_cores(numa: dict, extra_specs: dict) -> Optional[int]:
+ """Fill up extra_specs if numa has cores.
+ Args:
+ numa (dict): A dictionary which includes numa information
+ extra_specs (dict): To be filled.
+
+ Returns:
+ cores (int): Number of virtual CPUs
+
+ """
+ # cpu_thread_policy "isolate" implies that the host must not have an SMT
+ # architecture, or a non-SMT architecture will be emulated
+ if not numa.get("cores"):
+ return
+ cores = numa["cores"]
+ extra_specs["hw:cpu_thread_policy"] = "isolate"
+ extra_specs["hw:cpu_policy"] = "dedicated"
+ return cores
+
+ @staticmethod
+ def process_numa_threads(numa: dict, extra_specs: dict) -> Optional[int]:
+ """Fill up extra_specs if numa has threads.
+ Args:
+ numa (dict): A dictionary which includes numa information
+ extra_specs (dict): To be filled.
+
+ Returns:
+ threads (int): Number of virtual CPUs
+
+ """
+ # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+ if not numa.get("threads"):
+ return
+ threads = numa["threads"]
+ extra_specs["hw:cpu_thread_policy"] = "prefer"
+ extra_specs["hw:cpu_policy"] = "dedicated"
+ return threads
+
+ def _process_numa_parameters_of_flavor(
+ self, numas: List, extra_specs: Dict
+ ) -> None:
+ """Process numa parameters and fill up extra_specs.
+
+ Args:
+ numas (list): List of dictionary which includes numa information
+ extra_specs (dict): To be filled.
+
+ """
+ numa_nodes = len(numas)
+ extra_specs["hw:numa_nodes"] = str(numa_nodes)
+ cpu_cores, cpu_threads = 0, 0
+
+ if self.vim_type == "VIO":
+ self.process_vio_numa_nodes(numa_nodes, extra_specs)
+
+ for numa in numas:
+ if "id" in numa:
+ node_id = numa["id"]
+ # overwrite ram and vcpus
+ # check if key "memory" is present in numa else use ram value at flavor
+ self.process_numa_memory(numa, node_id, extra_specs)
+ self.process_numa_vcpu(numa, node_id, extra_specs)
+
+ # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+ extra_specs["hw:cpu_sockets"] = str(numa_nodes)
+
+ if "paired-threads" in numa:
+ threads = self.process_numa_paired_threads(numa, extra_specs)
+ cpu_threads += threads
+
+ elif "cores" in numa:
+ cores = self.process_numa_cores(numa, extra_specs)
+ cpu_cores += cores
+
+ elif "threads" in numa:
+ threads = self.process_numa_threads(numa, extra_specs)
+ cpu_threads += threads
+
+ if cpu_cores:
+ extra_specs["hw:cpu_cores"] = str(cpu_cores)
+ if cpu_threads:
+ extra_specs["hw:cpu_threads"] = str(cpu_threads)
+
+ @staticmethod
+ def process_vio_numa_nodes(numa_nodes: int, extra_specs: Dict) -> None:
+ """Update the extra_specs for VIO according to the number of NUMA nodes.
+
+ Args:
+ numa_nodes (int): Number of NUMA nodes
+ extra_specs (dict): Extra specs dict to be updated
+
+ """
+ # If there are several numas, we do not define specific affinity.
+ extra_specs["vmware:latency_sensitivity_level"] = "high"
+
+ def _change_flavor_name(
+ self, name: str, name_suffix: int, flavor_data: dict
+ ) -> str:
+ """Change the flavor name if the name already exists.
+
+ Args:
+ name (str): Flavor name to be checked
+ name_suffix (int): Suffix to be appended to name
+ flavor_data (dict): Flavor dict
+
+ Returns:
+ name (str): New flavor name to be used
+
+ """
+ # Get used names
+ fl = self.nova.flavors.list()
+ fl_names = [f.name for f in fl]
+
+ while name in fl_names:
+ name_suffix += 1
+ name = flavor_data["name"] + "-" + str(name_suffix)
+
+ return name
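+ # Example (illustrative): if "small" and "small-1" are already taken, the
+ # loop settles on "small-2".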
+
+ def _process_extended_config_of_flavor(
+ self, extended: dict, extra_specs: dict
+ ) -> None:
+ """Process the extended dict to fill up extra_specs.
+ Args:
+
+ extended (dict): Keeping the extra specification of flavor
+ extra_specs (dict): Dict to be filled to be used during flavor creation
+
+ """
+ quotas = {
+ "cpu-quota": "cpu",
+ "mem-quota": "memory",
+ "vif-quota": "vif",
+ "disk-io-quota": "disk_io",
+ }
+
+ page_sizes = {
+ "LARGE": "large",
+ "SMALL": "small",
+ "SIZE_2MB": "2MB",
+ "SIZE_1GB": "1GB",
+ "PREFER_LARGE": "any",
+ }
+
+ policies = {
+ "cpu-pinning-policy": "hw:cpu_policy",
+ "cpu-thread-pinning-policy": "hw:cpu_thread_policy",
+ "mem-policy": "hw:numa_mempolicy",
+ }
+
+ numas = extended.get("numas")
+ if numas:
+ self._process_numa_parameters_of_flavor(numas, extra_specs)
+
+ for quota, item in quotas.items():
+ if quota in extended.keys():
+ self.process_resource_quota(extended.get(quota), item, extra_specs)
+
+ # Set the mempage size as specified in the descriptor
+ if extended.get("mempage-size"):
+ if extended["mempage-size"] in page_sizes.keys():
+ extra_specs["hw:mem_page_size"] = page_sizes[extended["mempage-size"]]
+ else:
+ # Normally, validations in NBI should not allow this condition to happen.
+ self.logger.debug(
+ "Invalid mempage-size %s. Will be ignored",
+ extended.get("mempage-size"),
+ )
+
+ for policy, hw_policy in policies.items():
+ if extended.get(policy):
+ extra_specs[hw_policy] = extended[policy].lower()
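+ # Example (illustrative): extended={"mempage-size": "LARGE",
+ # "cpu-pinning-policy": "DEDICATED"} produces extra_specs
+ # {"hw:mem_page_size": "large", "hw:cpu_policy": "dedicated"}.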
+
+ @staticmethod
+ def _get_flavor_details(flavor_data: dict) -> Tuple:
+ """Returns the details of flavor
+ Args:
+ flavor_data (dict): Dictionary that includes required flavor details
+
+ Returns:
+ ram, vcpus, extra_specs, extended (tuple): Main items of required flavor
+
+ """
+ return (
+ flavor_data.get("ram", 64),
+ flavor_data.get("vcpus", 1),
+ {},
+ flavor_data.get("extended"),
+ )
+
+ def new_flavor(self, flavor_data: dict, change_name_if_used: bool = True) -> str:
+ """Adds a tenant flavor to openstack VIM.
+ If change_name_if_used is True, it will change the name in case of conflict,
+ because name repetition is not supported.
+
+ Args:
+ flavor_data (dict): Flavor details to be processed
+ change_name_if_used (bool): Change name in case of conflict
+
+ Returns:
+ flavor_id (str): flavor identifier
+
"""
self.logger.debug("Adding flavor '%s'", str(flavor_data))
retry = 0
self._reload_connection()
if change_name_if_used:
- # get used names
- fl_names = []
- fl = self.nova.flavors.list()
-
- for f in fl:
- fl_names.append(f.name)
-
- while name in fl_names:
- name_suffix += 1
- name = flavor_data["name"] + "-" + str(name_suffix)
+ name = self._change_flavor_name(name, name_suffix, flavor_data)
- ram = flavor_data.get("ram", 64)
- vcpus = flavor_data.get("vcpus", 1)
- extra_specs = {}
-
- extended = flavor_data.get("extended")
+ ram, vcpus, extra_specs, extended = self._get_flavor_details(
+ flavor_data
+ )
if extended:
- numas = extended.get("numas")
-
- if numas:
- numa_nodes = len(numas)
-
- if numa_nodes > 1:
- return -1, "Can not add flavor with more than one numa"
-
- extra_specs["hw:numa_nodes"] = str(numa_nodes)
- extra_specs["hw:mem_page_size"] = "large"
- extra_specs["hw:cpu_policy"] = "dedicated"
- extra_specs["hw:numa_mempolicy"] = "strict"
-
- if self.vim_type == "VIO":
- extra_specs[
- "vmware:extra_config"
- ] = '{"numa.nodeAffinity":"0"}'
- extra_specs["vmware:latency_sensitivity_level"] = "high"
-
- for numa in numas:
- # overwrite ram and vcpus
- # check if key "memory" is present in numa else use ram value at flavor
- if "memory" in numa:
- ram = numa["memory"] * 1024
- # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
- # implemented/virt-driver-cpu-thread-pinning.html
- extra_specs["hw:cpu_sockets"] = 1
-
- if "paired-threads" in numa:
- vcpus = numa["paired-threads"] * 2
- # cpu_thread_policy "require" implies that the compute node must have an
- # STM architecture
- extra_specs["hw:cpu_thread_policy"] = "require"
- extra_specs["hw:cpu_policy"] = "dedicated"
- elif "cores" in numa:
- vcpus = numa["cores"]
- # cpu_thread_policy "prefer" implies that the host must not have an SMT
- # architecture, or a non-SMT architecture will be emulated
- extra_specs["hw:cpu_thread_policy"] = "isolate"
- extra_specs["hw:cpu_policy"] = "dedicated"
- elif "threads" in numa:
- vcpus = numa["threads"]
- # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
- # architecture
- extra_specs["hw:cpu_thread_policy"] = "prefer"
- extra_specs["hw:cpu_policy"] = "dedicated"
- # for interface in numa.get("interfaces",() ):
- # if interface["dedicated"]=="yes":
- # raise vimconn.VimConnException("Passthrough interfaces are not supported
- # for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
- # #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"'
- # when a way to connect it is available
- elif extended.get("cpu-quota"):
- self.process_resource_quota(
- extended.get("cpu-quota"), "cpu", extra_specs
- )
-
- if extended.get("mem-quota"):
- self.process_resource_quota(
- extended.get("mem-quota"), "memory", extra_specs
- )
-
- if extended.get("vif-quota"):
- self.process_resource_quota(
- extended.get("vif-quota"), "vif", extra_specs
- )
+ self._process_extended_config_of_flavor(extended, extra_specs)
- if extended.get("disk-io-quota"):
- self.process_resource_quota(
- extended.get("disk-io-quota"), "disk_io", extra_specs
- )
+ # Create flavor
- # create flavor
new_flavor = self.nova.flavors.create(
- name,
- ram,
- vcpus,
- flavor_data.get("disk", 0),
+ name=name,
+ ram=ram,
+ vcpus=vcpus,
+ disk=flavor_data.get("disk", 0),
+ ephemeral=flavor_data.get("ephemeral", 0),
+ swap=flavor_data.get("swap", 0),
is_public=flavor_data.get("is_public", True),
)
- # add metadata
+
+ # Add metadata
if extra_specs:
new_flavor.set_keys(extra_specs)
return new_flavor.id
+
except nvExceptions.Conflict as e:
if change_name_if_used and retry < max_retries:
continue
self._format_exception(e)
- # except nvExceptions.BadRequest as e:
+
except (
ksExceptions.ClientException,
nvExceptions.ClientException,
"No enough availability zones at VIM for this deployment"
)
- def new_vminstance(
- self,
- name,
- description,
- start,
- image_id,
- flavor_id,
- net_list,
- cloud_config=None,
- disk_list=None,
- availability_zone_index=None,
- availability_zone_list=None,
- ):
- """Adds a VM instance to VIM
- Params:
- start: indicates if VM must start or boot in pause mode. Ignored
- image_id,flavor_id: iamge and flavor uuid
- net_list: list of interfaces, each one is a dictionary with:
- name:
- net_id: network uuid to connect
- vpci: virtual vcpi to assign, ignored because openstack lack #TODO
- model: interface model, ignored #TODO
- mac_address: used for SR-IOV ifaces #TODO for other types
- use: 'data', 'bridge', 'mgmt'
- type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
- vim_id: filled/added by this function
- floating_ip: True/False (or it can be None)
- port_security: True/False
- 'cloud_config': (optional) dictionary with:
- 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
- 'users': (optional) list of users to be inserted, each item is a dict with:
- 'name': (mandatory) user name,
- 'key-pairs': (optional) list of strings with the public key to be inserted to the user
- 'user-data': (optional) string is a text script to be passed directly to cloud-init
- 'config-files': (optional). List of files to be transferred. Each item is a dict with:
- 'dest': (mandatory) string with the destination absolute path
- 'encoding': (optional, by default text). Can be one of:
- 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
- 'content' (mandatory): string with the content of the file
- 'permissions': (optional) string with file permissions, typically octal notation '0644'
- 'owner': (optional) file owner, string with the format 'owner:group'
- 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
- 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
- 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
- 'size': (mandatory) string with the size of the disk in GB
- 'vim_id' (optional) should use this existing volume id
- availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
- availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
- availability_zone_index is None
- #TODO ip, security groups
- Returns a tuple with the instance identifier and created_items or raises an exception on error
- created_items can be None or a dictionary where this method can include key-values that will be passed to
- the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
- Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
- as not present.
+ def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
+ """Fill up the security_groups in the port_dict.
+
+ Args:
+ net (dict): Network details
+ port_dict (dict): Port details
+
"""
- self.logger.debug(
- "new_vminstance input: image='%s' flavor='%s' nics='%s'",
- image_id,
- flavor_id,
- str(net_list),
- )
+ if (
+ self.config.get("security_groups")
+ and net.get("port_security") is not False
+ and not self.config.get("no_port_security_extension")
+ ):
+ if not self.security_groups_id:
+ self._get_ids_from_name()
- try:
- server = None
- created_items = {}
- # metadata = {}
- net_list_vim = []
- external_network = []
- # ^list of external networks to be connected to instance, later on used to create floating_ip
- no_secured_ports = [] # List of port-is with port-security disabled
- self._reload_connection()
- # metadata_vpci = {} # For a specific neutron plugin
- block_device_mapping = None
+ port_dict["security_groups"] = self.security_groups_id
- for net in net_list:
- if not net.get("net_id"): # skip non connected iface
- continue
+ def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
+ """Fill up the network binding depending on network type in the port_dict.
- port_dict = {
- "network_id": net["net_id"],
- "name": net.get("name"),
- "admin_state_up": True,
- }
+ Args:
+ net (dict): Network details
+ port_dict (dict): Port details
- if (
- self.config.get("security_groups")
- and net.get("port_security") is not False
- and not self.config.get("no_port_security_extension")
- ):
- if not self.security_groups_id:
- self._get_ids_from_name()
+ """
+ if not net.get("type"):
+ raise vimconn.VimConnException("Type is missing in the network details.")
- port_dict["security_groups"] = self.security_groups_id
+ if net["type"] == "virtual":
+ pass
- if net["type"] == "virtual":
- pass
- # if "vpci" in net:
- # metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
- elif net["type"] == "VF" or net["type"] == "SR-IOV": # for VF
- # if "vpci" in net:
- # if "VF" not in metadata_vpci:
- # metadata_vpci["VF"]=[]
- # metadata_vpci["VF"].append([ net["vpci"], "" ])
- port_dict["binding:vnic_type"] = "direct"
-
- # VIO specific Changes
- if self.vim_type == "VIO":
- # Need to create port with port_security_enabled = False and no-security-groups
- port_dict["port_security_enabled"] = False
- port_dict["provider_security_groups"] = []
- port_dict["security_groups"] = []
- else: # For PT PCI-PASSTHROUGH
- # if "vpci" in net:
- # if "PF" not in metadata_vpci:
- # metadata_vpci["PF"]=[]
- # metadata_vpci["PF"].append([ net["vpci"], "" ])
- port_dict["binding:vnic_type"] = "direct-physical"
-
- if not port_dict["name"]:
- port_dict["name"] = name
-
- if net.get("mac_address"):
- port_dict["mac_address"] = net["mac_address"]
-
- if net.get("ip_address"):
- port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
- # TODO add "subnet_id": <subnet_id>
-
- new_port = self.neutron.create_port({"port": port_dict})
- created_items["port:" + str(new_port["port"]["id"])] = True
- net["mac_adress"] = new_port["port"]["mac_address"]
- net["vim_id"] = new_port["port"]["id"]
- # if try to use a network without subnetwork, it will return a emtpy list
- fixed_ips = new_port["port"].get("fixed_ips")
-
- if fixed_ips:
- net["ip"] = fixed_ips[0].get("ip_address")
- else:
- net["ip"] = None
-
- port = {"port-id": new_port["port"]["id"]}
- if float(self.nova.api_version.get_string()) >= 2.32:
- port["tag"] = new_port["port"]["name"]
-
- net_list_vim.append(port)
-
- if net.get("floating_ip", False):
- net["exit_on_floating_ip_error"] = True
- external_network.append(net)
- elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
- net["exit_on_floating_ip_error"] = False
- external_network.append(net)
- net["floating_ip"] = self.config.get("use_floating_ip")
-
- # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
- # is dropped.
- # As a workaround we wait until the VM is active and then disable the port-security
- if net.get("port_security") is False and not self.config.get(
- "no_port_security_extension"
- ):
- no_secured_ports.append(
- (
- new_port["port"]["id"],
- net.get("port_security_disable_strategy"),
- )
- )
+ # For VF
+ elif net["type"] == "VF" or net["type"] == "SR-IOV":
+ port_dict["binding:vnic_type"] = "direct"
- # if metadata_vpci:
- # metadata = {"pci_assignement": json.dumps(metadata_vpci)}
- # if len(metadata["pci_assignement"]) >255:
- # #limit the metadata size
- # #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
- # self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
- # metadata = {}
+ # VIO specific Changes
+ if self.vim_type == "VIO":
+ # Need to create port with port_security_enabled = False and no-security-groups
+ port_dict["port_security_enabled"] = False
+ port_dict["provider_security_groups"] = []
+ port_dict["security_groups"] = []
- self.logger.debug(
- "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
- name,
- image_id,
- flavor_id,
- str(net_list_vim),
- description,
- )
+ else:
+ # For PT PCI-PASSTHROUGH
+ port_dict["binding:vnic_type"] = "direct-physical"
- # cloud config
- config_drive, userdata = self._create_user_data(cloud_config)
+ @staticmethod
+ def _set_fixed_ip(new_port: dict, net: dict) -> None:
+ """Set the "ip" parameter in net dictionary.
- # Create additional volumes in case these are present in disk_list
- base_disk_index = ord("b")
- if disk_list:
- block_device_mapping = {}
- for disk in disk_list:
- if disk.get("vim_id"):
- block_device_mapping["_vd" + chr(base_disk_index)] = disk[
- "vim_id"
- ]
- else:
- if "image_id" in disk:
- volume = self.cinder.volumes.create(
- size=disk["size"],
- name=name + "_vd" + chr(base_disk_index),
- imageRef=disk["image_id"],
- )
- else:
- volume = self.cinder.volumes.create(
- size=disk["size"],
- name=name + "_vd" + chr(base_disk_index),
- )
+ Args:
+ new_port (dict): New created port
+ net (dict): Network details
- created_items["volume:" + str(volume.id)] = True
- block_device_mapping["_vd" + chr(base_disk_index)] = volume.id
+ """
+ fixed_ips = new_port["port"].get("fixed_ips")
- base_disk_index += 1
+ if fixed_ips:
+ net["ip"] = fixed_ips[0].get("ip_address")
+ else:
+ net["ip"] = None
- # Wait until created volumes are with status available
- elapsed_time = 0
- while elapsed_time < volume_timeout:
- for created_item in created_items:
- v, _, volume_id = created_item.partition(":")
- if v == "volume":
- if self.cinder.volumes.get(volume_id).status != "available":
- break
- else: # all ready: break from while
- break
+ @staticmethod
+ def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
+ """Fill up the mac_address and fixed_ips in port_dict.
- time.sleep(5)
- elapsed_time += 5
+ Args:
+ net (dict): Network details
+ port_dict (dict): Port details
- # If we exceeded the timeout rollback
- if elapsed_time >= volume_timeout:
- raise vimconn.VimConnException(
- "Timeout creating volumes for instance " + name,
- http_code=vimconn.HTTP_Request_Timeout,
- )
+ """
+ if net.get("mac_address"):
+ port_dict["mac_address"] = net["mac_address"]
+
+ ip_dual_list = []
+ if ip_list := net.get("ip_address"):
+ if not isinstance(ip_list, list):
+ ip_list = [ip_list]
+ for ip in ip_list:
+ ip_dict = {"ip_address": ip}
+ ip_dual_list.append(ip_dict)
+ port_dict["fixed_ips"] = ip_dual_list
+ # TODO add "subnet_id": <subnet_id>
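+ # Example (illustrative): net["ip_address"] may be a single address or a
+ # dual-stack list such as ["10.0.0.5", "2001:db8::5"]; each entry becomes
+ # one fixed_ips item on the port.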
+
+ def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
+ """Create new port using neutron.
- # get availability Zone
- vm_av_zone = self._get_vm_availability_zone(
- availability_zone_index, availability_zone_list
- )
+ Args:
+ port_dict (dict): Port details
+ created_items (dict): All created items
+ net (dict): Network details
- self.logger.debug(
- "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
- "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
- "block_device_mapping={})".format(
- name,
- image_id,
- flavor_id,
- net_list_vim,
- self.config.get("security_groups"),
- vm_av_zone,
- self.config.get("keypair"),
- userdata,
- config_drive,
- block_device_mapping,
- )
- )
- server = self.nova.servers.create(
- name,
- image_id,
- flavor_id,
- nics=net_list_vim,
- security_groups=self.config.get("security_groups"),
- # TODO remove security_groups in future versions. Already at neutron port
- availability_zone=vm_av_zone,
- key_name=self.config.get("keypair"),
- userdata=userdata,
- config_drive=config_drive,
- block_device_mapping=block_device_mapping,
- ) # , description=description)
+ Returns:
+ new_port (dict): New created port
- vm_start_time = time.time()
- # Previously mentioned workaround to wait until the VM is active and then disable the port-security
- if no_secured_ports:
- self.__wait_for_vm(server.id, "ACTIVE")
+ """
+ new_port = self.neutron.create_port({"port": port_dict})
+ created_items["port:" + str(new_port["port"]["id"])] = True
+ net["mac_address"] = new_port["port"]["mac_address"]
+ net["vim_id"] = new_port["port"]["id"]
- for port in no_secured_ports:
- port_update = {
- "port": {"port_security_enabled": False, "security_groups": None}
- }
+ return new_port
- if port[1] == "allow-address-pairs":
- port_update = {
- "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
- }
+ def _create_port(
+ self, net: dict, name: str, created_items: dict
+ ) -> Tuple[dict, dict]:
+ """Create port using net details.
- try:
- self.neutron.update_port(port[0], port_update)
- except Exception:
- raise vimconn.VimConnException(
- "It was not possible to disable port security for port {}".format(
- port[0]
- )
- )
+ Args:
+ net (dict): Network details
+ name (str): Name to be used as the port name if the net dict does not include one
+ created_items (dict): All created items
- # print "DONE :-)", server
+ Returns:
+ new_port, port (tuple): New created port and port dictionary
- # pool_id = None
- for floating_network in external_network:
- try:
- assigned = False
- floating_ip_retries = 3
- # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
- # several times
- while not assigned:
- floating_ips = self.neutron.list_floatingips().get(
- "floatingips", ()
- )
- random.shuffle(floating_ips) # randomize
- for fip in floating_ips:
- if (
- fip.get("port_id")
- or fip.get("tenant_id") != server.tenant_id
- ):
- continue
+ """
- if isinstance(floating_network["floating_ip"], str):
- if (
- fip.get("floating_network_id")
- != floating_network["floating_ip"]
- ):
- continue
+ port_dict = {
+ "network_id": net["net_id"],
+ "name": net.get("name"),
+ "admin_state_up": True,
+ }
- free_floating_ip = fip["id"]
- break
- else:
- if (
- isinstance(floating_network["floating_ip"], str)
- and floating_network["floating_ip"].lower() != "true"
- ):
- pool_id = floating_network["floating_ip"]
- else:
- # Find the external network
- external_nets = list()
-
- for net in self.neutron.list_networks()["networks"]:
- if net["router:external"]:
- external_nets.append(net)
-
- if len(external_nets) == 0:
- raise vimconn.VimConnException(
- "Cannot create floating_ip automatically since "
- "no external network is present",
- http_code=vimconn.HTTP_Conflict,
- )
+ if not port_dict["name"]:
+ port_dict["name"] = name
- if len(external_nets) > 1:
- raise vimconn.VimConnException(
- "Cannot create floating_ip automatically since "
- "multiple external networks are present",
- http_code=vimconn.HTTP_Conflict,
- )
+ self._prepare_port_dict_security_groups(net, port_dict)
- pool_id = external_nets[0].get("id")
-
- param = {
- "floatingip": {
- "floating_network_id": pool_id,
- "tenant_id": server.tenant_id,
- }
- }
-
- try:
- # self.logger.debug("Creating floating IP")
- new_floating_ip = self.neutron.create_floatingip(param)
- free_floating_ip = new_floating_ip["floatingip"]["id"]
- created_items[
- "floating_ip:" + str(free_floating_ip)
- ] = True
- except Exception as e:
- raise vimconn.VimConnException(
- type(e).__name__
- + ": Cannot create new floating_ip "
- + str(e),
- http_code=vimconn.HTTP_Conflict,
- )
+ self._prepare_port_dict_binding(net, port_dict)
- try:
- # for race condition ensure not already assigned
- fip = self.neutron.show_floatingip(free_floating_ip)
+ vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
- if fip["floatingip"]["port_id"]:
- continue
+ new_port = self._create_new_port(port_dict, created_items, net)
- # the vim_id key contains the neutron.port_id
- self.neutron.update_floatingip(
- free_floating_ip,
- {"floatingip": {"port_id": floating_network["vim_id"]}},
- )
- # for race condition ensure not re-assigned to other VM after 5 seconds
- time.sleep(5)
- fip = self.neutron.show_floatingip(free_floating_ip)
+ vimconnector._set_fixed_ip(new_port, net)
- if (
- fip["floatingip"]["port_id"]
- != floating_network["vim_id"]
- ):
- self.logger.error(
- "floating_ip {} re-assigned to other port".format(
- free_floating_ip
- )
- )
- continue
+ port = {"port-id": new_port["port"]["id"]}
- self.logger.debug(
- "Assigned floating_ip {} to VM {}".format(
- free_floating_ip, server.id
- )
- )
- assigned = True
- except Exception as e:
- # openstack need some time after VM creation to assign an IP. So retry if fails
- vm_status = self.nova.servers.get(server.id).status
-
- if vm_status not in ("ACTIVE", "ERROR"):
- if time.time() - vm_start_time < server_timeout:
- time.sleep(5)
- continue
- elif floating_ip_retries > 0:
- floating_ip_retries -= 1
- continue
+ if float(self.nova.api_version.get_string()) >= 2.32:
+ port["tag"] = new_port["port"]["name"]
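+ # Device tags on NICs are only honoured from Nova API microversion 2.32
+ # onwards, hence the version check above.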
- raise vimconn.VimConnException(
- "Cannot create floating_ip: {} {}".format(
- type(e).__name__, e
- ),
- http_code=vimconn.HTTP_Conflict,
- )
+ return new_port, port
- except Exception as e:
- if not floating_network["exit_on_floating_ip_error"]:
- self.logger.error("Cannot create floating_ip. %s", str(e))
- continue
+ def _prepare_network_for_vminstance(
+ self,
+ name: str,
+ net_list: list,
+ created_items: dict,
+ net_list_vim: list,
+ external_network: list,
+ no_secured_ports: list,
+ ) -> None:
+ """Create port and fill up net dictionary for new VM instance creation.
- raise
+ Args:
+ name (str): Name of the VM instance, used as the default port name
+ net_list (list): List of networks
+ created_items (dict): All created items belonging to a VM
+ net_list_vim (list): List of ports
+ external_network (list): List of external-networks
+ no_secured_ports (list): Port security disabled ports
+ """
- return server.id, created_items
- # except nvExceptions.NotFound as e:
- # error_value=-vimconn.HTTP_Not_Found
- # error_text= "vm instance %s not found" % vm_id
- # except TypeError as e:
- # raise vimconn.VimConnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
+ self._reload_connection()
- except Exception as e:
- server_id = None
- if server:
- server_id = server.id
+ for net in net_list:
+ # Skip non-connected iface
+ if not net.get("net_id"):
+ continue
- try:
- self.delete_vminstance(server_id, created_items)
- except Exception as e2:
- self.logger.error("new_vminstance rollback fail {}".format(e2))
+ new_port, port = self._create_port(net, name, created_items)
- self._format_exception(e)
+ net_list_vim.append(port)
- def get_vminstance(self, vm_id):
- """Returns the VM instance information from VIM"""
- # self.logger.debug("Getting VM from VIM")
- try:
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
- # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+ if net.get("floating_ip", False):
+ net["exit_on_floating_ip_error"] = True
+ external_network.append(net)
- return server.to_dict()
- except (
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- nvExceptions.NotFound,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
+ net["exit_on_floating_ip_error"] = False
+ external_network.append(net)
+ net["floating_ip"] = self.config.get("use_floating_ip")
+
+ # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
+ # is dropped. As a workaround we wait until the VM is active and then disable the port-security
+ if net.get("port_security") is False and not self.config.get(
+ "no_port_security_extension"
+ ):
+ no_secured_ports.append(
+ (
+ new_port["port"]["id"],
+ net.get("port_security_disable_strategy"),
+ )
+ )
+
+ def _prepare_persistent_root_volumes(
+ self,
+ name: str,
+ vm_av_zone: list,
+ disk: dict,
+ base_disk_index: int,
+ block_device_mapping: dict,
+ existing_vim_volumes: list,
+ created_items: dict,
+ ) -> Optional[str]:
+ """Prepare persistent root volumes for new VM instance.
+
+ Args:
+ name (str): Name of VM instance
+ vm_av_zone (list): List of availability zones
+ disk (dict): Disk details
+ base_disk_index (int): Disk index
+ block_device_mapping (dict): Block device details
+ existing_vim_volumes (list): Existing disk details
+ created_items (dict): All created items belonging to the VM
+
+ Returns:
+ boot_volume_id (str): ID of boot volume
- def get_vminstance_console(self, vm_id, console_type="vnc"):
"""
- Get a console for the virtual machine
- Params:
- vm_id: uuid of the VM
- console_type, can be:
- "novnc" (by default), "xvpvnc" for VNC types,
- "rdp-html5" for RDP types, "spice-html5" for SPICE types
- Returns dict with the console parameters:
- protocol: ssh, ftp, http, https, ...
- server: usually ip address
- port: the http, ssh, ... port
- suffix: extra text, e.g. the http path and query string
+ # Disk may include only vim_volume_id or only vim_id.
+ # Use existing persistent root volume finding with volume_id or vim_id
+ key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
+
+ if disk.get(key_id):
+ block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+ existing_vim_volumes.append({"id": disk[key_id]})
+
+ else:
+ # Create persistent root volume
+ volume = self.cinder.volumes.create(
+ size=disk["size"],
+ name=name + "vd" + chr(base_disk_index),
+ imageRef=disk["image_id"],
+ # Make sure volume is in the same AZ as the VM to be attached to
+ availability_zone=vm_av_zone,
+ )
+ boot_volume_id = volume.id
+ self.update_block_device_mapping(
+ volume=volume,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ disk=disk,
+ created_items=created_items,
+ )
+
+ return boot_volume_id
+
+ @staticmethod
+ def update_block_device_mapping(
+ volume: object,
+ block_device_mapping: dict,
+ base_disk_index: int,
+ disk: dict,
+ created_items: dict,
+ ) -> None:
+ """Add volume information to block device mapping dict.
+ Args:
+ volume (object): Created volume object
+ block_device_mapping (dict): Block device details
+ base_disk_index (int): Disk index
+ disk (dict): Disk details
+ created_items (dict): All created items belonging to the VM
"""
- self.logger.debug("Getting VM CONSOLE from VIM")
+ if not volume:
+ raise vimconn.VimConnException("Volume is empty.")
- try:
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
+ if not hasattr(volume, "id"):
+ raise vimconn.VimConnException(
+ "Created volume is not valid, does not have id attribute."
+ )
- if console_type is None or console_type == "novnc":
- console_dict = server.get_vnc_console("novnc")
- elif console_type == "xvpvnc":
- console_dict = server.get_vnc_console(console_type)
- elif console_type == "rdp-html5":
- console_dict = server.get_rdp_console(console_type)
- elif console_type == "spice-html5":
- console_dict = server.get_spice_console(console_type)
- else:
- raise vimconn.VimConnException(
- "console type '{}' not allowed".format(console_type),
- http_code=vimconn.HTTP_Bad_Request,
- )
+ volume_txt = "volume:" + str(volume.id)
+ if disk.get("keep"):
+ volume_txt += ":keep"
+ created_items[volume_txt] = True
+ block_device_mapping["vd" + chr(base_disk_index)] = volume.id
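+ # A created_items key of the form "volume:<id>:keep" flags a volume that
+ # should be kept (not deleted) when the VM is torn down.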
- console_dict1 = console_dict.get("console")
+ def _prepare_non_root_persistent_volumes(
+ self,
+ name: str,
+ disk: dict,
+ vm_av_zone: list,
+ block_device_mapping: dict,
+ base_disk_index: int,
+ existing_vim_volumes: list,
+ created_items: dict,
+ ) -> None:
+ """Prepare non-root persistent volumes for new VM instance.
- if console_dict1:
- console_url = console_dict1.get("url")
+ Args:
+ name (str): Name of VM instance
+ disk (dict): Disk details
+ vm_av_zone (list): List of availability zones
+ block_device_mapping (dict): Block device details
+ base_disk_index (int): Disk index
+ existing_vim_volumes (list): Existing disk details
+ created_items (dict): All created items belonging to the VM
+ """
+ # Non-root persistent volumes
+ # Disk may include only vim_volume_id or only vim_id.
+ key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
- if console_url:
- # parse console_url
- protocol_index = console_url.find("//")
- suffix_index = (
- console_url[protocol_index + 2 :].find("/") + protocol_index + 2
- )
- port_index = (
- console_url[protocol_index + 2 : suffix_index].find(":")
- + protocol_index
- + 2
- )
+ if disk.get(key_id):
+ # Use existing persistent volume
+ block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+ existing_vim_volumes.append({"id": disk[key_id]})
- if protocol_index < 0 or port_index < 0 or suffix_index < 0:
- return (
- -vimconn.HTTP_Internal_Server_Error,
- "Unexpected response from VIM",
- )
+ else:
+ # Create persistent volume
+ volume = self.cinder.volumes.create(
+ size=disk["size"],
+ name=name + "vd" + chr(base_disk_index),
+ # Make sure volume is in the same AZ as the VM to be attached to
+ availability_zone=vm_av_zone,
+ )
+ self.update_block_device_mapping(
+ volume=volume,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ disk=disk,
+ created_items=created_items,
+ )
- console_dict = {
- "protocol": console_url[0:protocol_index],
- "server": console_url[protocol_index + 2 : port_index],
- "port": console_url[port_index:suffix_index],
- "suffix": console_url[suffix_index + 1 :],
- }
- protocol_index += 2
+ def _wait_for_created_volumes_availability(
+ self, elapsed_time: int, created_items: dict
+ ) -> Optional[int]:
+ """Wait till created volumes become available.
- return console_dict
- raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
- except (
- nvExceptions.NotFound,
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- nvExceptions.BadRequest,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ Args:
+ elapsed_time (int): Passed time while waiting
+ created_items (dict): All created items belonging to the VM
- def delete_vminstance(self, vm_id, created_items=None):
- """Removes a VM instance from VIM. Returns the old identifier"""
- # print "osconnector: Getting VM from VIM"
- if created_items is None:
- created_items = {}
+ Returns:
+ elapsed_time (int): Time spent while waiting
- try:
- self._reload_connection()
- # delete VM ports attached to this networks before the virtual machine
- for k, v in created_items.items():
- if not v: # skip already deleted
- continue
+ """
- try:
- k_item, _, k_id = k.partition(":")
- if k_item == "port":
- self.neutron.delete_port(k_id)
- except Exception as e:
- self.logger.error(
- "Error deleting port: {}: {}".format(type(e).__name__, e)
- )
+ while elapsed_time < volume_timeout:
+ for created_item in created_items:
+ v, volume_id = (
+ created_item.split(":")[0],
+ created_item.split(":")[1],
+ )
+ if v == "volume":
+ if self.cinder.volumes.get(volume_id).status != "available":
+ break
+ else:
+ # All ready: break from while
+ break
- # #commented because detaching the volumes makes the servers.delete not work properly ?!?
- # #dettach volumes attached
- # server = self.nova.servers.get(vm_id)
- # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"] #volume["id"]
- # #for volume in volumes_attached_dict:
- # # self.cinder.volumes.detach(volume["id"])
+ time.sleep(5)
+ elapsed_time += 5
- if vm_id:
- self.nova.servers.delete(vm_id)
+ return elapsed_time
- # delete volumes. Although having detached, they should have in active status before deleting
- # we ensure in this loop
- keep_waiting = True
- elapsed_time = 0
+ def _wait_for_existing_volumes_availability(
+ self, elapsed_time: int, existing_vim_volumes: list
+ ) -> Optional[int]:
+ """Wait till existing volumes become available.
- while keep_waiting and elapsed_time < volume_timeout:
- keep_waiting = False
+ Args:
+ elapsed_time (int): Passed time while waiting
+ existing_vim_volumes (list): Existing volume details
- for k, v in created_items.items():
- if not v: # skip already deleted
- continue
+ Returns:
+ elapsed_time (int): Time spent while waiting
- try:
- k_item, _, k_id = k.partition(":")
- if k_item == "volume":
- if self.cinder.volumes.get(k_id).status != "available":
- keep_waiting = True
- else:
- self.cinder.volumes.delete(k_id)
- created_items[k] = None
- elif k_item == "floating_ip": # floating ip
- self.neutron.delete_floatingip(k_id)
- created_items[k] = None
+ """
- except Exception as e:
- self.logger.error("Error deleting {}: {}".format(k, e))
+ while elapsed_time < volume_timeout:
+ for volume in existing_vim_volumes:
+ if self.cinder.volumes.get(volume["id"]).status != "available":
+ break
+ else: # all ready: break from while
+ break
- if keep_waiting:
- time.sleep(1)
- elapsed_time += 1
+ time.sleep(5)
+ elapsed_time += 5
- return None
- except (
- nvExceptions.NotFound,
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ return elapsed_time
- def refresh_vms_status(self, vm_list):
- """Get the status of the virtual machines and their interfaces/ports
- Params: the list of VM identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this Virtual Machine
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
- # CREATING (on building process), ERROR
- # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
- #
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- interfaces:
- - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
- mac_address: #Text format XX:XX:XX:XX:XX:XX
- vim_net_id: #network id where this interface is connected
- vim_interface_id: #interface/port VIM id
- ip_address: #null, or text with IPv4, IPv6 address
- compute_node: #identification of compute node where PF,VF interface is allocated
- pci: #PCI address of the NIC that hosts the PF,VF
- vlan: #physical VLAN used for VF
- """
- vm_dict = {}
- self.logger.debug(
- "refresh_vms status: Getting tenant VM instance information from VIM"
- )
+ def _prepare_disk_for_vminstance(
+ self,
+ name: str,
+ existing_vim_volumes: list,
+ created_items: dict,
+ vm_av_zone: list,
+ block_device_mapping: dict,
+ disk_list: list = None,
+ ) -> None:
+ """Prepare all volumes for new VM instance.
- for vm_id in vm_list:
- vm = {}
+ Args:
+ name (str): Name of Instance
+ existing_vim_volumes (list): List of existing volumes
+ created_items (dict): All created items belonging to the VM
+ vm_av_zone (list): VM availability zone
+ block_device_mapping (dict): Block devices to be attached to VM
+ disk_list (list): List of disks
- try:
- vm_vim = self.get_vminstance(vm_id)
+ """
+ # Create additional volumes in case these are present in disk_list
+ base_disk_index = ord("b")
+ boot_volume_id = None
+ elapsed_time = 0
- if vm_vim["status"] in vmStatus2manoFormat:
- vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
- else:
- vm["status"] = "OTHER"
- vm["error_msg"] = "VIM status reported " + vm_vim["status"]
+ for disk in disk_list:
+ if "image_id" in disk:
+ # Root persistent volume
+ base_disk_index = ord("a")
+ boot_volume_id = self._prepare_persistent_root_volumes(
+ name=name,
+ vm_av_zone=vm_av_zone,
+ disk=disk,
+ base_disk_index=base_disk_index,
+ block_device_mapping=block_device_mapping,
+ existing_vim_volumes=existing_vim_volumes,
+ created_items=created_items,
+ )
+ else:
+ # Non-root persistent volume
+ self._prepare_non_root_persistent_volumes(
+ name=name,
+ disk=disk,
+ vm_av_zone=vm_av_zone,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ existing_vim_volumes=existing_vim_volumes,
+ created_items=created_items,
+ )
+ base_disk_index += 1
- vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
- vm_vim.pop("user_data", None)
- vm["vim_info"] = self.serialize(vm_vim)
+ # Wait until created volumes are with status available
+ elapsed_time = self._wait_for_created_volumes_availability(
+ elapsed_time, created_items
+ )
+ # Wait until existing volumes in vim are with status available
+ elapsed_time = self._wait_for_existing_volumes_availability(
+ elapsed_time, existing_vim_volumes
+ )
+ # If we exceeded the timeout rollback
+ if elapsed_time >= volume_timeout:
+ raise vimconn.VimConnException(
+ "Timeout creating volumes for instance " + name,
+ http_code=vimconn.HTTP_Request_Timeout,
+ )
+ if boot_volume_id:
+ self.cinder.volumes.set_bootable(boot_volume_id, True)
- vm["interfaces"] = []
- if vm_vim.get("fault"):
- vm["error_msg"] = str(vm_vim["fault"])
+ def _find_the_external_network_for_floating_ip(self):
+ """Get the external network ID (pool) needed to create a floating IP.
- # get interfaces
- try:
- self._reload_connection()
- port_dict = self.neutron.list_ports(device_id=vm_id)
+ Returns:
+ pool_id (str): External network pool ID
- for port in port_dict["ports"]:
- interface = {}
- interface["vim_info"] = self.serialize(port)
- interface["mac_address"] = port.get("mac_address")
- interface["vim_net_id"] = port["network_id"]
- interface["vim_interface_id"] = port["id"]
- # check if OS-EXT-SRV-ATTR:host is there,
- # in case of non-admin credentials, it will be missing
+ """
- if vm_vim.get("OS-EXT-SRV-ATTR:host"):
- interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
+ # Find the external network
+ external_nets = list()
- interface["pci"] = None
+ for net in self.neutron.list_networks()["networks"]:
+ if net["router:external"]:
+ external_nets.append(net)
- # check if binding:profile is there,
- # in case of non-admin credentials, it will be missing
- if port.get("binding:profile"):
- if port["binding:profile"].get("pci_slot"):
- # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
- # the slot to 0x00
- # TODO: This is just a workaround valid for niantinc. Find a better way to do so
- # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
- pci = port["binding:profile"]["pci_slot"]
- # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
- interface["pci"] = pci
-
- interface["vlan"] = None
-
- if port.get("binding:vif_details"):
- interface["vlan"] = port["binding:vif_details"].get("vlan")
+ if len(external_nets) == 0:
+ raise vimconn.VimConnException(
+ "Cannot create floating_ip automatically since "
+ "no external network is present",
+ http_code=vimconn.HTTP_Conflict,
+ )
- # Get vlan from network in case not present in port for those old openstacks and cases where
- # it is needed vlan at PT
- if not interface["vlan"]:
- # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
- network = self.neutron.show_network(port["network_id"])
+ if len(external_nets) > 1:
+ raise vimconn.VimConnException(
+ "Cannot create floating_ip automatically since "
+ "multiple external networks are present",
+ http_code=vimconn.HTTP_Conflict,
+ )
- if (
- network["network"].get("provider:network_type")
- == "vlan"
- ):
- # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
- interface["vlan"] = network["network"].get(
- "provider:segmentation_id"
- )
+ # Pool ID
+ return external_nets[0].get("id")
- ips = []
- # look for floating ip address
- try:
- floating_ip_dict = self.neutron.list_floatingips(
- port_id=port["id"]
- )
+ def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
+ """Trigger neutron to create a new floating IP using external network ID.
- if floating_ip_dict.get("floatingips"):
- ips.append(
- floating_ip_dict["floatingips"][0].get(
- "floating_ip_address"
- )
- )
- except Exception:
- pass
+ Args:
+ param (dict): Input parameters to create a floating IP
+ created_items (dict): All created items belonging to the new VM instance
- for subnet in port["fixed_ips"]:
- ips.append(subnet["ip_address"])
+ Raises:
- interface["ip_address"] = ";".join(ips)
- vm["interfaces"].append(interface)
- except Exception as e:
- self.logger.error(
- "Error getting vm interface information {}: {}".format(
- type(e).__name__, e
- ),
- exc_info=True,
- )
- except vimconn.VimConnNotFoundException as e:
- self.logger.error("Exception getting vm status: %s", str(e))
- vm["status"] = "DELETED"
- vm["error_msg"] = str(e)
- except vimconn.VimConnException as e:
- self.logger.error("Exception getting vm status: %s", str(e))
- vm["status"] = "VIM_ERROR"
- vm["error_msg"] = str(e)
+ VimConnException
+ """
+ try:
+ self.logger.debug("Creating floating IP")
+ new_floating_ip = self.neutron.create_floatingip(param)
+ free_floating_ip = new_floating_ip["floatingip"]["id"]
+ created_items["floating_ip:" + str(free_floating_ip)] = True
- vm_dict[vm_id] = vm
+ except Exception as e:
+ raise vimconn.VimConnException(
+ type(e).__name__ + ": Cannot create new floating_ip " + str(e),
+ http_code=vimconn.HTTP_Conflict,
+ )
- return vm_dict
+ def _create_floating_ip(
+ self, floating_network: dict, server: object, created_items: dict
+ ) -> None:
+ """Get the available Pool ID and create a new floating IP.
- def action_vminstance(self, vm_id, action_dict, created_items={}):
- """Send and action over a VM instance from VIM
- Returns None or the console dict if the action was successfully sent to the VIM"""
- self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+ Args:
+ floating_network (dict): Dict including external network ID
+ server (object): Server object
+ created_items (dict): All created items belonging to the new VM instance
- try:
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
+ """
- if "start" in action_dict:
- if action_dict["start"] == "rebuild":
- server.rebuild()
- else:
- if server.status == "PAUSED":
- server.unpause()
- elif server.status == "SUSPENDED":
- server.resume()
- elif server.status == "SHUTOFF":
- server.start()
- elif "pause" in action_dict:
- server.pause()
- elif "resume" in action_dict:
- server.resume()
- elif "shutoff" in action_dict or "shutdown" in action_dict:
- server.stop()
- elif "forceOff" in action_dict:
- server.stop() # TODO
- elif "terminate" in action_dict:
- server.delete()
- elif "createImage" in action_dict:
- server.create_image()
- # "path":path_schema,
- # "description":description_schema,
- # "name":name_schema,
- # "metadata":metadata_schema,
- # "imageRef": id_schema,
- # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
- elif "rebuild" in action_dict:
- server.rebuild(server.image["id"])
- elif "reboot" in action_dict:
- server.reboot() # reboot_type="SOFT"
- elif "console" in action_dict:
- console_type = action_dict["console"]
+ # Pool_id is available
+ if (
+ isinstance(floating_network["floating_ip"], str)
+ and floating_network["floating_ip"].lower() != "true"
+ ):
+ pool_id = floating_network["floating_ip"]
- if console_type is None or console_type == "novnc":
- console_dict = server.get_vnc_console("novnc")
- elif console_type == "xvpvnc":
- console_dict = server.get_vnc_console(console_type)
- elif console_type == "rdp-html5":
- console_dict = server.get_rdp_console(console_type)
- elif console_type == "spice-html5":
- console_dict = server.get_spice_console(console_type)
- else:
- raise vimconn.VimConnException(
- "console type '{}' not allowed".format(console_type),
- http_code=vimconn.HTTP_Bad_Request,
- )
+ # Find the Pool_id
+ else:
+ pool_id = self._find_the_external_network_for_floating_ip()
- try:
- console_url = console_dict["console"]["url"]
- # parse console_url
- protocol_index = console_url.find("//")
- suffix_index = (
- console_url[protocol_index + 2 :].find("/") + protocol_index + 2
- )
- port_index = (
- console_url[protocol_index + 2 : suffix_index].find(":")
- + protocol_index
- + 2
- )
+ param = {
+ "floatingip": {
+ "floating_network_id": pool_id,
+ "tenant_id": server.tenant_id,
+ }
+ }
- if protocol_index < 0 or port_index < 0 or suffix_index < 0:
- raise vimconn.VimConnException(
- "Unexpected response from VIM " + str(console_dict)
- )
+ self._neutron_create_float_ip(param, created_items)
- console_dict2 = {
- "protocol": console_url[0:protocol_index],
- "server": console_url[protocol_index + 2 : port_index],
- "port": int(console_url[port_index + 1 : suffix_index]),
- "suffix": console_url[suffix_index + 1 :],
- }
+ def _find_floating_ip(
+ self,
+ server: object,
+ floating_ips: list,
+ floating_network: dict,
+ ) -> Optional[str]:
+ """Find an available free floating IP, if any.
- return console_dict2
- except Exception:
- raise vimconn.VimConnException(
- "Unexpected response from VIM " + str(console_dict)
- )
+ Args:
+ server (object): Server object
+ floating_ips (list): List of floating IPs
+ floating_network (dict): Details of floating network such as ID
- return None
- except (
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- nvExceptions.NotFound,
- ConnectionError,
- ) as e:
- self._format_exception(e)
- # TODO insert exception vimconn.HTTP_Unauthorized
+ Returns:
+ free_floating_ip (str): ID of a free floating IP, or None if none is available
- # ###### VIO Specific Changes #########
- def _generate_vlanID(self):
- """
- Method to get unused vlanID
- Args:
- None
- Returns:
- vlanID
"""
- # Get used VLAN IDs
- usedVlanIDs = []
- networks = self.get_network_list()
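+ # A floating IP is usable when it is unattached, belongs to this tenant and,
+ # if a specific external network was given, comes from that network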
+ for fip in floating_ips:
+ if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
+ continue
- for net in networks:
- if net.get("provider:segmentation_id"):
- usedVlanIDs.append(net.get("provider:segmentation_id"))
+ if isinstance(floating_network["floating_ip"], str):
+ if fip.get("floating_network_id") != floating_network["floating_ip"]:
+ continue
- used_vlanIDs = set(usedVlanIDs)
+ return fip["id"]
- # find unused VLAN ID
- for vlanID_range in self.config.get("dataplane_net_vlan_range"):
- try:
- start_vlanid, end_vlanid = map(
- int, vlanID_range.replace(" ", "").split("-")
- )
+ def _assign_floating_ip(
+ self, free_floating_ip: str, floating_network: dict
+ ) -> Dict:
+ """Assign the free floating ip address to port.
- for vlanID in range(start_vlanid, end_vlanid + 1):
- if vlanID not in used_vlanIDs:
- return vlanID
- except Exception as exp:
- raise vimconn.VimConnException(
- "Exception {} occurred while generating VLAN ID.".format(exp)
- )
- else:
- raise vimconn.VimConnConflictException(
- "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
- self.config.get("dataplane_net_vlan_range")
- )
- )
+ Args:
+ free_floating_ip (str): ID of the floating IP to be assigned
+ floating_network (dict): Floating network details; its "vim_id" holds the Neutron port id
+
+ Returns:
+ fip (dict): Floating IP details
- def _generate_multisegment_vlanID(self):
"""
- Method to get unused vlanID
+ # The vim_id key contains the neutron.port_id
+ self.neutron.update_floatingip(
+ free_floating_ip,
+ {"floatingip": {"port_id": floating_network["vim_id"]}},
+ )
+ # Sleep 5 seconds and re-read the floating IP so the caller can detect a racing re-assignment to another VM
+ time.sleep(5)
+
+ return self.neutron.show_floatingip(free_floating_ip)
+
+ def _get_free_floating_ip(
+ self, server: object, floating_network: dict
+ ) -> Optional[str]:
+ """Get the free floating IP address.
+
Args:
- None
+ server (object): Server Object
+ floating_network (dict): Floating network details
+
Returns:
- vlanID
+ free_floating_ip (str): ID of a free floating IP, or None
+
"""
- # Get used VLAN IDs
- usedVlanIDs = []
- networks = self.get_network_list()
- for net in networks:
- if net.get("provider:network_type") == "vlan" and net.get(
- "provider:segmentation_id"
- ):
- usedVlanIDs.append(net.get("provider:segmentation_id"))
- elif net.get("segments"):
- for segment in net.get("segments"):
- if segment.get("provider:network_type") == "vlan" and segment.get(
- "provider:segmentation_id"
- ):
- usedVlanIDs.append(segment.get("provider:segmentation_id"))
- used_vlanIDs = set(usedVlanIDs)
+ floating_ips = self.neutron.list_floatingips().get("floatingips", [])
- # find unused VLAN ID
- for vlanID_range in self.config.get("multisegment_vlan_range"):
- try:
- start_vlanid, end_vlanid = map(
- int, vlanID_range.replace(" ", "").split("-")
- )
+ # Shuffle so that concurrent ROs are less likely to pick the same floating IP
+ random.shuffle(floating_ips)
- for vlanID in range(start_vlanid, end_vlanid + 1):
- if vlanID not in used_vlanIDs:
- return vlanID
- except Exception as exp:
- raise vimconn.VimConnException(
- "Exception {} occurred while generating VLAN ID.".format(exp)
- )
- else:
- raise vimconn.VimConnConflictException(
- "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
- self.config.get("multisegment_vlan_range")
- )
- )
+ return self._find_floating_ip(server, floating_ips, floating_network)
+
+ def _prepare_external_network_for_vminstance(
+ self,
+ external_network: list,
+ server: object,
+ created_items: dict,
+ vm_start_time: float,
+ ) -> None:
+ """Assign floating IP address for VM instance.
+
+ Args:
+ external_network (list): External networks that require a floating IP
+ server (object): Server Object
+ created_items (dict): All created items belonging to the new VM instance
+ vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
+
+ Raises:
+ VimConnException
- def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
"""
- Method to validate user given vlanID ranges
- Args: None
- Returns: None
- """
- for vlanID_range in input_vlan_range:
- vlan_range = vlanID_range.replace(" ", "")
- # validate format
- vlanID_pattern = r"(\d)*-(\d)*$"
- match_obj = re.match(vlanID_pattern, vlan_range)
- if not match_obj:
- raise vimconn.VimConnConflictException(
- "Invalid VLAN range for {}: {}.You must provide "
- "'{}' in format [start_ID - end_ID].".format(
- text_vlan_range, vlanID_range, text_vlan_range
- )
- )
-
- start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
- if start_vlanid <= 0:
- raise vimconn.VimConnConflictException(
- "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
- "networks valid IDs are 1 to 4094 ".format(
- text_vlan_range, vlanID_range
+ for floating_network in external_network:
+ try:
+ assigned = False
+ floating_ip_retries = 3
+ # With RO in HA there can be conflicts (two ROs trying to assign the same
+ # floating IP), so retry several times
+ while not assigned:
+ free_floating_ip = self._get_free_floating_ip(
+ server, floating_network
)
- )
- if end_vlanid > 4094:
- raise vimconn.VimConnConflictException(
- "Invalid VLAN range for {}: {}. End VLAN ID can not be "
- "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
- text_vlan_range, vlanID_range
- )
- )
+ if not free_floating_ip:
+ self._create_floating_ip(
+ floating_network, server, created_items
+ )
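+ # If a new floating IP was created, a later pass of this loop will pick it up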
- if start_vlanid > end_vlanid:
- raise vimconn.VimConnConflictException(
- "Invalid VLAN range for {}: {}. You must provide '{}'"
- " in format start_ID - end_ID and start_ID < end_ID ".format(
- text_vlan_range, vlanID_range, text_vlan_range
- )
- )
+ try:
+ # For race condition ensure not already assigned
+ fip = self.neutron.show_floatingip(free_floating_ip)
- # NOT USED FUNCTIONS
+ if fip["floatingip"].get("port_id"):
+ continue
- def new_external_port(self, port_data):
- """Adds a external port to VIM
- Returns the port identifier"""
- # TODO openstack if needed
- return (
- -vimconn.HTTP_Internal_Server_Error,
- "osconnector.new_external_port() not implemented",
- )
+ # Assign floating ip
+ fip = self._assign_floating_ip(
+ free_floating_ip, floating_network
+ )
- def connect_port_network(self, port_id, network_id, admin=False):
- """Connects a external port to a network
- Returns status code of the VIM response"""
- # TODO openstack if needed
- return (
- -vimconn.HTTP_Internal_Server_Error,
- "osconnector.connect_port_network() not implemented",
- )
+ if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
+ self.logger.warning(
+ "floating_ip {} re-assigned to other port".format(
+ free_floating_ip
+ )
+ )
+ continue
- def new_user(self, user_name, user_passwd, tenant_id=None):
- """Adds a new user to openstack VIM
- Returns the user identifier"""
- self.logger.debug("osconnector: Adding a new user to VIM")
+ self.logger.debug(
+ "Assigned floating_ip {} to VM {}".format(
+ free_floating_ip, server.id
+ )
+ )
- try:
- self._reload_connection()
- user = self.keystone.users.create(
- user_name, password=user_passwd, default_project=tenant_id
- )
- # self.keystone.tenants.add_user(self.k_creds["username"], #role)
+ assigned = True
- return user.id
- except ksExceptions.ConnectionError as e:
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
- except ksExceptions.ClientException as e: # TODO remove
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
+ except Exception as e:
+ # OpenStack needs some time after VM creation to assign an IP, so retry on failure
+ vm_status = self.nova.servers.get(server.id).status
- # TODO insert exception vimconn.HTTP_Unauthorized
- # if reaching here is because an exception
- self.logger.debug("new_user " + error_text)
+ if vm_status not in ("ACTIVE", "ERROR"):
+ if time.time() - vm_start_time < server_timeout:
+ time.sleep(5)
+ continue
+ elif floating_ip_retries > 0:
+ floating_ip_retries -= 1
+ continue
- return error_value, error_text
+ raise vimconn.VimConnException(
+ "Cannot create floating_ip: {} {}".format(
+ type(e).__name__, e
+ ),
+ http_code=vimconn.HTTP_Conflict,
+ )
- def delete_user(self, user_id):
- """Delete a user from openstack VIM
- Returns the user identifier"""
- if self.debug:
- print("osconnector: Deleting a user from VIM")
+ except Exception as e:
+ if not floating_network["exit_on_floating_ip_error"]:
+ self.logger.error("Cannot create floating_ip. %s", str(e))
+ continue
- try:
- self._reload_connection()
- self.keystone.users.delete(user_id)
+ raise
- return 1, user_id
- except ksExceptions.ConnectionError as e:
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
- except ksExceptions.NotFound as e:
- error_value = -vimconn.HTTP_Not_Found
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
- except ksExceptions.ClientException as e: # TODO remove
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
+ def _update_port_security_for_vminstance(
+ self,
+ no_secured_ports: list,
+ server: object,
+ ) -> None:
+ """Updates the port security according to no_secured_ports list.
- # TODO insert exception vimconn.HTTP_Unauthorized
- # if reaching here is because an exception
- self.logger.debug("delete_tenant " + error_text)
+ Args:
+ no_secured_ports (list): List of ports whose port security will be disabled
+ server (object): Server Object
- return error_value, error_text
+ Raises:
+ VimConnException
- def get_hosts_info(self):
- """Get the information of deployed hosts
- Returns the hosts content"""
- if self.debug:
- print("osconnector: Getting Host info from VIM")
+ """
+ # Wait until the VM is active and then disable the port-security
+ if no_secured_ports:
+ self.__wait_for_vm(server.id, "ACTIVE")
- try:
- h_list = []
- self._reload_connection()
- hypervisors = self.nova.hypervisors.list()
+ for port in no_secured_ports:
+ port_update = {
+ "port": {"port_security_enabled": False, "security_groups": None}
+ }
- for hype in hypervisors:
- h_list.append(hype.to_dict())
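+ # "allow-address-pairs" keeps port security enabled but permits traffic from any IP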
+ if port[1] == "allow-address-pairs":
+ port_update = {
+ "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
+ }
- return 1, {"hosts": h_list}
- except nvExceptions.NotFound as e:
- error_value = -vimconn.HTTP_Not_Found
- error_text = str(e) if len(e.args) == 0 else str(e.args[0])
- except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
- )
+ try:
+ self.neutron.update_port(port[0], port_update)
- # TODO insert exception vimconn.HTTP_Unauthorized
- # if reaching here is because an exception
- self.logger.debug("get_hosts_info " + error_text)
+ except Exception:
+ raise vimconn.VimConnException(
+ "It was not possible to disable port security for port {}".format(
+ port[0]
+ )
+ )
- return error_value, error_text
+ def new_vminstance(
+ self,
+ name: str,
+ description: str,
+ start: bool,
+ image_id: str,
+ flavor_id: str,
+ affinity_group_list: list,
+ net_list: list,
+ cloud_config=None,
+ disk_list=None,
+ availability_zone_index=None,
+ availability_zone_list=None,
+ ) -> tuple:
+ """Adds a VM instance to VIM.
- def get_hosts(self, vim_tenant):
- """Get the hosts and deployed instances
- Returns the hosts content"""
- r, hype_dict = self.get_hosts_info()
+ Args:
+ name (str): name of VM
+ description (str): description
+ start (bool): indicates if VM must start or boot in pause mode. Ignored
+ image_id (str): image uuid
+ flavor_id (str): flavor uuid
+ affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
+ net_list (list): list of interfaces, each one is a dictionary with:
+ name: name of network
+ net_id: network uuid to connect
+ vpci: virtual PCI address to assign; ignored because OpenStack lacks support #TODO
+ model: interface model, ignored #TODO
+ mac_address: used for SR-IOV ifaces #TODO for other types
+ use: 'data', 'bridge', 'mgmt'
+ type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
+ vim_id: filled/added by this function
+ floating_ip: True/False (or it can be None)
+ port_security: True/False
+ cloud_config (dict): (optional) dictionary with:
+ key-pairs: (optional) list of strings with the public key to be inserted to the default user
+ users: (optional) list of users to be inserted, each item is a dict with:
+ name: (mandatory) user name,
+ key-pairs: (optional) list of strings with the public key to be inserted to the user
+ user-data: (optional) string is a text script to be passed directly to cloud-init
+ config-files: (optional). List of files to be transferred. Each item is a dict with:
+ dest: (mandatory) string with the destination absolute path
+ encoding: (optional, by default text). Can be one of:
+ 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+ content : (mandatory) string with the content of the file
+ permissions: (optional) string with file permissions, typically octal notation '0644'
+ owner: (optional) file owner, string with the format 'owner:group'
+ boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
+ disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
+ image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+ size: (mandatory) string with the size of the disk in GB
+ vim_id: (optional) should use this existing volume id
+ availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
+ availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignored if
+ availability_zone_index is None
+ #TODO ip, security groups
- if r < 0:
- return r, hype_dict
+ Returns:
+ A tuple with the instance identifier and created_items or raises an exception on error
+ created_items can be None or a dictionary where this method can include key-values that will be passed to
+ the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+ Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+ as not present.
- hypervisors = hype_dict["hosts"]
+ """
+ self.logger.debug(
+ "new_vminstance input: image='%s' flavor='%s' nics='%s'",
+ image_id,
+ flavor_id,
+ str(net_list),
+ )
try:
- servers = self.nova.servers.list()
- for hype in hypervisors:
- for server in servers:
- if (
- server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
- == hype["hypervisor_hostname"]
- ):
- if "vm" in hype:
- hype["vm"].append(server.id)
- else:
- hype["vm"] = [server.id]
+ server = None
+ created_items = {}
+ net_list_vim = []
+ # list of external networks to be connected to instance, later on used to create floating_ip
+ external_network = []
+ # List of ports with port-security disabled
+ no_secured_ports = []
+ block_device_mapping = {}
+ existing_vim_volumes = []
+ server_group_id = None
+ scheduler_hints = {}
+
+ # Check the Openstack Connection
+ self._reload_connection()
- return 1, hype_dict
- except nvExceptions.NotFound as e:
- error_value = -vimconn.HTTP_Not_Found
- error_text = str(e) if len(e.args) == 0 else str(e.args[0])
- except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
- error_value = -vimconn.HTTP_Bad_Request
- error_text = (
- type(e).__name__
- + ": "
- + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ # Prepare network list
+ self._prepare_network_for_vminstance(
+ name=name,
+ net_list=net_list,
+ created_items=created_items,
+ net_list_vim=net_list_vim,
+ external_network=external_network,
+ no_secured_ports=no_secured_ports,
)
- # TODO insert exception vimconn.HTTP_Unauthorized
- # if reaching here is because an exception
- self.logger.debug("get_hosts " + error_text)
-
- return error_value, error_text
-
- def new_classification(self, name, ctype, definition):
- self.logger.debug(
- "Adding a new (Traffic) Classification to VIM, named %s", name
- )
+ # Cloud config
+ config_drive, userdata = self._create_user_data(cloud_config)
- try:
- new_class = None
- self._reload_connection()
+ # Get availability Zone
+ vm_av_zone = self._get_vm_availability_zone(
+ availability_zone_index, availability_zone_list
+ )
- if ctype not in supportedClassificationTypes:
- raise vimconn.VimConnNotSupportedException(
- "OpenStack VIM connector does not support provided "
- "Classification Type {}, supported ones are: {}".format(
- ctype, supportedClassificationTypes
- )
+ if disk_list:
+ # Prepare disks
+ self._prepare_disk_for_vminstance(
+ name=name,
+ existing_vim_volumes=existing_vim_volumes,
+ created_items=created_items,
+ vm_av_zone=vm_av_zone,
+ block_device_mapping=block_device_mapping,
+ disk_list=disk_list,
)
- if not self._validate_classification(ctype, definition):
- raise vimconn.VimConnException(
- "Incorrect Classification definition for the type specified."
+ if affinity_group_list:
+ # Only first id on the list will be used. Openstack restriction
+ server_group_id = affinity_group_list[0]["affinity_group_id"]
+ scheduller_hints["group"] = server_group_id
+
+ self.logger.debug(
+ "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
+ "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
+ "block_device_mapping={}, server_group={})".format(
+ name,
+ image_id,
+ flavor_id,
+ net_list_vim,
+ self.config.get("security_groups"),
+ vm_av_zone,
+ self.config.get("keypair"),
+ userdata,
+ config_drive,
+ block_device_mapping,
+ server_group_id,
)
+ )
- classification_dict = definition
- classification_dict["name"] = name
- new_class = self.neutron.create_sfc_flow_classifier(
- {"flow_classifier": classification_dict}
+ # Create VM
+ server = self.nova.servers.create(
+ name=name,
+ image=image_id,
+ flavor=flavor_id,
+ nics=net_list_vim,
+ security_groups=self.config.get("security_groups"),
+ # TODO remove security_groups in future versions. Already at neutron port
+ availability_zone=vm_av_zone,
+ key_name=self.config.get("keypair"),
+ userdata=userdata,
+ config_drive=config_drive,
+ block_device_mapping=block_device_mapping,
+ scheduler_hints=scheduler_hints,
)
- return new_class["flow_classifier"]["id"]
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self.logger.error("Creation of Classification failed.")
- self._format_exception(e)
+ vm_start_time = time.time()
- def get_classification(self, class_id):
- self.logger.debug(" Getting Classification %s from VIM", class_id)
- filter_dict = {"id": class_id}
- class_list = self.get_classification_list(filter_dict)
+ self._update_port_security_for_vminstance(no_secured_ports, server)
- if len(class_list) == 0:
- raise vimconn.VimConnNotFoundException(
- "Classification '{}' not found".format(class_id)
- )
- elif len(class_list) > 1:
- raise vimconn.VimConnConflictException(
- "Found more than one Classification with this criteria"
+ self._prepare_external_network_for_vminstance(
+ external_network=external_network,
+ server=server,
+ created_items=created_items,
+ vm_start_time=vm_start_time,
)
- classification = class_list[0]
+ return server.id, created_items
- return classification
+ except Exception as e:
+ server_id = None
+ if server:
+ server_id = server.id
- def get_classification_list(self, filter_dict={}):
- self.logger.debug(
- "Getting Classifications from VIM filter: '%s'", str(filter_dict)
- )
+ try:
+ created_items = self.remove_keep_tag_from_persistent_volumes(
+ created_items
+ )
- try:
- filter_dict_os = filter_dict.copy()
- self._reload_connection()
+ self.delete_vminstance(server_id, created_items)
- if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
- classification_dict = self.neutron.list_sfc_flow_classifiers(
- **filter_dict_os
- )
- classification_list = classification_dict["flow_classifiers"]
- self.__classification_os2mano(classification_list)
+ except Exception as e2:
+ self.logger.error("new_vminstance rollback fail {}".format(e2))
- return classification_list
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
self._format_exception(e)
- def delete_classification(self, class_id):
- self.logger.debug("Deleting Classification '%s' from VIM", class_id)
+ @staticmethod
+ def remove_keep_tag_from_persistent_volumes(created_items: Dict) -> Dict:
+ """Removes the keep flag from persistent volumes. So, those volumes could be removed.
+
+ Args:
+ created_items (dict): All created items belonging to the VM
+
+ Returns:
+ updated_created_items (dict): created_items with the ":keep" flag stripped from volume keys.
+
+ """
+ return {
+ key.replace(":keep", ""): value for (key, value) in created_items.items()
+ }
+ def get_vminstance(self, vm_id):
+ """Returns the VM instance information from VIM"""
+ # self.logger.debug("Getting VM from VIM")
try:
self._reload_connection()
- self.neutron.delete_sfc_flow_classifier(class_id)
+ server = self.nova.servers.find(id=vm_id)
+ # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
- return class_id
+ return server.to_dict()
except (
- neExceptions.ConnectionFailed,
- neExceptions.NeutronException,
ksExceptions.ClientException,
- neExceptions.NeutronException,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
ConnectionError,
) as e:
self._format_exception(e)
- def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
- self.logger.debug(
- "Adding a new Service Function Instance to VIM, named '%s'", name
- )
+ def get_vminstance_console(self, vm_id, console_type="vnc"):
+ """
+ Get a console for the virtual machine
+ Params:
+ vm_id: uuid of the VM
+ console_type, can be:
+ "novnc" (by default), "xvpvnc" for VNC types,
+ "rdp-html5" for RDP types, "spice-html5" for SPICE types
+ Returns dict with the console parameters:
+ protocol: ssh, ftp, http, https, ...
+ server: usually ip address
+ port: the http, ssh, ... port
+ suffix: extra text, e.g. the http path and query string
+ """
+ self.logger.debug("Getting VM CONSOLE from VIM")
try:
- new_sfi = None
self._reload_connection()
- correlation = None
-
- if sfc_encap:
- correlation = "nsh"
+ server = self.nova.servers.find(id=vm_id)
- if len(ingress_ports) != 1:
- raise vimconn.VimConnNotSupportedException(
- "OpenStack VIM connector can only have 1 ingress port per SFI"
+ if console_type is None or console_type == "novnc":
+ console_dict = server.get_vnc_console("novnc")
+ elif console_type == "xvpvnc":
+ console_dict = server.get_vnc_console(console_type)
+ elif console_type == "rdp-html5":
+ console_dict = server.get_rdp_console(console_type)
+ elif console_type == "spice-html5":
+ console_dict = server.get_spice_console(console_type)
+ else:
+ raise vimconn.VimConnException(
+ "console type '{}' not allowed".format(console_type),
+ http_code=vimconn.HTTP_Bad_Request,
)
- if len(egress_ports) != 1:
- raise vimconn.VimConnNotSupportedException(
- "OpenStack VIM connector can only have 1 egress port per SFI"
- )
+ console_dict1 = console_dict.get("console")
- sfi_dict = {
- "name": name,
- "ingress": ingress_ports[0],
- "egress": egress_ports[0],
- "service_function_parameters": {"correlation": correlation},
- }
- new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
+ if console_dict1:
+ console_url = console_dict1.get("url")
- return new_sfi["port_pair"]["id"]
+ if console_url:
+ # parse console_url
+ protocol_index = console_url.find("//")
+ suffix_index = (
+ console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+ )
+ port_index = (
+ console_url[protocol_index + 2 : suffix_index].find(":")
+ + protocol_index
+ + 2
+ )
+
+ if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+ return (
+ -vimconn.HTTP_Internal_Server_Error,
+ "Unexpected response from VIM",
+ )
+
+ console_dict = {
+ "protocol": console_url[0:protocol_index],
+ "server": console_url[protocol_index + 2 : port_index],
+ "port": console_url[port_index:suffix_index],
+ "suffix": console_url[suffix_index + 1 :],
+ }
+ protocol_index += 2
+
+ return console_dict
+ raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
except (
- neExceptions.ConnectionFailed,
+ nvExceptions.NotFound,
ksExceptions.ClientException,
- neExceptions.NeutronException,
+ nvExceptions.ClientException,
+ nvExceptions.BadRequest,
ConnectionError,
) as e:
- if new_sfi:
- try:
- self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
- except Exception:
- self.logger.error(
- "Creation of Service Function Instance failed, with "
- "subsequent deletion failure as well."
- )
-
self._format_exception(e)
- def get_sfi(self, sfi_id):
- self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
- filter_dict = {"id": sfi_id}
- sfi_list = self.get_sfi_list(filter_dict)
+ def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
+ """Neutron delete ports by id.
+ Args:
+ k_id (str): Port id in the VIM
+ """
+ try:
+ port_dict = self.neutron.list_ports()
+ existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
+
+ if k_id in existing_ports:
+ self.neutron.delete_port(k_id)
- if len(sfi_list) == 0:
- raise vimconn.VimConnNotFoundException(
- "Service Function Instance '{}' not found".format(sfi_id)
+ except Exception as e:
+ self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+
+ def _delete_volumes_by_id_wth_cinder(
+ self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
+ ) -> bool:
+ """Cinder delete volume by id.
+ Args:
+ k (str): Full item name in created_items
+ k_id (str): ID of the volume in VIM
+ volumes_to_hold (list): Volumes not to delete
+ created_items (dict): All created items belonging to the VM
+
+ Returns:
+ True if the volume is not yet "available", so deletion must be retried
+ """
+ try:
+ if k_id in volumes_to_hold:
+ return
+
+ if self.cinder.volumes.get(k_id).status != "available":
+ return True
+
+ self.cinder.volumes.delete(k_id)
+ created_items[k] = None
+
+ except Exception as e:
+ self.logger.error(
+ "Error deleting volume: {}: {}".format(type(e).__name__, e)
)
- elif len(sfi_list) > 1:
- raise vimconn.VimConnConflictException(
- "Found more than one Service Function Instance with this criteria"
+
+ def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
+ """Neutron delete floating ip by id.
+ Args:
+ k (str): Full item name in created_items
+ k_id (str): ID of floating ip in VIM
+ created_items (dict): All created items belonging to the VM
+ """
+ try:
+ self.neutron.delete_floatingip(k_id)
+ created_items[k] = None
+
+ except Exception as e:
+ self.logger.error(
+ "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
)
- sfi = sfi_list[0]
+ @staticmethod
+ def _get_item_name_id(k: str) -> Tuple[str, str]:
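+ # created_items keys have the form "<item_type>:<vim_id>", e.g. "port:<uuid>" or "volume:<uuid>"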
+ k_item, _, k_id = k.partition(":")
+ return k_item, k_id
- return sfi
+ def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
+ """Delete VM ports attached to the networks before deleting virtual machine.
+ Args:
+ created_items (dict): All created items belonging to the VM
+ """
- def get_sfi_list(self, filter_dict={}):
- self.logger.debug(
- "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
- )
+ for k, v in created_items.items():
+ if not v: # skip already deleted
+ continue
- try:
- self._reload_connection()
- filter_dict_os = filter_dict.copy()
+ try:
+ k_item, k_id = self._get_item_name_id(k)
+ if k_item == "port":
+ self._delete_ports_by_id_wth_neutron(k_id)
- if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+ except Exception as e:
+ self.logger.error(
+ "Error deleting port: {}: {}".format(type(e).__name__, e)
+ )
- sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
- sfi_list = sfi_dict["port_pairs"]
- self.__sfi_os2mano(sfi_list)
+ def _delete_created_items(
+ self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
+ ) -> bool:
+ """Delete Volumes and floating ip if they exist in created_items."""
+ for k, v in created_items.items():
+ if not v: # skip already deleted
+ continue
- return sfi_list
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ try:
+ k_item, k_id = self._get_item_name_id(k)
+
+ if k_item == "volume":
+ unavailable_vol = self._delete_volumes_by_id_wth_cinder(
+ k, k_id, volumes_to_hold, created_items
+ )
+
+ if unavailable_vol:
+ keep_waiting = True
+
+ elif k_item == "floating_ip":
+ self._delete_floating_ip_by_id(k, k_id, created_items)
+
+ except Exception as e:
+ self.logger.error("Error deleting {}: {}".format(k, e))
- def delete_sfi(self, sfi_id):
- self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
+ return keep_waiting
+
+ @staticmethod
+ def _extract_items_wth_keep_flag_from_created_items(created_items: dict) -> dict:
+ """Remove the volumes which has key flag from created_items
+
+ Args:
+ created_items (dict): All created items belonging to the VM
+
+ Returns:
+ created_items (dict): created_items without the volumes flagged to be kept
+ """
+ return {
+ key: value
+ for (key, value) in created_items.items()
+ if len(key.split(":")) == 2
+ }
+
+ def delete_vminstance(
+ self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
+ ) -> None:
+ """Removes a VM instance from VIM. Returns the old identifier.
+ Args:
+ vm_id (str): Identifier of VM instance
+ created_items (dict): All created items belonging to the VM
+ volumes_to_hold (list): Volumes_to_hold
+ """
+ if created_items is None:
+ created_items = {}
+ if volumes_to_hold is None:
+ volumes_to_hold = []
try:
+ created_items = self._extract_items_wth_keep_flag_from_created_items(
+ created_items
+ )
+
self._reload_connection()
- self.neutron.delete_sfc_port_pair(sfi_id)
- return sfi_id
+ # Delete VM ports attached to the networks before the virtual machine
+ if created_items:
+ self._delete_vm_ports_attached_to_network(created_items)
+
+ if vm_id:
+ self.nova.servers.delete(vm_id)
+
+ # Even after being detached, volumes must reach "available" status before they can be
+ # deleted. This loop waits until they do (up to volume_timeout seconds).
+ keep_waiting = True
+ elapsed_time = 0
+
+ while keep_waiting and elapsed_time < volume_timeout:
+ keep_waiting = False
+
+ # Delete volumes and floating IP.
+ keep_waiting = self._delete_created_items(
+ created_items, volumes_to_hold, keep_waiting
+ )
+
+ if keep_waiting:
+ time.sleep(1)
+ elapsed_time += 1
+
except (
- neExceptions.ConnectionFailed,
- neExceptions.NeutronException,
+ nvExceptions.NotFound,
ksExceptions.ClientException,
- neExceptions.NeutronException,
+ nvExceptions.ClientException,
ConnectionError,
) as e:
self._format_exception(e)
- def new_sf(self, name, sfis, sfc_encap=True):
- self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
+ def refresh_vms_status(self, vm_list):
+ """Get the status of the virtual machines and their interfaces/ports
+ Params: the list of VM identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this Virtual Machine
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+ # CREATING (on building process), ERROR
+ # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+ #
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ interfaces:
+ - vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ mac_address: #Text format XX:XX:XX:XX:XX:XX
+ vim_net_id: #network id where this interface is connected
+ vim_interface_id: #interface/port VIM id
+ ip_address: #null, or text with IPv4, IPv6 address
+ compute_node: #identification of compute node where PF,VF interface is allocated
+ pci: #PCI address of the NIC that hosts the PF,VF
+ vlan: #physical VLAN used for VF
+ """
+ vm_dict = {}
+ self.logger.debug(
+ "refresh_vms status: Getting tenant VM instance information from VIM"
+ )
+
+ for vm_id in vm_list:
+ vm = {}
- try:
- new_sf = None
- self._reload_connection()
- # correlation = None
- # if sfc_encap:
- # correlation = "nsh"
+ try:
+ vm_vim = self.get_vminstance(vm_id)
- for instance in sfis:
- sfi = self.get_sfi(instance)
+ if vm_vim["status"] in vmStatus2manoFormat:
+ vm["status"] = vmStatus2manoFormat[vm_vim["status"]]
+ else:
+ vm["status"] = "OTHER"
+ vm["error_msg"] = "VIM status reported " + vm_vim["status"]
- if sfi.get("sfc_encap") != sfc_encap:
- raise vimconn.VimConnNotSupportedException(
- "OpenStack VIM connector requires all SFIs of the "
- "same SF to share the same SFC Encapsulation"
- )
+ vm_vim.pop("OS-EXT-SRV-ATTR:user_data", None)
+ vm_vim.pop("user_data", None)
+ vm["vim_info"] = self.serialize(vm_vim)
- sf_dict = {"name": name, "port_pairs": sfis}
- new_sf = self.neutron.create_sfc_port_pair_group(
- {"port_pair_group": sf_dict}
- )
+ vm["interfaces"] = []
+ if vm_vim.get("fault"):
+ vm["error_msg"] = str(vm_vim["fault"])
- return new_sf["port_pair_group"]["id"]
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- if new_sf:
+ # get interfaces
try:
- self.neutron.delete_sfc_port_pair_group(
- new_sf["port_pair_group"]["id"]
- )
- except Exception:
- self.logger.error(
- "Creation of Service Function failed, with "
- "subsequent deletion failure as well."
- )
+ self._reload_connection()
+ port_dict = self.neutron.list_ports(device_id=vm_id)
- self._format_exception(e)
+ for port in port_dict["ports"]:
+ interface = {}
+ interface["vim_info"] = self.serialize(port)
+ interface["mac_address"] = port.get("mac_address")
+ interface["vim_net_id"] = port["network_id"]
+ interface["vim_interface_id"] = port["id"]
+ # check if OS-EXT-SRV-ATTR:host is there,
+ # in case of non-admin credentials, it will be missing
+
+ if vm_vim.get("OS-EXT-SRV-ATTR:host"):
+ interface["compute_node"] = vm_vim["OS-EXT-SRV-ATTR:host"]
+
+ interface["pci"] = None
+
+ # check if binding:profile is there,
+ # in case of non-admin credentials, it will be missing
+ if port.get("binding:profile"):
+ if port["binding:profile"].get("pci_slot"):
+ # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting
+ # the slot to 0x00
+ # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
+ # CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2) assuming there are 2 ports per nic
+ pci = port["binding:profile"]["pci_slot"]
+ # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
+ interface["pci"] = pci
+
+ interface["vlan"] = None
+
+ if port.get("binding:vif_details"):
+ interface["vlan"] = port["binding:vif_details"].get("vlan")
+
+ # Get vlan from network in case not present in port for those old openstacks and cases where
+ # it is needed vlan at PT
+ if not interface["vlan"]:
+ # if network is of type vlan and port is of type direct (sr-iov) then set vlan id
+ network = self.neutron.show_network(port["network_id"])
+
+ if (
+ network["network"].get("provider:network_type")
+ == "vlan"
+ ):
+ # and port.get("binding:vnic_type") in ("direct", "direct-physical"):
+ interface["vlan"] = network["network"].get(
+ "provider:segmentation_id"
+ )
+
+ ips = []
+ # look for floating ip address
+ try:
+ floating_ip_dict = self.neutron.list_floatingips(
+ port_id=port["id"]
+ )
+
+ if floating_ip_dict.get("floatingips"):
+ ips.append(
+ floating_ip_dict["floatingips"][0].get(
+ "floating_ip_address"
+ )
+ )
+ except Exception:
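+ # Best effort: a failed floating IP lookup must not break the status refresh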
+ pass
+
+ for subnet in port["fixed_ips"]:
+ ips.append(subnet["ip_address"])
+
+ interface["ip_address"] = ";".join(ips)
+ vm["interfaces"].append(interface)
+ except Exception as e:
+ self.logger.error(
+ "Error getting vm interface information {}: {}".format(
+ type(e).__name__, e
+ ),
+ exc_info=True,
+ )
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting vm status: %s", str(e))
+ vm["status"] = "DELETED"
+ vm["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting vm status: %s", str(e))
+ vm["status"] = "VIM_ERROR"
+ vm["error_msg"] = str(e)
+
+ vm_dict[vm_id] = vm
+
+ return vm_dict
+
+ def action_vminstance(self, vm_id, action_dict, created_items={}):
+ """Send and action over a VM instance from VIM
+ Returns None or the console dict if the action was successfully sent to the VIM
+ """
+ self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+
+ try:
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
+
+ if "start" in action_dict:
+ if action_dict["start"] == "rebuild":
+ server.rebuild()
+ else:
+ if server.status == "PAUSED":
+ server.unpause()
+ elif server.status == "SUSPENDED":
+ server.resume()
+ elif server.status == "SHUTOFF":
+ server.start()
+ else:
+ self.logger.debug(
+ "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
+ )
+ raise vimconn.VimConnException(
+ "Cannot 'start' instance while it is in active state",
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+
+ elif "pause" in action_dict:
+ server.pause()
+ elif "resume" in action_dict:
+ server.resume()
+ elif "shutoff" in action_dict or "shutdown" in action_dict:
+ self.logger.debug("server status %s", server.status)
+ if server.status == "ACTIVE":
+ server.stop()
+ else:
+ self.logger.debug("ERROR: VM is not in Active state")
+ raise vimconn.VimConnException(
+ "VM is not in active state, stop operation is not allowed",
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ elif "forceOff" in action_dict:
+ server.stop() # TODO
+ elif "terminate" in action_dict:
+ server.delete()
+ elif "createImage" in action_dict:
+ server.create_image()
+ # "path":path_schema,
+ # "description":description_schema,
+ # "name":name_schema,
+ # "metadata":metadata_schema,
+ # "imageRef": id_schema,
+ # "disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
+ elif "rebuild" in action_dict:
+ server.rebuild(server.image["id"])
+ elif "reboot" in action_dict:
+ server.reboot() # reboot_type="SOFT"
+ elif "console" in action_dict:
+ console_type = action_dict["console"]
+
+ if console_type is None or console_type == "novnc":
+ console_dict = server.get_vnc_console("novnc")
+ elif console_type == "xvpvnc":
+ console_dict = server.get_vnc_console(console_type)
+ elif console_type == "rdp-html5":
+ console_dict = server.get_rdp_console(console_type)
+ elif console_type == "spice-html5":
+ console_dict = server.get_spice_console(console_type)
+ else:
+ raise vimconn.VimConnException(
+ "console type '{}' not allowed".format(console_type),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+
+ try:
+ console_url = console_dict["console"]["url"]
+ # parse console_url
+ protocol_index = console_url.find("//")
+ suffix_index = (
+ console_url[protocol_index + 2 :].find("/") + protocol_index + 2
+ )
+ port_index = (
+ console_url[protocol_index + 2 : suffix_index].find(":")
+ + protocol_index
+ + 2
+ )
+
+ if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+ raise vimconn.VimConnException(
+ "Unexpected response from VIM " + str(console_dict)
+ )
+
+ console_dict2 = {
+ "protocol": console_url[0:protocol_index],
+ "server": console_url[protocol_index + 2 : port_index],
+ "port": int(console_url[port_index + 1 : suffix_index]),
+ "suffix": console_url[suffix_index + 1 :],
+ }
+
+ return console_dict2
+ except Exception:
+ raise vimconn.VimConnException(
+ "Unexpected response from VIM " + str(console_dict)
+ )
+
+ return None
+ except (
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+ # TODO insert exception vimconn.HTTP_Unauthorized
+
+ # ###### VIO Specific Changes #########
+ def _generate_vlanID(self):
+ """
+ Method to get unused vlanID
+ Args:
+ None
+ Returns:
+ vlanID
+ """
+ # Get used VLAN IDs
+ usedVlanIDs = []
+ networks = self.get_network_list()
+
+ for net in networks:
+ if net.get("provider:segmentation_id"):
+ usedVlanIDs.append(net.get("provider:segmentation_id"))
+
+ used_vlanIDs = set(usedVlanIDs)
+
+ # find unused VLAN ID
+ for vlanID_range in self.config.get("dataplane_net_vlan_range"):
+ try:
+ start_vlanid, end_vlanid = map(
+ int, vlanID_range.replace(" ", "").split("-")
+ )
+
+ for vlanID in range(start_vlanid, end_vlanid + 1):
+ if vlanID not in used_vlanIDs:
+ return vlanID
+ except Exception as exp:
+ raise vimconn.VimConnException(
+ "Exception {} occurred while generating VLAN ID.".format(exp)
+ )
+ else:
+ raise vimconn.VimConnConflictException(
+ "Unable to create the SRIOV VLAN network. All given Vlan IDs {} are in use.".format(
+ self.config.get("dataplane_net_vlan_range")
+ )
+ )
+
+ def _generate_multisegment_vlanID(self):
+ """
+ Method to get unused vlanID
+ Args:
+ None
+ Returns:
+ vlanID
+ """
+ # Get used VLAN IDs
+ usedVlanIDs = []
+ networks = self.get_network_list()
+ for net in networks:
+ if net.get("provider:network_type") == "vlan" and net.get(
+ "provider:segmentation_id"
+ ):
+ usedVlanIDs.append(net.get("provider:segmentation_id"))
+ elif net.get("segments"):
+ for segment in net.get("segments"):
+ if segment.get("provider:network_type") == "vlan" and segment.get(
+ "provider:segmentation_id"
+ ):
+ usedVlanIDs.append(segment.get("provider:segmentation_id"))
+
+ used_vlanIDs = set(usedVlanIDs)
+
+ # find unused VLAN ID
+ for vlanID_range in self.config.get("multisegment_vlan_range"):
+ try:
+ start_vlanid, end_vlanid = map(
+ int, vlanID_range.replace(" ", "").split("-")
+ )
+
+ for vlanID in range(start_vlanid, end_vlanid + 1):
+ if vlanID not in used_vlanIDs:
+ return vlanID
+ except Exception as exp:
+ raise vimconn.VimConnException(
+ "Exception {} occurred while generating VLAN ID.".format(exp)
+ )
+ else:
+ raise vimconn.VimConnConflictException(
+ "Unable to create the VLAN segment. All VLAN IDs {} are in use.".format(
+ self.config.get("multisegment_vlan_range")
+ )
+ )
+
+ def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
+ """
+ Method to validate user given vlanID ranges
+ Args:
+ input_vlan_range: list of "start-end" VLAN range strings
+ text_vlan_range: name of the range, used in error messages
+ Returns: None
+ """
+ for vlanID_range in input_vlan_range:
+ vlan_range = vlanID_range.replace(" ", "")
+ # validate format
+ vlanID_pattern = r"(\d)*-(\d)*$"
+ match_obj = re.match(vlanID_pattern, vlan_range)
+ if not match_obj:
+ raise vimconn.VimConnConflictException(
+ "Invalid VLAN range for {}: {}.You must provide "
+ "'{}' in format [start_ID - end_ID].".format(
+ text_vlan_range, vlanID_range, text_vlan_range
+ )
+ )
+
+ start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
+ if start_vlanid <= 0:
+ raise vimconn.VimConnConflictException(
+ "Invalid VLAN range for {}: {}. Start ID can not be zero. For VLAN "
+ "networks valid IDs are 1 to 4094 ".format(
+ text_vlan_range, vlanID_range
+ )
+ )
+
+ if end_vlanid > 4094:
+ raise vimconn.VimConnConflictException(
+ "Invalid VLAN range for {}: {}. End VLAN ID can not be "
+ "greater than 4094. For VLAN networks valid IDs are 1 to 4094 ".format(
+ text_vlan_range, vlanID_range
+ )
+ )
+
+ if start_vlanid > end_vlanid:
+ raise vimconn.VimConnConflictException(
+ "Invalid VLAN range for {}: {}. You must provide '{}'"
+ " in format start_ID - end_ID and start_ID < end_ID ".format(
+ text_vlan_range, vlanID_range, text_vlan_range
+ )
+ )
+
+ def get_hosts_info(self):
+ """Get the information of deployed hosts
+ Returns the hosts content"""
+ if self.debug:
+ print("osconnector: Getting Host info from VIM")
+
+ try:
+ h_list = []
+ self._reload_connection()
+ hypervisors = self.nova.hypervisors.list()
- def get_sf(self, sf_id):
- self.logger.debug("Getting Service Function %s from VIM", sf_id)
- filter_dict = {"id": sf_id}
- sf_list = self.get_sf_list(filter_dict)
+ for hype in hypervisors:
+ h_list.append(hype.to_dict())
- if len(sf_list) == 0:
- raise vimconn.VimConnNotFoundException(
- "Service Function '{}' not found".format(sf_id)
- )
- elif len(sf_list) > 1:
- raise vimconn.VimConnConflictException(
- "Found more than one Service Function with this criteria"
+ return 1, {"hosts": h_list}
+ except nvExceptions.NotFound as e:
+ error_value = -vimconn.HTTP_Not_Found
+ error_text = str(e) if len(e.args) == 0 else str(e.args[0])
+ except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+ error_value = -vimconn.HTTP_Bad_Request
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
)
- sf = sf_list[0]
-
- return sf
+ # TODO insert exception vimconn.HTTP_Unauthorized
+ # if reaching here is because an exception
+ self.logger.debug("get_hosts_info " + error_text)
- def get_sf_list(self, filter_dict={}):
- self.logger.debug(
- "Getting Service Function from VIM filter: '%s'", str(filter_dict)
- )
+ return error_value, error_text
- try:
- self._reload_connection()
- filter_dict_os = filter_dict.copy()
+ def get_hosts(self, vim_tenant):
+ """Get the hosts and deployed instances
+ Returns the hosts content"""
+ r, hype_dict = self.get_hosts_info()
- if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+ if r < 0:
+ return r, hype_dict
- sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
- sf_list = sf_dict["port_pair_groups"]
- self.__sf_os2mano(sf_list)
+ hypervisors = hype_dict["hosts"]
- return sf_list
- except (
- neExceptions.ConnectionFailed,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ try:
+ servers = self.nova.servers.list()
+ for hype in hypervisors:
+ for server in servers:
+ if (
+ server.to_dict()["OS-EXT-SRV-ATTR:hypervisor_hostname"]
+ == hype["hypervisor_hostname"]
+ ):
+ if "vm" in hype:
+ hype["vm"].append(server.id)
+ else:
+ hype["vm"] = [server.id]
- def delete_sf(self, sf_id):
- self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+ return 1, hype_dict
+ except nvExceptions.NotFound as e:
+ error_value = -vimconn.HTTP_Not_Found
+ error_text = str(e) if len(e.args) == 0 else str(e.args[0])
+ except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+ error_value = -vimconn.HTTP_Bad_Request
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
- try:
- self._reload_connection()
- self.neutron.delete_sfc_port_pair_group(sf_id)
+ # TODO insert exception vimconn.HTTP_Unauthorized
+ # if reaching here is because an exception
+ self.logger.debug("get_hosts " + error_text)
- return sf_id
- except (
- neExceptions.ConnectionFailed,
- neExceptions.NeutronException,
- ksExceptions.ClientException,
- neExceptions.NeutronException,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ return error_value, error_text
- def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
- self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
+ def new_affinity_group(self, affinity_group_data):
+ """Adds a server group to VIM
+ affinity_group_data contains a dictionary with information, keys:
+ name: name in VIM for the server group
+ type: affinity or anti-affinity
+ scope: Only nfvi-node allowed
+ Returns the server group identifier"""
+ self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
try:
- new_sfp = None
- self._reload_connection()
- # In networking-sfc the MPLS encapsulation is legacy
- # should be used when no full SFC Encapsulation is intended
- correlation = "mpls"
-
- if sfc_encap:
- correlation = "nsh"
-
- sfp_dict = {
- "name": name,
- "flow_classifiers": classifications,
- "port_pair_groups": sfs,
- "chain_parameters": {"correlation": correlation},
- }
+ name = affinity_group_data["name"]
+ policy = affinity_group_data["type"]
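+ # "type" is "affinity" or "anti-affinity" and is passed through as the Nova server group policy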
- if spi:
- sfp_dict["chain_id"] = spi
-
- new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
+ self._reload_connection()
+ new_server_group = self.nova.server_groups.create(name, policy)
- return new_sfp["port_chain"]["id"]
+ return new_server_group.id
except (
- neExceptions.ConnectionFailed,
ksExceptions.ClientException,
- neExceptions.NeutronException,
+ nvExceptions.ClientException,
ConnectionError,
+ KeyError,
) as e:
- if new_sfp:
- try:
- self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
- except Exception:
- self.logger.error(
- "Creation of Service Function Path failed, with "
- "subsequent deletion failure as well."
- )
-
self._format_exception(e)
- def get_sfp(self, sfp_id):
- self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
-
- filter_dict = {"id": sfp_id}
- sfp_list = self.get_sfp_list(filter_dict)
-
- if len(sfp_list) == 0:
- raise vimconn.VimConnNotFoundException(
- "Service Function Path '{}' not found".format(sfp_id)
- )
- elif len(sfp_list) > 1:
- raise vimconn.VimConnConflictException(
- "Found more than one Service Function Path with this criteria"
- )
-
- sfp = sfp_list[0]
-
- return sfp
-
- def get_sfp_list(self, filter_dict={}):
- self.logger.debug(
- "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
- )
-
+ def get_affinity_group(self, affinity_group_id):
+ """Obtain server group details from the VIM. Returns the server group detais as a dict"""
+ self.logger.debug("Getting flavor '%s'", affinity_group_id)
try:
self._reload_connection()
- filter_dict_os = filter_dict.copy()
+ server_group = self.nova.server_groups.find(id=affinity_group_id)
- if self.api_version3 and "tenant_id" in filter_dict_os:
- filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
-
- sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
- sfp_list = sfp_dict["port_chains"]
- self.__sfp_os2mano(sfp_list)
-
- return sfp_list
+ return server_group.to_dict()
except (
- neExceptions.ConnectionFailed,
+ nvExceptions.NotFound,
+ nvExceptions.ClientException,
ksExceptions.ClientException,
- neExceptions.NeutronException,
ConnectionError,
) as e:
self._format_exception(e)
- def delete_sfp(self, sfp_id):
- self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
-
+ def delete_affinity_group(self, affinity_group_id):
+ """Deletes a server group from the VIM. Returns the old affinity_group_id"""
+ self.logger.debug("Getting server group '%s'", affinity_group_id)
try:
self._reload_connection()
- self.neutron.delete_sfc_port_chain(sfp_id)
+ self.nova.server_groups.delete(affinity_group_id)
- return sfp_id
+ return affinity_group_id
except (
- neExceptions.ConnectionFailed,
- neExceptions.NeutronException,
+ nvExceptions.NotFound,
ksExceptions.ClientException,
- neExceptions.NeutronException,
+ nvExceptions.ClientException,
ConnectionError,
) as e:
self._format_exception(e)
- def refresh_sfps_status(self, sfp_list):
- """Get the status of the service function path
- Params: the list of sfp identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function path
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)F
+ def get_vdu_state(self, vm_id):
"""
- sfp_dict = {}
- self.logger.debug(
- "refresh_sfps status: Getting tenant SFP information from VIM"
- )
-
- for sfp_id in sfp_list:
- sfp = {}
-
- try:
- sfp_vim = self.get_sfp(sfp_id)
-
- if sfp_vim["spi"]:
- sfp["status"] = vmStatus2manoFormat["ACTIVE"]
- else:
- sfp["status"] = "OTHER"
- sfp["error_msg"] = "VIM status reported " + sfp["status"]
-
- sfp["vim_info"] = self.serialize(sfp_vim)
-
- if sfp_vim.get("fault"):
- sfp["error_msg"] = str(sfp_vim["fault"])
- except vimconn.VimConnNotFoundException as e:
- self.logger.error("Exception getting sfp status: %s", str(e))
- sfp["status"] = "DELETED"
- sfp["error_msg"] = str(e)
- except vimconn.VimConnException as e:
- self.logger.error("Exception getting sfp status: %s", str(e))
- sfp["status"] = "VIM_ERROR"
- sfp["error_msg"] = str(e)
-
- sfp_dict[sfp_id] = sfp
-
- return sfp_dict
-
- def refresh_sfis_status(self, sfi_list):
- """Get the status of the service function instances
- Params: the list of sfi identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function instance
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ Get the state of a VDU (VM instance)
+ Args:
+ vm_id: ID of the instance
"""
- sfi_dict = {}
- self.logger.debug(
- "refresh_sfis status: Getting tenant sfi information from VIM"
+ self.logger.debug("Getting the status of VM")
+ self.logger.debug("VIM VM ID %s", vm_id)
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
+ server_dict = server.to_dict()
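+ # Fixed order consumed by migrate_instance: status, flavor id, compute host, availability zone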
+ vdu_data = [
+ server_dict["status"],
+ server_dict["flavor"]["id"],
+ server_dict["OS-EXT-SRV-ATTR:host"],
+ server_dict["OS-EXT-AZ:availability_zone"],
+ ]
+ self.logger.debug("vdu_data %s", vdu_data)
+ return vdu_data
+
+ def check_compute_availability(self, host, server_flavor_details):
+ self._reload_connection()
+ hypervisor_search = self.nova.hypervisors.search(
+ hypervisor_match=host, servers=True
)
+ for hypervisor in hypervisor_search:
+ hypervisor_id = hypervisor.to_dict()["id"]
+ hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
+ hypervisor_dict = hypervisor_details.to_dict()
+ resources_available = [
+ hypervisor_dict["free_ram_mb"],
+ hypervisor_dict["disk_available_least"],
+ hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
+ ]
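+ # Order matches server_flavor_details in migrate_instance: [ram, disk, vcpus]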
+ compute_available = all(
+ x > y for x, y in zip(resources_available, server_flavor_details)
+ )
+ if compute_available:
+ return host
- for sfi_id in sfi_list:
- sfi = {}
-
- try:
- sfi_vim = self.get_sfi(sfi_id)
-
- if sfi_vim:
- sfi["status"] = vmStatus2manoFormat["ACTIVE"]
+ def check_availability_zone(
+ self, old_az, server_flavor_details, old_host, host=None
+ ):
+ self._reload_connection()
+ az_check = {"zone_check": False, "compute_availability": None}
+ aggregates_list = self.nova.aggregates.list()
+ for aggregate in aggregates_list:
+ aggregate_details = aggregate.to_dict()
+ if aggregate_details["availability_zone"] == old_az:
+ hosts_list = aggregate_details["hosts"]
+ if host is not None:
+ if host in hosts_list:
+ az_check["zone_check"] = True
+ available_compute_id = self.check_compute_availability(
+ host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["compute_availability"] = available_compute_id
else:
- sfi["status"] = "OTHER"
- sfi["error_msg"] = "VIM status reported " + sfi["status"]
-
- sfi["vim_info"] = self.serialize(sfi_vim)
-
- if sfi_vim.get("fault"):
- sfi["error_msg"] = str(sfi_vim["fault"])
- except vimconn.VimConnNotFoundException as e:
- self.logger.error("Exception getting sfi status: %s", str(e))
- sfi["status"] = "DELETED"
- sfi["error_msg"] = str(e)
- except vimconn.VimConnException as e:
- self.logger.error("Exception getting sfi status: %s", str(e))
- sfi["status"] = "VIM_ERROR"
- sfi["error_msg"] = str(e)
-
- sfi_dict[sfi_id] = sfi
-
- return sfi_dict
+ for check_host in hosts_list:
+ if check_host != old_host:
+ available_compute_id = self.check_compute_availability(
+ check_host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["zone_check"] = True
+ az_check["compute_availability"] = available_compute_id
+ break
+ else:
+ az_check["zone_check"] = True
+ return az_check
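+    # Hypothetical example: for an instance in zone "nova" running on
+    # "compute-1" whose flavor needs [4096 MB RAM, 40 GB disk, 2 vCPUs]:
+    #   check_availability_zone("nova", [4096, 40, 2], "compute-1", "compute-2")
+    #   -> {"zone_check": True, "compute_availability": "compute-2"}
+    # provided "compute-2" belongs to the same zone and has enough capacity.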
- def refresh_sfs_status(self, sf_list):
- """Get the status of the service functions
- Params: the list of sf identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this service function
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ def migrate_instance(self, vm_id, compute_host=None):
"""
- sf_dict = {}
- self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
-
- for sf_id in sf_list:
- sf = {}
-
- try:
- sf_vim = self.get_sf(sf_id)
-
- if sf_vim:
- sf["status"] = vmStatus2manoFormat["ACTIVE"]
- else:
- sf["status"] = "OTHER"
- sf["error_msg"] = "VIM status reported " + sf_vim["status"]
-
- sf["vim_info"] = self.serialize(sf_vim)
-
- if sf_vim.get("fault"):
- sf["error_msg"] = str(sf_vim["fault"])
- except vimconn.VimConnNotFoundException as e:
- self.logger.error("Exception getting sf status: %s", str(e))
- sf["status"] = "DELETED"
- sf["error_msg"] = str(e)
- except vimconn.VimConnException as e:
- self.logger.error("Exception getting sf status: %s", str(e))
- sf["status"] = "VIM_ERROR"
- sf["error_msg"] = str(e)
-
- sf_dict[sf_id] = sf
-
- return sf_dict
-
- def refresh_classifications_status(self, classification_list):
- """Get the status of the classifications
- Params: the list of classification identifiers
- Returns a dictionary with:
- vm_id: #VIM id of this classifier
- status: #Mandatory. Text with one of:
- # DELETED (not found at vim)
- # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
- # OTHER (Vim reported other status not understood)
- # ERROR (VIM indicates an ERROR status)
- # ACTIVE,
- # CREATING (on building process)
- error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
- vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+        Migrate a VDU (live migration) to another compute host
+        param:
+            vm_id: ID of the instance
+            compute_host: target host to migrate the VDU to; if None, any host
+                of the same availability zone with enough free resources is used
"""
- classification_dict = {}
- self.logger.debug(
- "refresh_classifications status: Getting tenant classification information from VIM"
- )
-
- for classification_id in classification_list:
- classification = {}
-
- try:
- classification_vim = self.get_classification(classification_id)
+ self._reload_connection()
+ vm_state = False
+ instance_state = self.get_vdu_state(vm_id)
+ server_flavor_id = instance_state[1]
+ server_hypervisor_name = instance_state[2]
+ server_availability_zone = instance_state[3]
+ try:
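+            # Capacity required by the current flavor: [ram (MB), disk (GB), vcpus]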
+ server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+ server_flavor_details = [
+ server_flavor["ram"],
+ server_flavor["disk"],
+ server_flavor["vcpus"],
+ ]
+ if compute_host == server_hypervisor_name:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to the same host '{}'".format(
+ vm_id, compute_host
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ az_status = self.check_availability_zone(
+ server_availability_zone,
+ server_flavor_details,
+ server_hypervisor_name,
+ compute_host,
+ )
+ availability_zone_check = az_status["zone_check"]
+ available_compute_id = az_status.get("compute_availability")
- if classification_vim:
- classification["status"] = vmStatus2manoFormat["ACTIVE"]
+ if availability_zone_check is False:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to a different availability zone".format(
+ vm_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ if available_compute_id is not None:
+ self.nova.servers.live_migrate(
+ server=vm_id,
+ host=available_compute_id,
+ block_migration=True,
+ disk_over_commit=False,
+ )
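+                # Live migration is asynchronous: wait until the server is
+                # ACTIVE again and check that it actually landed on the target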
+ state = "MIGRATING"
+ changed_compute_host = ""
+ if state == "MIGRATING":
+ vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+ changed_compute_host = self.get_vdu_state(vm_id)[2]
+ if vm_state and changed_compute_host == available_compute_id:
+ self.logger.debug(
+ "Instance '{}' migrated to the new compute host '{}'".format(
+ vm_id, changed_compute_host
+ )
+ )
+ return state, available_compute_id
else:
- classification["status"] = "OTHER"
- classification["error_msg"] = (
- "VIM status reported " + classification["status"]
+ raise vimconn.VimConnException(
+ "Migration Failed. Instance '{}' not moved to the new host {}".format(
+ vm_id, available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
)
+ else:
+ raise vimconn.VimConnException(
+ "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+ available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ except (
+ nvExceptions.BadRequest,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ) as e:
+ self._format_exception(e)
- classification["vim_info"] = self.serialize(classification_vim)
+ def resize_instance(self, vm_id, new_flavor_id):
+ """
+        Resize a VM to the given flavor
+        param:
+            vm_id: ID of the instance
+            new_flavor_id: ID of the flavor to resize the instance to
+        Returns the status of the instance after the resize
+ """
+ self._reload_connection()
+ self.logger.debug("resize the flavor of an instance")
+ instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
+ old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
+ new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
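+        # Nova cannot shrink the root disk on resize, so a flavor with a
+        # smaller disk is rejected up front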
+ try:
+            if instance_status in ("ACTIVE", "SHUTOFF"):
+ if old_flavor_disk > new_flavor_disk:
+ raise nvExceptions.BadRequest(
+ 400,
+ message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+ )
+ else:
+ self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+ vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+ if vm_state:
+ instance_resized_status = self.confirm_resize(vm_id)
+ return instance_resized_status
+ else:
+ raise nvExceptions.BadRequest(
+ 409,
+ message="Cannot 'resize' vm_state is in ERROR",
+ )
- if classification_vim.get("fault"):
- classification["error_msg"] = str(classification_vim["fault"])
- except vimconn.VimConnNotFoundException as e:
- self.logger.error("Exception getting classification status: %s", str(e))
- classification["status"] = "DELETED"
- classification["error_msg"] = str(e)
- except vimconn.VimConnException as e:
- self.logger.error("Exception getting classification status: %s", str(e))
- classification["status"] = "VIM_ERROR"
- classification["error_msg"] = str(e)
+ else:
+ self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
+ raise nvExceptions.BadRequest(
+ 409,
+ message="Cannot 'resize' instance while it is in vm_state resized",
+ )
+ except (
+ nvExceptions.BadRequest,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ) as e:
+ self._format_exception(e)
- classification_dict[classification_id] = classification
+ def confirm_resize(self, vm_id):
+ """
+ Confirm the resize of an instance
+ param:
+ vm_id: ID of an instance
+ """
+ self._reload_connection()
+ self.nova.servers.confirm_resize(server=vm_id)
+ if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
+ self.__wait_for_vm(vm_id, "ACTIVE")
+ instance_status = self.get_vdu_state(vm_id)[0]
+ return instance_status
- return classification_dict
+ def get_monitoring_data(self):
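+        """Get the servers (with details) and all ports of this tenant from the VIM
+        Returns a tuple (all_servers, all_ports) as returned by Nova and Neutron
+        """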
+ try:
+ self.logger.debug("Getting servers and ports data from Openstack VIMs.")
+ self._reload_connection()
+ all_servers = self.nova.servers.list(detailed=True)
+ all_ports = self.neutron.list_ports()
+ return all_servers, all_ports
+ except (
+ vimconn.VimConnException,
+ vimconn.VimConnNotFoundException,
+ vimconn.VimConnConnectionException,
+ ) as e:
+ raise vimconn.VimConnException(
+ f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+ )