- Service Function Path (OSM) -> Port Chain (Neutron)
"""
-from osm_ro_plugin import vimconn
-
-# import json
+import copy
+from http.client import HTTPException
+import json
import logging
-import netaddr
-import time
-import yaml
+from pprint import pformat
import random
import re
-import copy
-from pprint import pformat
-from novaclient import client as nClient, exceptions as nvExceptions
-from keystoneauth1.identity import v2, v3
+import time
+from typing import Dict, Optional, Tuple
+
+from cinderclient import client as cClient
+from glanceclient import client as glClient
+import glanceclient.exc as gl1Exceptions
from keystoneauth1 import session
+from keystoneauth1.identity import v2, v3
import keystoneclient.exceptions as ksExceptions
-import keystoneclient.v3.client as ksClient_v3
import keystoneclient.v2_0.client as ksClient_v2
-from glanceclient import client as glClient
-import glanceclient.exc as gl1Exceptions
-from cinderclient import client as cClient
-
-# TODO py3 check that this base exception matches python2 httplib.HTTPException
-from http.client import HTTPException
-from neutronclient.neutron import client as neClient
+import keystoneclient.v3.client as ksClient_v3
+import netaddr
from neutronclient.common import exceptions as neExceptions
+from neutronclient.neutron import client as neClient
+from novaclient import client as nClient, exceptions as nvExceptions
+from osm_ro_plugin import vimconn
from requests.exceptions import ConnectionError
+import yaml
__author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
__date__ = "$22-sep-2017 23:59:59$"
flavor.vcpus,
flavor.disk,
flavor.ephemeral,
- flavor.swap,
+ flavor.swap if isinstance(flavor.swap, int) else 0,
)
if flavor_data == flavor_target:
return flavor.id
if numas:
numa_nodes = len(numas)
- if numa_nodes > 1:
- return -1, "Can not add flavor with more than one numa"
-
extra_specs["hw:numa_nodes"] = str(numa_nodes)
- extra_specs["hw:mem_page_size"] = "large"
- extra_specs["hw:cpu_policy"] = "dedicated"
- extra_specs["hw:numa_mempolicy"] = "strict"
if self.vim_type == "VIO":
extra_specs[
extra_specs["vmware:latency_sensitivity_level"] = "high"
for numa in numas:
+ if "id" in numa:
+ node_id = numa["id"]
+
+ if "memory" in numa:
+ memory_mb = numa["memory"] * 1024
+ memory = "hw:numa_mem.{}".format(node_id)
+ extra_specs[memory] = int(memory_mb)
+
+ if "vcpu" in numa:
+ vcpu = numa["vcpu"]
+ cpu = "hw:numa_cpus.{}".format(node_id)
+ vcpu = ",".join(map(str, vcpu))
+ extra_specs[cpu] = vcpu
+
# overwrite ram and vcpus
# check if key "memory" is present in numa else use ram value at flavor
- if "memory" in numa:
- ram = numa["memory"] * 1024
# See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
# implemented/virt-driver-cpu-thread-pinning.html
- extra_specs["hw:cpu_sockets"] = 1
+ extra_specs["hw:cpu_sockets"] = str(numa_nodes)
if "paired-threads" in numa:
vcpus = numa["paired-threads"] * 2
extended.get("disk-io-quota"), "disk_io", extra_specs
)
+ # Set the mempage size as specified in the descriptor
+ if extended.get("mempage-size"):
+ if extended.get("mempage-size") == "LARGE":
+ extra_specs["hw:mem_page_size"] = "large"
+ elif extended.get("mempage-size") == "SMALL":
+ extra_specs["hw:mem_page_size"] = "small"
+ elif extended.get("mempage-size") == "SIZE_2MB":
+ extra_specs["hw:mem_page_size"] = "2MB"
+ elif extended.get("mempage-size") == "SIZE_1GB":
+ extra_specs["hw:mem_page_size"] = "1GB"
+ elif extended.get("mempage-size") == "PREFER_LARGE":
+ extra_specs["hw:mem_page_size"] = "any"
+ else:
+ # The validations in NBI should make reaching here not possible.
+ # If this message is shown, check validations
+ self.logger.debug(
+ "Invalid mempage-size %s. Will be ignored",
+ extended.get("mempage-size"),
+ )
+ if extended.get("cpu-pinning-policy"):
+ extra_specs["hw:cpu_policy"] = extended.get(
+ "cpu-pinning-policy"
+ ).lower()
+
+ # Set the cpu thread pinning policy as specified in the descriptor
+ if extended.get("cpu-thread-pinning-policy"):
+ extra_specs["hw:cpu_thread_policy"] = extended.get(
+ "cpu-thread-pinning-policy"
+ ).lower()
+
+ # Set the mem policy as specified in the descriptor
+ if extended.get("mem-policy"):
+ extra_specs["hw:numa_mempolicy"] = extended.get(
+ "mem-policy"
+ ).lower()
+
# create flavor
new_flavor = self.nova.flavors.create(
name=name,
"No enough availability zones at VIM for this deployment"
)
- def new_vminstance(
- self,
- name,
- description,
- start,
- image_id,
- flavor_id,
- net_list,
- cloud_config=None,
- disk_list=None,
- availability_zone_index=None,
- availability_zone_list=None,
- ):
- """Adds a VM instance to VIM
- Params:
- start: indicates if VM must start or boot in pause mode. Ignored
- image_id,flavor_id: iamge and flavor uuid
- net_list: list of interfaces, each one is a dictionary with:
- name:
- net_id: network uuid to connect
- vpci: virtual vcpi to assign, ignored because openstack lack #TODO
- model: interface model, ignored #TODO
- mac_address: used for SR-IOV ifaces #TODO for other types
- use: 'data', 'bridge', 'mgmt'
- type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
- vim_id: filled/added by this function
- floating_ip: True/False (or it can be None)
- port_security: True/False
- 'cloud_config': (optional) dictionary with:
- 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
- 'users': (optional) list of users to be inserted, each item is a dict with:
- 'name': (mandatory) user name,
- 'key-pairs': (optional) list of strings with the public key to be inserted to the user
- 'user-data': (optional) string is a text script to be passed directly to cloud-init
- 'config-files': (optional). List of files to be transferred. Each item is a dict with:
- 'dest': (mandatory) string with the destination absolute path
- 'encoding': (optional, by default text). Can be one of:
- 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
- 'content' (mandatory): string with the content of the file
- 'permissions': (optional) string with file permissions, typically octal notation '0644'
- 'owner': (optional) file owner, string with the format 'owner:group'
- 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
- 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
- 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
- 'size': (mandatory) string with the size of the disk in GB
- 'vim_id' (optional) should use this existing volume id
- availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
- availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
- availability_zone_index is None
- #TODO ip, security groups
- Returns a tuple with the instance identifier and created_items or raises an exception on error
- created_items can be None or a dictionary where this method can include key-values that will be passed to
- the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
- Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
- as not present.
- """
- self.logger.debug(
- "new_vminstance input: image='%s' flavor='%s' nics='%s'",
- image_id,
- flavor_id,
- str(net_list),
- )
+ def _prepare_port_dict_security_groups(self, net: dict, port_dict: dict) -> None:
+ """Fill up the security_groups in the port_dict.
- try:
- server = None
- created_items = {}
- # metadata = {}
- net_list_vim = []
- external_network = []
- # ^list of external networks to be connected to instance, later on used to create floating_ip
- no_secured_ports = [] # List of port-is with port-security disabled
- self._reload_connection()
- # metadata_vpci = {} # For a specific neutron plugin
- block_device_mapping = None
+ Args:
+ net (dict): Network details
+ port_dict (dict): Port details
- for net in net_list:
- if not net.get("net_id"): # skip non connected iface
- continue
+ """
+ if (
+ self.config.get("security_groups")
+ and net.get("port_security") is not False
+ and not self.config.get("no_port_security_extension")
+ ):
+ # NOTE(review): assumes self._get_ids_from_name() populates
+ # self.security_groups_id as a side effect — confirm before relying on it
+ if not self.security_groups_id:
+ self._get_ids_from_name()
- port_dict = {
- "network_id": net["net_id"],
- "name": net.get("name"),
- "admin_state_up": True,
- }
+ port_dict["security_groups"] = self.security_groups_id
- if (
- self.config.get("security_groups")
- and net.get("port_security") is not False
- and not self.config.get("no_port_security_extension")
- ):
- if not self.security_groups_id:
- self._get_ids_from_name()
+ def _prepare_port_dict_binding(self, net: dict, port_dict: dict) -> None:
+ """Fill up the network binding depending on network type in the port_dict.
- port_dict["security_groups"] = self.security_groups_id
+ Args:
+ net (dict): Network details
+ port_dict (dict): Port details
- if net["type"] == "virtual":
- pass
- # if "vpci" in net:
- # metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
- elif net["type"] == "VF" or net["type"] == "SR-IOV": # for VF
- # if "vpci" in net:
- # if "VF" not in metadata_vpci:
- # metadata_vpci["VF"]=[]
- # metadata_vpci["VF"].append([ net["vpci"], "" ])
- port_dict["binding:vnic_type"] = "direct"
-
- # VIO specific Changes
- if self.vim_type == "VIO":
- # Need to create port with port_security_enabled = False and no-security-groups
- port_dict["port_security_enabled"] = False
- port_dict["provider_security_groups"] = []
- port_dict["security_groups"] = []
- else: # For PT PCI-PASSTHROUGH
- # if "vpci" in net:
- # if "PF" not in metadata_vpci:
- # metadata_vpci["PF"]=[]
- # metadata_vpci["PF"].append([ net["vpci"], "" ])
- port_dict["binding:vnic_type"] = "direct-physical"
-
- if not port_dict["name"]:
- port_dict["name"] = name
-
- if net.get("mac_address"):
- port_dict["mac_address"] = net["mac_address"]
-
- if net.get("ip_address"):
- port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
- # TODO add "subnet_id": <subnet_id>
-
- new_port = self.neutron.create_port({"port": port_dict})
- created_items["port:" + str(new_port["port"]["id"])] = True
- net["mac_adress"] = new_port["port"]["mac_address"]
- net["vim_id"] = new_port["port"]["id"]
- # if try to use a network without subnetwork, it will return a emtpy list
- fixed_ips = new_port["port"].get("fixed_ips")
-
- if fixed_ips:
- net["ip"] = fixed_ips[0].get("ip_address")
- else:
- net["ip"] = None
-
- port = {"port-id": new_port["port"]["id"]}
- if float(self.nova.api_version.get_string()) >= 2.32:
- port["tag"] = new_port["port"]["name"]
-
- net_list_vim.append(port)
-
- if net.get("floating_ip", False):
- net["exit_on_floating_ip_error"] = True
- external_network.append(net)
- elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
- net["exit_on_floating_ip_error"] = False
- external_network.append(net)
- net["floating_ip"] = self.config.get("use_floating_ip")
-
- # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
- # is dropped.
- # As a workaround we wait until the VM is active and then disable the port-security
- if net.get("port_security") is False and not self.config.get(
- "no_port_security_extension"
- ):
- no_secured_ports.append(
- (
- new_port["port"]["id"],
- net.get("port_security_disable_strategy"),
- )
- )
+ """
+ if not net.get("type"):
+ raise vimconn.VimConnException("Type is missing in the network details.")
- # if metadata_vpci:
- # metadata = {"pci_assignement": json.dumps(metadata_vpci)}
- # if len(metadata["pci_assignement"]) >255:
- # #limit the metadata size
- # #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
- # self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
- # metadata = {}
+ if net["type"] == "virtual":
+ pass
- self.logger.debug(
- "name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
- name,
- image_id,
- flavor_id,
- str(net_list_vim),
- description,
- )
+ # For VF
+ elif net["type"] == "VF" or net["type"] == "SR-IOV":
- # cloud config
- config_drive, userdata = self._create_user_data(cloud_config)
+ port_dict["binding:vnic_type"] = "direct"
- # Create additional volumes in case these are present in disk_list
- base_disk_index = ord("b")
- if disk_list:
- block_device_mapping = {}
- for disk in disk_list:
- if disk.get("vim_id"):
- block_device_mapping["_vd" + chr(base_disk_index)] = disk[
- "vim_id"
- ]
- else:
- if "image_id" in disk:
- volume = self.cinder.volumes.create(
- size=disk["size"],
- name=name + "_vd" + chr(base_disk_index),
- imageRef=disk["image_id"],
- )
- else:
- volume = self.cinder.volumes.create(
- size=disk["size"],
- name=name + "_vd" + chr(base_disk_index),
- )
+ # VIO specific Changes
+ if self.vim_type == "VIO":
+ # Need to create port with port_security_enabled = False and no-security-groups
+ port_dict["port_security_enabled"] = False
+ port_dict["provider_security_groups"] = []
+ port_dict["security_groups"] = []
- created_items["volume:" + str(volume.id)] = True
- block_device_mapping["_vd" + chr(base_disk_index)] = volume.id
+ else:
+ # For PT PCI-PASSTHROUGH
+ port_dict["binding:vnic_type"] = "direct-physical"
- base_disk_index += 1
+ @staticmethod
+ def _set_fixed_ip(new_port: dict, net: dict) -> None:
+ """Set the "ip" parameter in net dictionary.
- # Wait until created volumes are with status available
- elapsed_time = 0
- while elapsed_time < volume_timeout:
- for created_item in created_items:
- v, _, volume_id = created_item.partition(":")
- if v == "volume":
- if self.cinder.volumes.get(volume_id).status != "available":
- break
- else: # all ready: break from while
- break
+ Args:
+ new_port (dict): New created port
+ net (dict): Network details
- time.sleep(5)
- elapsed_time += 5
+ """
+ fixed_ips = new_port["port"].get("fixed_ips")
- # If we exceeded the timeout rollback
- if elapsed_time >= volume_timeout:
- raise vimconn.VimConnException(
- "Timeout creating volumes for instance " + name,
- http_code=vimconn.HTTP_Request_Timeout,
- )
+ # A port created on a network without any subnet returns an empty
+ # "fixed_ips" list; record the ip as None in that case
+ if fixed_ips:
+ net["ip"] = fixed_ips[0].get("ip_address")
+ else:
+ net["ip"] = None
- # get availability Zone
- vm_av_zone = self._get_vm_availability_zone(
- availability_zone_index, availability_zone_list
- )
+ @staticmethod
+ def _prepare_port_dict_mac_ip_addr(net: dict, port_dict: dict) -> None:
+ """Fill up the mac_address and fixed_ips in port_dict.
- self.logger.debug(
- "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
- "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
- "block_device_mapping={})".format(
- name,
- image_id,
- flavor_id,
- net_list_vim,
- self.config.get("security_groups"),
- vm_av_zone,
- self.config.get("keypair"),
- userdata,
- config_drive,
- block_device_mapping,
- )
- )
- server = self.nova.servers.create(
- name,
- image_id,
- flavor_id,
- nics=net_list_vim,
- security_groups=self.config.get("security_groups"),
- # TODO remove security_groups in future versions. Already at neutron port
- availability_zone=vm_av_zone,
- key_name=self.config.get("keypair"),
- userdata=userdata,
- config_drive=config_drive,
- block_device_mapping=block_device_mapping,
- ) # , description=description)
+ Args:
+ net (dict): Network details
+ port_dict (dict): Port details
- vm_start_time = time.time()
- # Previously mentioned workaround to wait until the VM is active and then disable the port-security
- if no_secured_ports:
- self.__wait_for_vm(server.id, "ACTIVE")
+ """
+ if net.get("mac_address"):
+ port_dict["mac_address"] = net["mac_address"]
- for port in no_secured_ports:
- port_update = {
- "port": {"port_security_enabled": False, "security_groups": None}
- }
+ if net.get("ip_address"):
+ port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+ # TODO add "subnet_id": <subnet_id>
- if port[1] == "allow-address-pairs":
- port_update = {
- "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
- }
+ def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
+ """Create new port using neutron.
- try:
- self.neutron.update_port(port[0], port_update)
- except Exception:
- raise vimconn.VimConnException(
- "It was not possible to disable port security for port {}".format(
- port[0]
- )
- )
+ Args:
+ port_dict (dict): Port details
+ created_items (dict): All created items
+ net (dict): Network details
- # print "DONE :-)", server
+ Returns:
+ new_port (dict): New created port
- # pool_id = None
- for floating_network in external_network:
- try:
- assigned = False
- floating_ip_retries = 3
- # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
- # several times
- while not assigned:
- floating_ips = self.neutron.list_floatingips().get(
- "floatingips", ()
- )
- random.shuffle(floating_ips) # randomize
- for fip in floating_ips:
- if (
- fip.get("port_id")
- or fip.get("tenant_id") != server.tenant_id
- ):
- continue
+ """
+ new_port = self.neutron.create_port({"port": port_dict})
+ # Register the port so delete_vminstance can roll it back
+ created_items["port:" + str(new_port["port"]["id"])] = True
+ # NOTE(review): "mac_adress" (sic) appears to be the legacy key read by
+ # callers — keep the misspelling unless all readers are updated together
+ net["mac_adress"] = new_port["port"]["mac_address"]
+ net["vim_id"] = new_port["port"]["id"]
- if isinstance(floating_network["floating_ip"], str):
- if (
- fip.get("floating_network_id")
- != floating_network["floating_ip"]
- ):
- continue
+ return new_port
- free_floating_ip = fip["id"]
- break
- else:
- if (
- isinstance(floating_network["floating_ip"], str)
- and floating_network["floating_ip"].lower() != "true"
- ):
- pool_id = floating_network["floating_ip"]
- else:
- # Find the external network
- external_nets = list()
-
- for net in self.neutron.list_networks()["networks"]:
- if net["router:external"]:
- external_nets.append(net)
-
- if len(external_nets) == 0:
- raise vimconn.VimConnException(
- "Cannot create floating_ip automatically since "
- "no external network is present",
- http_code=vimconn.HTTP_Conflict,
- )
+ def _create_port(
+ self, net: dict, name: str, created_items: dict
+ ) -> Tuple[dict, dict]:
+ """Create port using net details.
- if len(external_nets) > 1:
- raise vimconn.VimConnException(
- "Cannot create floating_ip automatically since "
- "multiple external networks are present",
- http_code=vimconn.HTTP_Conflict,
- )
+ Args:
+ net (dict): Network details
+ name (str): Name to be used as network name if net dict does not include name
+ created_items (dict): All created items
- pool_id = external_nets[0].get("id")
-
- param = {
- "floatingip": {
- "floating_network_id": pool_id,
- "tenant_id": server.tenant_id,
- }
- }
-
- try:
- # self.logger.debug("Creating floating IP")
- new_floating_ip = self.neutron.create_floatingip(param)
- free_floating_ip = new_floating_ip["floatingip"]["id"]
- created_items[
- "floating_ip:" + str(free_floating_ip)
- ] = True
- except Exception as e:
- raise vimconn.VimConnException(
- type(e).__name__
- + ": Cannot create new floating_ip "
- + str(e),
- http_code=vimconn.HTTP_Conflict,
- )
+ Returns:
+ new_port, port New created port, port dictionary
- try:
- # for race condition ensure not already assigned
- fip = self.neutron.show_floatingip(free_floating_ip)
+ """
- if fip["floatingip"]["port_id"]:
- continue
+ port_dict = {
+ "network_id": net["net_id"],
+ "name": net.get("name"),
+ "admin_state_up": True,
+ }
- # the vim_id key contains the neutron.port_id
- self.neutron.update_floatingip(
- free_floating_ip,
- {"floatingip": {"port_id": floating_network["vim_id"]}},
- )
- # for race condition ensure not re-assigned to other VM after 5 seconds
- time.sleep(5)
- fip = self.neutron.show_floatingip(free_floating_ip)
+ if not port_dict["name"]:
+ port_dict["name"] = name
- if (
- fip["floatingip"]["port_id"]
- != floating_network["vim_id"]
- ):
- self.logger.error(
- "floating_ip {} re-assigned to other port".format(
- free_floating_ip
- )
- )
- continue
+ self._prepare_port_dict_security_groups(net, port_dict)
- self.logger.debug(
- "Assigned floating_ip {} to VM {}".format(
- free_floating_ip, server.id
- )
- )
- assigned = True
- except Exception as e:
- # openstack need some time after VM creation to assign an IP. So retry if fails
- vm_status = self.nova.servers.get(server.id).status
-
- if vm_status not in ("ACTIVE", "ERROR"):
- if time.time() - vm_start_time < server_timeout:
- time.sleep(5)
- continue
- elif floating_ip_retries > 0:
- floating_ip_retries -= 1
- continue
+ self._prepare_port_dict_binding(net, port_dict)
- raise vimconn.VimConnException(
- "Cannot create floating_ip: {} {}".format(
- type(e).__name__, e
- ),
- http_code=vimconn.HTTP_Conflict,
- )
+ vimconnector._prepare_port_dict_mac_ip_addr(net, port_dict)
- except Exception as e:
- if not floating_network["exit_on_floating_ip_error"]:
- self.logger.error("Cannot create floating_ip. %s", str(e))
- continue
+ new_port = self._create_new_port(port_dict, created_items, net)
- raise
+ vimconnector._set_fixed_ip(new_port, net)
- return server.id, created_items
- # except nvExceptions.NotFound as e:
- # error_value=-vimconn.HTTP_Not_Found
- # error_text= "vm instance %s not found" % vm_id
- # except TypeError as e:
- # raise vimconn.VimConnException(type(e).__name__ + ": "+ str(e), http_code=vimconn.HTTP_Bad_Request)
+ port = {"port-id": new_port["port"]["id"]}
- except Exception as e:
- server_id = None
- if server:
- server_id = server.id
+ if float(self.nova.api_version.get_string()) >= 2.32:
+ port["tag"] = new_port["port"]["name"]
- try:
- self.delete_vminstance(server_id, created_items)
- except Exception as e2:
- self.logger.error("new_vminstance rollback fail {}".format(e2))
+ return new_port, port
- self._format_exception(e)
+ def _prepare_network_for_vminstance(
+ self,
+ name: str,
+ net_list: list,
+ created_items: dict,
+ net_list_vim: list,
+ external_network: list,
+ no_secured_ports: list,
+ ) -> None:
+ """Create port and fill up net dictionary for new VM instance creation.
- def get_vminstance(self, vm_id):
- """Returns the VM instance information from VIM"""
- # self.logger.debug("Getting VM from VIM")
- try:
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
- # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+ Args:
+ name (str): Name of network
+ net_list (list): List of networks
+ created_items (dict): All created items belongs to a VM
+ net_list_vim (list): List of ports
+ external_network (list): List of external-networks
+ no_secured_ports (list): Port security disabled ports
+ """
- return server.to_dict()
- except (
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- nvExceptions.NotFound,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ self._reload_connection()
- def get_vminstance_console(self, vm_id, console_type="vnc"):
- """
- Get a console for the virtual machine
- Params:
- vm_id: uuid of the VM
- console_type, can be:
- "novnc" (by default), "xvpvnc" for VNC types,
- "rdp-html5" for RDP types, "spice-html5" for SPICE types
- Returns dict with the console parameters:
- protocol: ssh, ftp, http, https, ...
- server: usually ip address
- port: the http, ssh, ... port
- suffix: extra text, e.g. the http path and query string
- """
- self.logger.debug("Getting VM CONSOLE from VIM")
+ for net in net_list:
+ # Skip non-connected iface
+ if not net.get("net_id"):
+ continue
- try:
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
+ new_port, port = self._create_port(net, name, created_items)
- if console_type is None or console_type == "novnc":
- console_dict = server.get_vnc_console("novnc")
- elif console_type == "xvpvnc":
- console_dict = server.get_vnc_console(console_type)
- elif console_type == "rdp-html5":
- console_dict = server.get_rdp_console(console_type)
- elif console_type == "spice-html5":
- console_dict = server.get_spice_console(console_type)
- else:
- raise vimconn.VimConnException(
- "console type '{}' not allowed".format(console_type),
- http_code=vimconn.HTTP_Bad_Request,
- )
+ net_list_vim.append(port)
- console_dict1 = console_dict.get("console")
+ if net.get("floating_ip", False):
+ net["exit_on_floating_ip_error"] = True
+ external_network.append(net)
- if console_dict1:
- console_url = console_dict1.get("url")
+ elif net["use"] == "mgmt" and self.config.get("use_floating_ip"):
+ net["exit_on_floating_ip_error"] = False
+ external_network.append(net)
+ net["floating_ip"] = self.config.get("use_floating_ip")
- if console_url:
- # parse console_url
- protocol_index = console_url.find("//")
- suffix_index = (
- console_url[protocol_index + 2 :].find("/") + protocol_index + 2
- )
- port_index = (
- console_url[protocol_index + 2 : suffix_index].find(":")
- + protocol_index
- + 2
+ # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic
+ # is dropped. As a workaround we wait until the VM is active and then disable the port-security
+ if net.get("port_security") is False and not self.config.get(
+ "no_port_security_extension"
+ ):
+ no_secured_ports.append(
+ (
+ new_port["port"]["id"],
+ net.get("port_security_disable_strategy"),
)
+ )
- if protocol_index < 0 or port_index < 0 or suffix_index < 0:
- return (
- -vimconn.HTTP_Internal_Server_Error,
- "Unexpected response from VIM",
- )
+ def _prepare_persistent_root_volumes(
+ self,
+ name: str,
+ vm_av_zone: list,
+ disk: dict,
+ base_disk_index: int,
+ block_device_mapping: dict,
+ existing_vim_volumes: list,
+ created_items: dict,
+ ) -> Optional[str]:
+ """Prepare persistent root volumes for new VM instance.
- console_dict = {
- "protocol": console_url[0:protocol_index],
- "server": console_url[protocol_index + 2 : port_index],
- "port": console_url[port_index:suffix_index],
- "suffix": console_url[suffix_index + 1 :],
- }
- protocol_index += 2
+ Args:
+ name (str): Name of VM instance
+ vm_av_zone (list): List of availability zones
+ disk (dict): Disk details
+ base_disk_index (int): Disk index
+ block_device_mapping (dict): Block device details
+ existing_vim_volumes (list): Existing disk details
+ created_items (dict): All created items belongs to VM
- return console_dict
+ Returns:
+ boot_volume_id (str): ID of boot volume
+
+ """
+ # Disk may include only vim_volume_id or only vim_id."
+ # Use existing persistent root volume finding with volume_id or vim_id
+ key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
+
+ if disk.get(key_id):
+
+ block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+ existing_vim_volumes.append({"id": disk[key_id]})
+
+ else:
+ # Create persistent root volume
+ volume = self.cinder.volumes.create(
+ size=disk["size"],
+ name=name + "vd" + chr(base_disk_index),
+ imageRef=disk["image_id"],
+ # Make sure volume is in the same AZ as the VM to be attached to
+ availability_zone=vm_av_zone,
+ )
+ boot_volume_id = volume.id
+ created_items["volume:" + str(volume.id)] = True
+ block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+
+ return boot_volume_id
+
+ def _prepare_non_root_persistent_volumes(
+ self,
+ name: str,
+ disk: dict,
+ vm_av_zone: list,
+ block_device_mapping: dict,
+ base_disk_index: int,
+ existing_vim_volumes: list,
+ created_items: dict,
+ ) -> None:
+ """Prepare persistent volumes for new VM instance.
+
+ Args:
+ name (str): Name of VM instance
+ disk (dict): Disk details
+ vm_av_zone (list): List of availability zones
+ block_device_mapping (dict): Block device details
+ base_disk_index (int): Disk index
+ existing_vim_volumes (list): Existing disk details
+ created_items (dict): All created items belongs to VM
+ """
+ # Non-root persistent volumes
+ # Disk may include only vim_volume_id or only vim_id.
+ key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
+
+ if disk.get(key_id):
+
+ # Use existing persistent volume
+ block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
+ existing_vim_volumes.append({"id": disk[key_id]})
+
+ else:
+ # Create persistent volume (no imageRef: plain data volume, unlike
+ # the root volume which is created from an image)
+ volume = self.cinder.volumes.create(
+ size=disk["size"],
+ name=name + "vd" + chr(base_disk_index),
+ # Make sure volume is in the same AZ as the VM to be attached to
+ availability_zone=vm_av_zone,
+ )
+ # Register the volume so delete_vminstance can roll it back
+ created_items["volume:" + str(volume.id)] = True
+ block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+
+ def _wait_for_created_volumes_availability(
+ self, elapsed_time: int, created_items: dict
+ ) -> int:
+ """Wait till created volumes become available.
+
+ Args:
+ elapsed_time (int): Passed time while waiting
+ created_items (dict): All created items belongs to VM
+
+ Returns:
+ elapsed_time (int): Time spent while waiting
+
+ """
+
+ while elapsed_time < volume_timeout:
+ for created_item in created_items:
+ v, _, volume_id = created_item.partition(":")
+ if v == "volume":
+ if self.cinder.volumes.get(volume_id).status != "available":
+ break
+ else:
+ # All ready: for/else — no "volume" item pending, break from while
+ break
+
+ time.sleep(5)
+ elapsed_time += 5
+
+ # NOTE(review): a volume stuck in "error" never turns "available", so it
+ # loops here until volume_timeout expires; caller treats that as timeout
+ return elapsed_time
+
+ def _wait_for_existing_volumes_availability(
+ self, elapsed_time: int, existing_vim_volumes: list
+ ) -> int:
+ """Wait till existing volumes become available.
+
+ Args:
+ elapsed_time (int): Passed time while waiting
+ existing_vim_volumes (list): Existing volume details
+
+ Returns:
+ elapsed_time (int): Time spent while waiting
+
+ """
+
+ while elapsed_time < volume_timeout:
+ for volume in existing_vim_volumes:
+ if self.cinder.volumes.get(volume["id"]).status != "available":
+ break
+ else: # all ready: break from while
+ break
+
+ time.sleep(5)
+ elapsed_time += 5
+
+ return elapsed_time
+
+ def _prepare_disk_for_vminstance(
+ self,
+ name: str,
+ existing_vim_volumes: list,
+ created_items: dict,
+ vm_av_zone: list,
+ block_device_mapping: dict,
+ disk_list: list = None,
+ ) -> None:
+ """Prepare all volumes for new VM instance.
+
+ Args:
+ name (str): Name of Instance
+ existing_vim_volumes (list): List of existing volumes
+ created_items (dict): All created items belongs to VM
+ vm_av_zone (list): VM availability zone
+ block_device_mapping (dict): Block devices to be attached to VM
+ disk_list (list): List of disks
+
+ """
+ # Create additional volumes in case these are present in disk_list
+ base_disk_index = ord("b")
+ boot_volume_id = None
+ elapsed_time = 0
+
+ for disk in disk_list:
+ if "image_id" in disk:
+ # Root persistent volume
+ base_disk_index = ord("a")
+ boot_volume_id = self._prepare_persistent_root_volumes(
+ name=name,
+ vm_av_zone=vm_av_zone,
+ disk=disk,
+ base_disk_index=base_disk_index,
+ block_device_mapping=block_device_mapping,
+ existing_vim_volumes=existing_vim_volumes,
+ created_items=created_items,
+ )
+ else:
+ # Non-root persistent volume
+ self._prepare_non_root_persistent_volumes(
+ name=name,
+ disk=disk,
+ vm_av_zone=vm_av_zone,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ existing_vim_volumes=existing_vim_volumes,
+ created_items=created_items,
+ )
+ base_disk_index += 1
+
+ # Wait until created volumes are with status available
+ elapsed_time = self._wait_for_created_volumes_availability(
+ elapsed_time, created_items
+ )
+ # Wait until existing volumes in vim are with status available
+ elapsed_time = self._wait_for_existing_volumes_availability(
+ elapsed_time, existing_vim_volumes
+ )
+ # If we exceeded the timeout rollback
+ if elapsed_time >= volume_timeout:
+ raise vimconn.VimConnException(
+ "Timeout creating volumes for instance " + name,
+ http_code=vimconn.HTTP_Request_Timeout,
+ )
+ if boot_volume_id:
+ self.cinder.volumes.set_bootable(boot_volume_id, True)
+
+ def _find_the_external_network_for_floating_ip(self):
+ """Find the single external network and return its ID as floating-IP pool.
+
+ Returns:
+ pool_id (str): External network (pool) ID
+
+ Raises:
+ VimConnException: if zero or more than one external network exists
+
+ """
+
+ # Find the external network
+ external_nets = list()
+
+ for net in self.neutron.list_networks()["networks"]:
+ if net["router:external"]:
+ external_nets.append(net)
+
+ if len(external_nets) == 0:
+ raise vimconn.VimConnException(
+ "Cannot create floating_ip automatically since "
+ "no external network is present",
+ http_code=vimconn.HTTP_Conflict,
+ )
+
+ # Ambiguous: cannot guess which external network the user wants
+ if len(external_nets) > 1:
+ raise vimconn.VimConnException(
+ "Cannot create floating_ip automatically since "
+ "multiple external networks are present",
+ http_code=vimconn.HTTP_Conflict,
+ )
+
+ # Pool ID
+ return external_nets[0].get("id")
+
+ def _neutron_create_float_ip(self, param: dict, created_items: dict) -> None:
+ """Trigger neutron to create a new floating IP using external network ID.
+
+ Args:
+ param (dict): Input parameters to create a floating IP
+ created_items (dict): All created items belongs to new VM instance
+
+ Raises:
+
+ VimConnException
+ """
+ try:
+ self.logger.debug("Creating floating IP")
+ new_floating_ip = self.neutron.create_floatingip(param)
+ free_floating_ip = new_floating_ip["floatingip"]["id"]
+ created_items["floating_ip:" + str(free_floating_ip)] = True
+
+ except Exception as e:
+ raise vimconn.VimConnException(
+ type(e).__name__ + ": Cannot create new floating_ip " + str(e),
+ http_code=vimconn.HTTP_Conflict,
+ )
+
+ def _create_floating_ip(
+ self, floating_network: dict, server: object, created_items: dict
+ ) -> None:
+ """Get the available Pool ID and create a new floating IP.
+
+ Args:
+ floating_network (dict): Dict including external network ID
+ server (object): Server object
+ created_items (dict): All created items belongs to new VM instance
+
+ """
+
+ # Pool_id is available
+ if (
+ isinstance(floating_network["floating_ip"], str)
+ and floating_network["floating_ip"].lower() != "true"
+ ):
+ pool_id = floating_network["floating_ip"]
+
+ # Find the Pool_id
+ else:
+ pool_id = self._find_the_external_network_for_floating_ip()
+
+ param = {
+ "floatingip": {
+ "floating_network_id": pool_id,
+ "tenant_id": server.tenant_id,
+ }
+ }
+
+ self._neutron_create_float_ip(param, created_items)
+
+ def _find_floating_ip(
+ self,
+ server: object,
+ floating_ips: list,
+ floating_network: dict,
+ ) -> Optional[str]:
+ """Find the available free floating IPs if there are.
+
+ Args:
+ server (object): Server object
+ floating_ips (list): List of floating IPs
+ floating_network (dict): Details of floating network such as ID
+
+ Returns:
+ free_floating_ip (str): Free floating ip address
+
+ """
+ for fip in floating_ips:
+ if fip.get("port_id") or fip.get("tenant_id") != server.tenant_id:
+ continue
+
+ if isinstance(floating_network["floating_ip"], str):
+ if fip.get("floating_network_id") != floating_network["floating_ip"]:
+ continue
+
+ return fip["id"]
+
def _assign_floating_ip(
    self, free_floating_ip: str, floating_network: dict
) -> Dict:
    """Bind a free floating IP to the instance port and return its details.

    Args:
        free_floating_ip (str): Floating IP to be assigned
        floating_network (dict): ID of floating network

    Returns:
        fip (dict): Floating ip details as reported by neutron
    """
    # The vim_id key contains the neutron.port_id
    payload = {"floatingip": {"port_id": floating_network["vim_id"]}}
    self.neutron.update_floatingip(free_floating_ip, payload)

    # For race condition ensure not re-assigned to other VM after 5 seconds
    time.sleep(5)

    return self.neutron.show_floatingip(free_floating_ip)
+
+ def _get_free_floating_ip(
+ self, server: object, floating_network: dict, created_items: dict
+ ) -> Optional[str]:
+ """Get the free floating IP address.
+
+ Args:
+ server (object): Server Object
+ floating_network (dict): Floating network details
+ created_items (dict): All created items belongs to new VM instance
+
+ Returns:
+ free_floating_ip (str): Free floating ip addr
+
+ """
+
+ floating_ips = self.neutron.list_floatingips().get("floatingips", ())
+
+ # Randomize
+ random.shuffle(floating_ips)
+
+ return self._find_floating_ip(
+ server, floating_ips, floating_network, created_items
+ )
+
+ def _prepare_external_network_for_vminstance(
+ self,
+ external_network: list,
+ server: object,
+ created_items: dict,
+ vm_start_time: float,
+ ) -> None:
+ """Assign floating IP address for VM instance.
+
+ Args:
+ external_network (list): ID of External network
+ server (object): Server Object
+ created_items (dict): All created items belongs to new VM instance
+ vm_start_time (float): Time as a floating point number expressed in seconds since the epoch, in UTC
+
+ Raises:
+ VimConnException
+
+ """
+ for floating_network in external_network:
+ try:
+ assigned = False
+ floating_ip_retries = 3
+ # In case of RO in HA there can be conflicts, two RO trying to assign same floating IP, so retry
+ # several times
+ while not assigned:
+
+ free_floating_ip = self._get_free_floating_ip(
+ server, floating_network, created_items
+ )
+
+ if not free_floating_ip:
+ self._create_floating_ip(
+ floating_network, server, created_items
+ )
+
+ try:
+ # For race condition ensure not already assigned
+ fip = self.neutron.show_floatingip(free_floating_ip)
+
+ if fip["floatingip"].get("port_id"):
+ continue
+
+ # Assign floating ip
+ fip = self._assign_floating_ip(
+ free_floating_ip, floating_network
+ )
+
+ if fip["floatingip"]["port_id"] != floating_network["vim_id"]:
+ self.logger.warning(
+ "floating_ip {} re-assigned to other port".format(
+ free_floating_ip
+ )
+ )
+ continue
+
+ self.logger.debug(
+ "Assigned floating_ip {} to VM {}".format(
+ free_floating_ip, server.id
+ )
+ )
+
+ assigned = True
+
+ except Exception as e:
+ # Openstack need some time after VM creation to assign an IP. So retry if fails
+ vm_status = self.nova.servers.get(server.id).status
+
+ if vm_status not in ("ACTIVE", "ERROR"):
+ if time.time() - vm_start_time < server_timeout:
+ time.sleep(5)
+ continue
+ elif floating_ip_retries > 0:
+ floating_ip_retries -= 1
+ continue
+
+ raise vimconn.VimConnException(
+ "Cannot create floating_ip: {} {}".format(
+ type(e).__name__, e
+ ),
+ http_code=vimconn.HTTP_Conflict,
+ )
+
+ except Exception as e:
+ if not floating_network["exit_on_floating_ip_error"]:
+ self.logger.error("Cannot create floating_ip. %s", str(e))
+ continue
+
+ raise
+
+ def _update_port_security_for_vminstance(
+ self,
+ no_secured_ports: list,
+ server: object,
+ ) -> None:
+ """Updates the port security according to no_secured_ports list.
+
+ Args:
+ no_secured_ports (list): List of ports that security will be disabled
+ server (object): Server Object
+
+ Raises:
+ VimConnException
+
+ """
+ # Wait until the VM is active and then disable the port-security
+ if no_secured_ports:
+ self.__wait_for_vm(server.id, "ACTIVE")
+
+ for port in no_secured_ports:
+ port_update = {
+ "port": {"port_security_enabled": False, "security_groups": None}
+ }
+
+ if port[1] == "allow-address-pairs":
+ port_update = {
+ "port": {"allowed_address_pairs": [{"ip_address": "0.0.0.0/0"}]}
+ }
+
+ try:
+ self.neutron.update_port(port[0], port_update)
+
+ except Exception:
+
+ raise vimconn.VimConnException(
+ "It was not possible to disable port security for port {}".format(
+ port[0]
+ )
+ )
+
def new_vminstance(
    self,
    name: str,
    description: str,
    start: bool,
    image_id: str,
    flavor_id: str,
    affinity_group_list: list,
    net_list: list,
    cloud_config=None,
    disk_list=None,
    availability_zone_index=None,
    availability_zone_list=None,
) -> tuple:
    """Adds a VM instance to VIM.

    Args:
        name (str): name of VM
        description (str): description
        start (bool): indicates if VM must start or boot in pause mode. Ignored
        image_id (str) image uuid
        flavor_id (str) flavor uuid
        affinity_group_list (list): list of affinity groups, each one is a dictionary.Ignore if empty.
        net_list (list): list of interfaces, each one is a dictionary with:
            name: name of network
            net_id: network uuid to connect
            vpci: virtual vcpi to assign, ignored because openstack lack #TODO
            model: interface model, ignored #TODO
            mac_address: used for SR-IOV ifaces #TODO for other types
            use: 'data', 'bridge', 'mgmt'
            type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
            vim_id: filled/added by this function
            floating_ip: True/False (or it can be None)
            port_security: True/False
        cloud_config (dict): (optional) dictionary with:
            key-pairs: (optional) list of strings with the public key to be inserted to the default user
            users: (optional) list of users to be inserted, each item is a dict with:
                name: (mandatory) user name,
                key-pairs: (optional) list of strings with the public key to be inserted to the user
            user-data: (optional) string is a text script to be passed directly to cloud-init
            config-files: (optional). List of files to be transferred. Each item is a dict with:
                dest: (mandatory) string with the destination absolute path
                encoding: (optional, by default text). Can be one of:
                    'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
                content : (mandatory) string with the content of the file
                permissions: (optional) string with file permissions, typically octal notation '0644'
                owner: (optional) file owner, string with the format 'owner:group'
            boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
        disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
            image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
            size: (mandatory) string with the size of the disk in GB
            vim_id: (optional) should use this existing volume id
        availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
        availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignore if
            availability_zone_index is None
        #TODO ip, security groups

    Returns:
        A tuple with the instance identifier and created_items or raises an exception on error
        created_items can be None or a dictionary where this method can include key-values that will be passed to
        the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
        Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
        as not present.
    """
    self.logger.debug(
        "new_vminstance input: image='%s' flavor='%s' nics='%s'",
        image_id,
        flavor_id,
        str(net_list),
    )

    try:
        # server is kept outside the helpers so the rollback in the except
        # clause below can tell whether the nova instance was ever created
        server = None
        created_items = {}
        net_list_vim = []
        # list of external networks to be connected to instance, later on used to create floating_ip
        external_network = []
        # List of ports with port-security disabled
        no_secured_ports = []
        block_device_mapping = {}
        existing_vim_volumes = []
        server_group_id = None
        scheduller_hints = {}

        # Check the Openstack Connection
        self._reload_connection()

        # Prepare network list
        self._prepare_network_for_vminstance(
            name=name,
            net_list=net_list,
            created_items=created_items,
            net_list_vim=net_list_vim,
            external_network=external_network,
            no_secured_ports=no_secured_ports,
        )

        # Cloud config
        config_drive, userdata = self._create_user_data(cloud_config)

        # Get availability Zone
        vm_av_zone = self._get_vm_availability_zone(
            availability_zone_index, availability_zone_list
        )

        if disk_list:
            # Prepare disks: fills block_device_mapping and registers the
            # created/pre-existing volumes for rollback bookkeeping
            self._prepare_disk_for_vminstance(
                name=name,
                existing_vim_volumes=existing_vim_volumes,
                created_items=created_items,
                vm_av_zone=vm_av_zone,
                block_device_mapping=block_device_mapping,
                disk_list=disk_list,
            )

        if affinity_group_list:
            # Only first id on the list will be used. Openstack restriction
            server_group_id = affinity_group_list[0]["affinity_group_id"]
            scheduller_hints["group"] = server_group_id

        self.logger.debug(
            "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
            "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
            "block_device_mapping={}, server_group={})".format(
                name,
                image_id,
                flavor_id,
                net_list_vim,
                self.config.get("security_groups"),
                vm_av_zone,
                self.config.get("keypair"),
                userdata,
                config_drive,
                block_device_mapping,
                server_group_id,
            )
        )

        # Create VM
        server = self.nova.servers.create(
            name=name,
            image=image_id,
            flavor=flavor_id,
            nics=net_list_vim,
            security_groups=self.config.get("security_groups"),
            # TODO remove security_groups in future versions. Already at neutron port
            availability_zone=vm_av_zone,
            key_name=self.config.get("keypair"),
            userdata=userdata,
            config_drive=config_drive,
            block_device_mapping=block_device_mapping,
            scheduler_hints=scheduller_hints,
        )

        # Reference time for the floating-IP assignment timeout below
        vm_start_time = time.time()

        self._update_port_security_for_vminstance(no_secured_ports, server)

        self._prepare_external_network_for_vminstance(
            external_network=external_network,
            server=server,
            created_items=created_items,
            vm_start_time=vm_start_time,
        )

        return server.id, created_items

    except Exception as e:
        # Rollback: delete the server (if created) and every tracked item
        server_id = None
        if server:
            server_id = server.id

        try:
            self.delete_vminstance(server_id, created_items)

        except Exception as e2:
            self.logger.error("new_vminstance rollback fail {}".format(e2))

        self._format_exception(e)
+
def get_vminstance(self, vm_id):
    """Returns the VM instance information from VIM"""
    try:
        self._reload_connection()
        instance = self.nova.servers.find(id=vm_id)
        # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
        return instance.to_dict()
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
        ConnectionError,
    ) as e:
        self._format_exception(e)
+
def get_vminstance_console(self, vm_id, console_type="vnc"):
    """
    Get a console for the virtual machine
    Params:
        vm_id: uuid of the VM
        console_type, can be:
            "novnc" (by default), "xvpvnc" for VNC types,
            "rdp-html5" for RDP types, "spice-html5" for SPICE types
    Returns dict with the console parameters:
        protocol: ssh, ftp, http, https, ...
        server: usually ip address
        port: the http, ssh, ... port
        suffix: extra text, e.g. the http path and query string
    """
    self.logger.debug("Getting VM CONSOLE from VIM")

    try:
        self._reload_connection()
        instance = self.nova.servers.find(id=vm_id)

        if console_type in (None, "novnc"):
            console_info = instance.get_vnc_console("novnc")
        elif console_type == "xvpvnc":
            console_info = instance.get_vnc_console(console_type)
        elif console_type == "rdp-html5":
            console_info = instance.get_rdp_console(console_type)
        elif console_type == "spice-html5":
            console_info = instance.get_spice_console(console_type)
        else:
            raise vimconn.VimConnException(
                "console type '{}' not allowed".format(console_type),
                http_code=vimconn.HTTP_Bad_Request,
            )

        console_body = console_info.get("console")
        if console_body:
            console_url = console_body.get("url")
            if console_url:
                # Split "proto://server:port/suffix" by index arithmetic
                proto_end = console_url.find("//")
                path_start = (
                    console_url[proto_end + 2 :].find("/") + proto_end + 2
                )
                port_start = (
                    console_url[proto_end + 2 : path_start].find(":")
                    + proto_end
                    + 2
                )

                if proto_end < 0 or port_start < 0 or path_start < 0:
                    return (
                        -vimconn.HTTP_Internal_Server_Error,
                        "Unexpected response from VIM",
                    )

                return {
                    "protocol": console_url[0:proto_end],
                    "server": console_url[proto_end + 2 : port_start],
                    "port": console_url[port_start:path_start],
                    "suffix": console_url[path_start + 1 :],
                }

        raise vimconn.VimConnUnexpectedResponse("Unexpected response from VIM")
    except (
        nvExceptions.NotFound,
    ) as e:
        self._format_exception(e)
- def delete_vminstance(self, vm_id, created_items=None):
- """Removes a VM instance from VIM. Returns the old identifier"""
- # print "osconnector: Getting VM from VIM"
+ def _delete_ports_by_id_wth_neutron(self, k_id: str) -> None:
+ """Neutron delete ports by id.
+ Args:
+ k_id (str): Port id in the VIM
+ """
+ try:
+
+ port_dict = self.neutron.list_ports()
+ existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
+
+ if k_id in existing_ports:
+ self.neutron.delete_port(k_id)
+
+ except Exception as e:
+
+ self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+
+ def _delete_volumes_by_id_wth_cinder(
+ self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
+ ) -> bool:
+ """Cinder delete volume by id.
+ Args:
+ k (str): Full item name in created_items
+ k_id (str): ID of floating ip in VIM
+ volumes_to_hold (list): Volumes not to delete
+ created_items (dict): All created items belongs to VM
+ """
+ try:
+ if k_id in volumes_to_hold:
+ return
+
+ if self.cinder.volumes.get(k_id).status != "available":
+ return True
+
+ else:
+ self.cinder.volumes.delete(k_id)
+ created_items[k] = None
+
+ except Exception as e:
+ self.logger.error(
+ "Error deleting volume: {}: {}".format(type(e).__name__, e)
+ )
+
+ def _delete_floating_ip_by_id(self, k: str, k_id: str, created_items: dict) -> None:
+ """Neutron delete floating ip by id.
+ Args:
+ k (str): Full item name in created_items
+ k_id (str): ID of floating ip in VIM
+ created_items (dict): All created items belongs to VM
+ """
+ try:
+ self.neutron.delete_floatingip(k_id)
+ created_items[k] = None
+
+ except Exception as e:
+ self.logger.error(
+ "Error deleting floating ip: {}: {}".format(type(e).__name__, e)
+ )
+
+ @staticmethod
+ def _get_item_name_id(k: str) -> Tuple[str, str]:
+ k_item, _, k_id = k.partition(":")
+ return k_item, k_id
+
+ def _delete_vm_ports_attached_to_network(self, created_items: dict) -> None:
+ """Delete VM ports attached to the networks before deleting virtual machine.
+ Args:
+ created_items (dict): All created items belongs to VM
+ """
+
+ for k, v in created_items.items():
+ if not v: # skip already deleted
+ continue
+
+ try:
+ k_item, k_id = self._get_item_name_id(k)
+ if k_item == "port":
+ self._delete_ports_by_id_wth_neutron(k_id)
+
+ except Exception as e:
+ self.logger.error(
+ "Error deleting port: {}: {}".format(type(e).__name__, e)
+ )
+
+ def _delete_created_items(
+ self, created_items: dict, volumes_to_hold: list, keep_waiting: bool
+ ) -> bool:
+ """Delete Volumes and floating ip if they exist in created_items."""
+ for k, v in created_items.items():
+ if not v: # skip already deleted
+ continue
+
+ try:
+ k_item, k_id = self._get_item_name_id(k)
+
+ if k_item == "volume":
+
+ unavailable_vol = self._delete_volumes_by_id_wth_cinder(
+ k, k_id, volumes_to_hold, created_items
+ )
+
+ if unavailable_vol:
+ keep_waiting = True
+
+ elif k_item == "floating_ip":
+
+ self._delete_floating_ip_by_id(k, k_id, created_items)
+
+ except Exception as e:
+ self.logger.error("Error deleting {}: {}".format(k, e))
+
+ return keep_waiting
+
+ def delete_vminstance(
+ self, vm_id: str, created_items: dict = None, volumes_to_hold: list = None
+ ) -> None:
+ """Removes a VM instance from VIM. Returns the old identifier.
+ Args:
+ vm_id (str): Identifier of VM instance
+ created_items (dict): All created items belongs to VM
+ volumes_to_hold (list): Volumes_to_hold
+ """
if created_items is None:
created_items = {}
+ if volumes_to_hold is None:
+ volumes_to_hold = []
try:
self._reload_connection()
- # delete VM ports attached to this networks before the virtual machine
- for k, v in created_items.items():
- if not v: # skip already deleted
- continue
-
- try:
- k_item, _, k_id = k.partition(":")
- if k_item == "port":
- self.neutron.delete_port(k_id)
- except Exception as e:
- self.logger.error(
- "Error deleting port: {}: {}".format(type(e).__name__, e)
- )
- # #commented because detaching the volumes makes the servers.delete not work properly ?!?
- # #dettach volumes attached
- # server = self.nova.servers.get(vm_id)
- # volumes_attached_dict = server._info["os-extended-volumes:volumes_attached"] #volume["id"]
- # #for volume in volumes_attached_dict:
- # # self.cinder.volumes.detach(volume["id"])
+ # Delete VM ports attached to the networks before the virtual machine
+ if created_items:
+ self._delete_vm_ports_attached_to_network(created_items)
if vm_id:
self.nova.servers.delete(vm_id)
- # delete volumes. Although having detached, they should have in active status before deleting
- # we ensure in this loop
+ # Although having detached, volumes should have in active status before deleting.
+ # We ensure in this loop
keep_waiting = True
elapsed_time = 0
while keep_waiting and elapsed_time < volume_timeout:
keep_waiting = False
- for k, v in created_items.items():
- if not v: # skip already deleted
- continue
-
- try:
- k_item, _, k_id = k.partition(":")
- if k_item == "volume":
- if self.cinder.volumes.get(k_id).status != "available":
- keep_waiting = True
- else:
- self.cinder.volumes.delete(k_id)
- created_items[k] = None
- elif k_item == "floating_ip": # floating ip
- self.neutron.delete_floatingip(k_id)
- created_items[k] = None
-
- except Exception as e:
- self.logger.error("Error deleting {}: {}".format(k, e))
+ # Delete volumes and floating IP.
+ keep_waiting = self._delete_created_items(
+ created_items, volumes_to_hold, keep_waiting
+ )
if keep_waiting:
time.sleep(1)
elapsed_time += 1
- return None
except (
nvExceptions.NotFound,
ksExceptions.ClientException,
server.resume()
elif server.status == "SHUTOFF":
server.start()
+ else:
+ self.logger.debug(
+ "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
+ )
+ raise vimconn.VimConnException(
+ "Cannot 'start' instance while it is in active state",
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+
elif "pause" in action_dict:
server.pause()
elif "resume" in action_dict:
server.resume()
elif "shutoff" in action_dict or "shutdown" in action_dict:
- server.stop()
+ self.logger.debug("server status %s", server.status)
+ if server.status == "ACTIVE":
+ server.stop()
+ else:
+ self.logger.debug("ERROR: VM is not in Active state")
+ raise vimconn.VimConnException(
+ "VM is not in active state, stop operation is not allowed",
+ http_code=vimconn.HTTP_Bad_Request,
+ )
elif "forceOff" in action_dict:
server.stop() # TODO
elif "terminate" in action_dict:
classification_dict[classification_id] = classification
return classification_dict
+
def new_affinity_group(self, affinity_group_data):
    """Adds a server group to VIM.

    affinity_group_data contains a dictionary with information, keys:
        name: name in VIM for the server group
        type: affinity or anti-affinity
        scope: Only nfvi-node allowed
    Returns the server group identifier
    """
    self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))

    try:
        group_name = affinity_group_data["name"]
        group_policy = affinity_group_data["type"]

        self._reload_connection()
        created_group = self.nova.server_groups.create(group_name, group_policy)
        return created_group.id
    except (
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
        KeyError,
    ) as e:
        self._format_exception(e)
+
def get_affinity_group(self, affinity_group_id):
    """Obtain server group details from the VIM. Returns the server group details as a dict"""
    # Fixed copy-paste log text: this fetches a server group, not a flavor
    self.logger.debug("Getting server group '%s'", affinity_group_id)
    try:
        self._reload_connection()
        server_group = self.nova.server_groups.find(id=affinity_group_id)

        return server_group.to_dict()
    except (
        nvExceptions.NotFound,
        nvExceptions.ClientException,
        ksExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
+
def delete_affinity_group(self, affinity_group_id):
    """Deletes a server group from the VIM. Returns the old affinity_group_id"""
    # Fixed log text: this deletes the server group, it does not fetch it
    self.logger.debug("Deleting server group '%s'", affinity_group_id)
    try:
        self._reload_connection()
        self.nova.server_groups.delete(affinity_group_id)

        return affinity_group_id
    except (
        nvExceptions.NotFound,
        ksExceptions.ClientException,
        nvExceptions.ClientException,
        ConnectionError,
    ) as e:
        self._format_exception(e)
+
def get_vdu_state(self, vm_id):
    """
    Getting the state of a vdu
    param:
        vm_id: ID of an instance
    """
    self.logger.debug("Getting the status of VM")
    self.logger.debug("VIM VM ID %s", vm_id)

    self._reload_connection()
    info = self.nova.servers.find(id=vm_id).to_dict()

    # [status, flavor id, compute host, availability zone]
    vdu_data = [
        info["status"],
        info["flavor"]["id"],
        info["OS-EXT-SRV-ATTR:host"],
        info["OS-EXT-AZ:availability_zone"],
    ]
    self.logger.debug("vdu_data %s", vdu_data)
    return vdu_data
+
def check_compute_availability(self, host, server_flavor_details):
    """Return *host* when it has more free RAM/disk/vcpus than the flavor needs, else None."""
    self._reload_connection()

    matches = self.nova.hypervisors.search(hypervisor_match=host, servers=True)
    for hypervisor in matches:
        hyp_id = hypervisor.to_dict()["id"]
        details = self.nova.hypervisors.get(hypervisor=hyp_id).to_dict()
        # JSON round-trip kept from the original code (plain-dict normalization)
        stats = json.loads(json.dumps(details))

        free_resources = [
            stats["free_ram_mb"],
            stats["disk_available_least"],
            stats["vcpus"] - stats["vcpus_used"],
        ]

        if all(
            avail > need
            for avail, need in zip(free_resources, server_flavor_details)
        ):
            return host
+
def check_availability_zone(
    self, old_az, server_flavor_details, old_host, host=None
):
    """Check whether the availability zone can host the flavor (optionally on *host*).

    Returns a dict with:
        zone_check: boolean result of the zone membership check
        compute_availability: a compute host with enough resources, or None
    """
    self._reload_connection()
    result = {"zone_check": False, "compute_availability": None}

    for aggregate in self.nova.aggregates.list():
        # JSON round-trip kept from the original code (plain-dict normalization)
        agg = json.loads(json.dumps(aggregate.to_dict()))

        if agg["availability_zone"] != old_az:
            # NOTE(review): mirrors the original logic — an aggregate in a
            # different zone marks the check as passed; confirm intent upstream
            result["zone_check"] = True
            continue

        hosts_list = agg["hosts"]

        if host is not None:
            # Explicit target host: it must belong to the zone
            if host in hosts_list:
                result["zone_check"] = True
                compute = self.check_compute_availability(
                    host, server_flavor_details
                )
                if compute is not None:
                    result["compute_availability"] = compute
        else:
            # No target given: pick the first other host with capacity
            for candidate in hosts_list:
                if candidate == old_host:
                    continue
                compute = self.check_compute_availability(
                    candidate, server_flavor_details
                )
                if compute is not None:
                    result["zone_check"] = True
                    result["compute_availability"] = compute
                    break

    return result
+
def migrate_instance(self, vm_id, compute_host=None):
    """
    Migrate a vdu
    param:
        vm_id: ID of an instance
        compute_host: Host to migrate the vdu to
    """
    self._reload_connection()
    # Will hold the result of __wait_for_vm() once a migration is triggered
    vm_state = False
    instance_state = self.get_vdu_state(vm_id)
    # get_vdu_state returns [status, flavor id, compute host, availability zone]
    server_flavor_id = instance_state[1]
    server_hypervisor_name = instance_state[2]
    server_availability_zone = instance_state[3]

    try:
        # RAM/disk/vcpus required by the instance's current flavor
        server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
        server_flavor_details = [
            server_flavor["ram"],
            server_flavor["disk"],
            server_flavor["vcpus"],
        ]

        if compute_host == server_hypervisor_name:
            raise vimconn.VimConnException(
                "Unable to migrate instance '{}' to the same host '{}'".format(
                    vm_id, compute_host
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )

        # The target must stay in the same availability zone and have capacity
        az_status = self.check_availability_zone(
            server_availability_zone,
            server_flavor_details,
            server_hypervisor_name,
            compute_host,
        )
        availability_zone_check = az_status["zone_check"]
        available_compute_id = az_status.get("compute_availability")

        if availability_zone_check is False:
            raise vimconn.VimConnException(
                "Unable to migrate instance '{}' to a different availability zone".format(
                    vm_id
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )

        if available_compute_id is not None:
            self.nova.servers.live_migrate(
                server=vm_id,
                host=available_compute_id,
                block_migration=True,
                disk_over_commit=False,
            )
            state = "MIGRATING"
            changed_compute_host = ""

            # NOTE(review): state is a constant here, so this branch always runs
            if state == "MIGRATING":
                # Wait until the instance is ACTIVE again, then read its host
                vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
                changed_compute_host = self.get_vdu_state(vm_id)[2]

            if vm_state and changed_compute_host == available_compute_id:
                self.logger.debug(
                    "Instance '{}' migrated to the new compute host '{}'".format(
                        vm_id, changed_compute_host
                    )
                )

                return state, available_compute_id
            else:
                raise vimconn.VimConnException(
                    "Migration Failed. Instance '{}' not moved to the new host {}".format(
                        vm_id, available_compute_id
                    ),
                    http_code=vimconn.HTTP_Bad_Request,
                )
        else:
            raise vimconn.VimConnException(
                "Compute '{}' not available or does not have enough resources to migrate the instance".format(
                    available_compute_id
                ),
                http_code=vimconn.HTTP_Bad_Request,
            )
    except (
        nvExceptions.BadRequest,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
    ) as e:
        self._format_exception(e)
+
def resize_instance(self, vm_id, new_flavor_id):
    """
    For resizing the vm based on the given
    flavor details
    param:
        vm_id : ID of an instance
        new_flavor_id : Flavor id to be resized
    Return the status of a resized instance
    """
    self._reload_connection()
    self.logger.debug("resize the flavor of an instance")

    instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
    old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
    new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]

    try:
        # Guard clause: resize is only allowed from ACTIVE or SHUTOFF
        if instance_status not in ("ACTIVE", "SHUTOFF"):
            self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
            raise nvExceptions.BadRequest(
                409,
                message="Cannot 'resize' instance while it is in vm_state resized",
            )

        # Shrinking the disk is not supported by nova
        if old_flavor_disk > new_flavor_disk:
            raise nvExceptions.BadRequest(
                400,
                message="Server disk resize failed. Resize to lower disk flavor is not allowed",
            )

        self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)

        if self.__wait_for_vm(vm_id, "VERIFY_RESIZE"):
            return self.confirm_resize(vm_id)

        raise nvExceptions.BadRequest(
            409,
            message="Cannot 'resize' vm_state is in ERROR",
        )
    except (
        nvExceptions.BadRequest,
        nvExceptions.ClientException,
        nvExceptions.NotFound,
    ) as e:
        self._format_exception(e)
+
def confirm_resize(self, vm_id):
    """
    Confirm the resize of an instance
    param:
        vm_id: ID of an instance
    """
    self._reload_connection()
    self.nova.servers.confirm_resize(server=vm_id)

    # If nova still reports VERIFY_RESIZE, wait for the instance to settle
    if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
        self.__wait_for_vm(vm_id, "ACTIVE")

    return self.get_vdu_state(vm_id)[0]