Feature 10922: Stop, start and rebuild
RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index c59bf90..da1499d 100644
@@ -30,32 +30,30 @@ to the VIM connector's SFC resources as follows:
 - Service Function Path (OSM) -> Port Chain (Neutron)
 """
 
-from osm_ro_plugin import vimconn
-
-import json
+import copy
+from http.client import HTTPException
+import json
 import logging
-import netaddr
-import time
-import yaml
+from pprint import pformat
 import random
 import re
-import copy
-from pprint import pformat
-from novaclient import client as nClient, exceptions as nvExceptions
-from keystoneauth1.identity import v2, v3
+import time
+
+from cinderclient import client as cClient
+from glanceclient import client as glClient
+import glanceclient.exc as gl1Exceptions
 from keystoneauth1 import session
+from keystoneauth1.identity import v2, v3
 import keystoneclient.exceptions as ksExceptions
-import keystoneclient.v3.client as ksClient_v3
 import keystoneclient.v2_0.client as ksClient_v2
-from glanceclient import client as glClient
-import glanceclient.exc as gl1Exceptions
-from cinderclient import client as cClient
-
-# TODO py3 check that this base exception matches python2 httplib.HTTPException
-from http.client import HTTPException
-from neutronclient.neutron import client as neClient
+import keystoneclient.v3.client as ksClient_v3
+import netaddr
 from neutronclient.common import exceptions as neExceptions
+from neutronclient.neutron import client as neClient
+from novaclient import client as nClient, exceptions as nvExceptions
+from osm_ro_plugin import vimconn
 from requests.exceptions import ConnectionError
+import yaml
 
 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
 __date__ = "$22-sep-2017 23:59:59$"
@@ -864,6 +862,14 @@ class vimconnector(vimconn.VimConnector):
             if self.config.get("disable_network_port_security"):
                 network_dict["port_security_enabled"] = False
 
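+            # Propagate Neutron availability-zone hints from the VIM config;
+            # a single string is normalized to a one-element list.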
+            hints = self.config.get("neutron_availability_zone_hints")
+            if hints:
+                if isinstance(hints, str):
+                    hints = [hints]
+
+                network_dict["availability_zone_hints"] = hints
+
             new_net = self.neutron.create_network({"network": network_dict})
             # print new_net
             # create subnetwork, even if there is no profile
@@ -1169,6 +1175,8 @@ class vimconnector(vimconn.VimConnector):
                 flavor_dict["ram"],
                 flavor_dict["vcpus"],
                 flavor_dict["disk"],
+                flavor_dict.get("ephemeral", 0),
+                flavor_dict.get("swap", 0),
             )
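+            # Ephemeral and swap sizes are now part of the lookup key, so an
+            # existing flavor is reused only when those match as well.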
             # numa=None
             extended = flavor_dict.get("extended", {})
@@ -1188,7 +1196,13 @@ class vimconnector(vimconn.VimConnector):
                     continue
                     # TODO
 
-                flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
+                flavor_data = (
+                    flavor.ram,
+                    flavor.vcpus,
+                    flavor.disk,
+                    flavor.ephemeral,
+                    flavor.swap if isinstance(flavor.swap, int) else 0,
+                )
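+                # Nova reports swap as "" when unset, hence the isinstance
+                # guard above.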
                 if flavor_data == flavor_target:
                     return flavor.id
                 elif (
@@ -1336,12 +1350,34 @@ class vimconnector(vimconn.VimConnector):
                                 extended.get("disk-io-quota"), "disk_io", extra_specs
                             )
 
+                        # Set the mempage size as specified in the descriptor
+                        if extended.get("mempage-size"):
+                            if extended.get("mempage-size") == "LARGE":
+                                extra_specs["hw:mem_page_size"] = "large"
+                            elif extended.get("mempage-size") == "SMALL":
+                                extra_specs["hw:mem_page_size"] = "small"
+                            elif extended.get("mempage-size") == "SIZE_2MB":
+                                extra_specs["hw:mem_page_size"] = "2MB"
+                            elif extended.get("mempage-size") == "SIZE_1GB":
+                                extra_specs["hw:mem_page_size"] = "1GB"
+                            elif extended.get("mempage-size") == "PREFER_LARGE":
+                                extra_specs["hw:mem_page_size"] = "any"
+                            else:
+                                # NBI validation should prevent reaching this point;
+                                # if this message appears, check those validations.
+                                self.logger.debug(
+                                    "Invalid mempage-size %s. Will be ignored",
+                                    extended.get("mempage-size"),
+                                )
+
                     # create flavor
                     new_flavor = self.nova.flavors.create(
-                        name,
-                        ram,
-                        vcpus,
-                        flavor_data.get("disk", 0),
+                        name=name,
+                        ram=ram,
+                        vcpus=vcpus,
+                        disk=flavor_data.get("disk", 0),
+                        ephemeral=flavor_data.get("ephemeral", 0),
+                        swap=flavor_data.get("swap", 0),
                         is_public=flavor_data.get("is_public", True),
                     )
                     # add metadata
@@ -1666,6 +1702,7 @@ class vimconnector(vimconn.VimConnector):
         start,
         image_id,
         flavor_id,
+        affinity_group_list,
         net_list,
         cloud_config=None,
         disk_list=None,
@@ -1675,7 +1712,9 @@ class vimconnector(vimconn.VimConnector):
         """Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
-            image_id,flavor_id: iamge and flavor uuid
+            image_id, flavor_id: image and flavor uuid
+            affinity_group_list: list of affinity groups, each one is a dictionary.
+                Ignored if empty.
             net_list: list of interfaces, each one is a dictionary with:
                 name:
                 net_id: network uuid to connect
@@ -1847,8 +1886,15 @@ class vimconnector(vimconn.VimConnector):
             # cloud config
             config_drive, userdata = self._create_user_data(cloud_config)
 
+            # Get the availability zone
+            vm_av_zone = self._get_vm_availability_zone(
+                availability_zone_index, availability_zone_list
+            )
+
             # Create additional volumes in case these are present in disk_list
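+            # Track volumes that already exist in the VIM so we can wait for
+            # them to become "available" before booting (see loop below).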
+            existing_vim_volumes = []
             base_disk_index = ord("b")
+            boot_volume_id = None
             if disk_list:
                 block_device_mapping = {}
                 for disk in disk_list:
@@ -1856,17 +1902,24 @@ class vimconnector(vimconn.VimConnector):
                         block_device_mapping["_vd" + chr(base_disk_index)] = disk[
                             "vim_id"
                         ]
+                        existing_vim_volumes.append({"id": disk["vim_id"]})
                     else:
                         if "image_id" in disk:
+                            base_disk_index = ord("a")
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
                                 imageRef=disk["image_id"],
+                                # Make sure volume is in the same AZ as the VM to be attached to
+                                availability_zone=vm_av_zone,
                             )
+                            boot_volume_id = volume.id
                         else:
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
+                                # Make sure volume is in the same AZ as the VM to be attached to
+                                availability_zone=vm_av_zone,
                             )
 
                         created_items["volume:" + str(volume.id)] = True
@@ -1888,22 +1941,39 @@ class vimconnector(vimconn.VimConnector):
                     time.sleep(5)
                     elapsed_time += 5
 
+                # Wait until pre-existing VIM volumes reach "available" status
+                while elapsed_time < volume_timeout:
+                    for volume in existing_vim_volumes:
+                        if self.cinder.volumes.get(volume["id"]).status != "available":
+                            break
+                    else:  # all ready: break from while
+                        break
+
+                    time.sleep(5)
+                    elapsed_time += 5
+
                 # If we exceeded the timeout rollback
                 if elapsed_time >= volume_timeout:
                     raise vimconn.VimConnException(
                         "Timeout creating volumes for instance " + name,
                         http_code=vimconn.HTTP_Request_Timeout,
                     )
+                if boot_volume_id:
+                    self.cinder.volumes.set_bootable(boot_volume_id, True)
 
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
-                availability_zone_index, availability_zone_list
-            )
+            # Manage affinity groups / server groups
+            server_group_id = None
+            scheduler_hints = {}
+
+            if affinity_group_list:
+                # OpenStack honours only one server group per instance, so just
+                # the first id in the list is used.
+                server_group_id = affinity_group_list[0]["affinity_group_id"]
+                scheduler_hints["group"] = server_group_id
 
             self.logger.debug(
                 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
-                "block_device_mapping={})".format(
+                "block_device_mapping={}, server_group={})".format(
                     name,
                     image_id,
                     flavor_id,
@@ -1914,6 +1984,7 @@ class vimconnector(vimconn.VimConnector):
                     userdata,
                     config_drive,
                     block_device_mapping,
+                    server_group_id,
                 )
             )
             server = self.nova.servers.create(
@@ -1928,6 +1999,7 @@ class vimconnector(vimconn.VimConnector):
                 userdata=userdata,
                 config_drive=config_drive,
                 block_device_mapping=block_device_mapping,
+                scheduler_hints=scheduler_hints,
             )  # , description=description)
 
             vm_start_time = time.time()
@@ -2207,7 +2279,7 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
-    def delete_vminstance(self, vm_id, created_items=None):
+    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
         """Removes a VM instance from VIM. Returns the old identifier"""
         # print "osconnector: Getting VM from VIM"
         if created_items is None:
@@ -2223,7 +2295,12 @@ class vimconnector(vimconn.VimConnector):
                 try:
                     k_item, _, k_id = k.partition(":")
                     if k_item == "port":
-                        self.neutron.delete_port(k_id)
+                        port_dict = self.neutron.list_ports(device_id=vm_id)
+                        existing_ports = [port["id"] for port in port_dict["ports"]]
+                        if k_id in existing_ports:
+                            self.neutron.delete_port(k_id)
                 except Exception as e:
                     self.logger.error(
                         "Error deleting port: {}: {}".format(type(e).__name__, e)
@@ -2257,8 +2334,9 @@ class vimconnector(vimconn.VimConnector):
                             if self.cinder.volumes.get(k_id).status != "available":
                                 keep_waiting = True
                             else:
-                                self.cinder.volumes.delete(k_id)
-                                created_items[k] = None
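+                                # Volumes listed in volumes_to_hold are kept
+                                # (e.g. persistent volumes) and not deleted here.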
+                                if not volumes_to_hold or k_id not in volumes_to_hold:
+                                    self.cinder.volumes.delete(k_id)
+                                    created_items[k] = None
                         elif k_item == "floating_ip":  # floating ip
                             self.neutron.delete_floatingip(k_id)
                             created_items[k] = None
@@ -2441,12 +2519,29 @@ class vimconnector(vimconn.VimConnector):
                         server.resume()
                     elif server.status == "SHUTOFF":
                         server.start()
+                    else:
+                        self.logger.debug(
+                            "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state"
+                        )
+                        raise vimconn.VimConnException(
+                            "Cannot 'start' instance while it is in active state",
+                            http_code=vimconn.HTTP_Bad_Request,
+                        )
+
             elif "pause" in action_dict:
                 server.pause()
             elif "resume" in action_dict:
                 server.resume()
             elif "shutoff" in action_dict or "shutdown" in action_dict:
-                server.stop()
+                self.logger.debug("server status %s", server.status)
+                if server.status == "ACTIVE":
+                    server.stop()
+                else:
+                    self.logger.debug("ERROR: VM is not in Active state")
+                    raise vimconn.VimConnException(
+                        "VM is not in active state, stop operation is not allowed",
+                        http_code=vimconn.HTTP_Bad_Request,
+                    )
             elif "forceOff" in action_dict:
                 server.stop()  # TODO
             elif "terminate" in action_dict:
@@ -3429,3 +3524,219 @@ class vimconnector(vimconn.VimConnector):
             classification_dict[classification_id] = classification
 
         return classification_dict
+
+    def new_affinity_group(self, affinity_group_data):
+        """Adds a server group to VIM
+            affinity_group_data contains a dictionary with information, keys:
+                name: name in VIM for the server group
+                type: affinity or anti-affinity
+                scope: Only nfvi-node allowed
+        Returns the server group identifier"""
+        self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+
+        try:
+            name = affinity_group_data["name"]
+            policy = affinity_group_data["type"]
+
+            self._reload_connection()
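+            # The policy string ("affinity" or "anti-affinity") is passed
+            # straight through to Nova.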
+            new_server_group = self.nova.server_groups.create(name, policy)
+
+            return new_server_group.id
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            ConnectionError,
+            KeyError,
+        ) as e:
+            self._format_exception(e)
+
+    def get_affinity_group(self, affinity_group_id):
+        """Obtain server group details from the VIM. Returns the server group detais as a dict"""
+        self.logger.debug("Getting flavor '%s'", affinity_group_id)
+        try:
+            self._reload_connection()
+            server_group = self.nova.server_groups.find(id=affinity_group_id)
+
+            return server_group.to_dict()
+        except (
+            nvExceptions.NotFound,
+            nvExceptions.ClientException,
+            ksExceptions.ClientException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
+    def delete_affinity_group(self, affinity_group_id):
+        """Deletes a server group from the VIM. Returns the old affinity_group_id"""
+        self.logger.debug("Getting server group '%s'", affinity_group_id)
+        try:
+            self._reload_connection()
+            self.nova.server_groups.delete(affinity_group_id)
+
+            return affinity_group_id
+        except (
+            nvExceptions.NotFound,
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
+    def get_vdu_state(self, vm_id):
+        """
+        Get the state of a VDU
+        param:
+            vm_id: ID of the instance
+        """
+        self.logger.debug("Getting the status of VM %s", vm_id)
+        self._reload_connection()
+        server = self.nova.servers.find(id=vm_id)
+        server_dict = server.to_dict()
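+        # Return [status, flavor id, compute host, availability zone]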
+        vdu_data = [
+            server_dict["status"],
+            server_dict["flavor"]["id"],
+            server_dict["OS-EXT-SRV-ATTR:host"],
+            server_dict["OS-EXT-AZ:availability_zone"],
+        ]
+        self.logger.debug("vdu_data %s", vdu_data)
+        return vdu_data
+
+    def check_compute_availability(self, host, server_flavor_details):
+        self._reload_connection()
+        hypervisor_search = self.nova.hypervisors.search(
+            hypervisor_match=host, servers=True
+        )
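+        # Compare each candidate hypervisor's free RAM, least-available disk
+        # and free vCPUs against the flavor needs; return the first host that
+        # fits, or None implicitly when none does.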
+        for hypervisor in hypervisor_search:
+            hypervisor_id = hypervisor.to_dict()["id"]
+            hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
+            hypervisor_dict = hypervisor_details.to_dict()
+            resources_available = [
+                hypervisor_dict["free_ram_mb"],
+                hypervisor_dict["disk_available_least"],
+                hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
+            ]
+            compute_available = all(
+                x > y for x, y in zip(resources_available, server_flavor_details)
+            )
+            if compute_available:
+                return host
+
+    def check_availability_zone(
+        self, old_az, server_flavor_details, old_host, host=None
+    ):
+        self._reload_connection()
+        az_check = {"zone_check": False, "compute_availability": None}
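+        # zone_check: the target belongs to the same availability zone;
+        # compute_availability: a host in that zone with enough free resources.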
+        aggregates_list = self.nova.aggregates.list()
+        for aggregate in aggregates_list:
+            aggregate_dict = aggregate.to_dict()
+            if aggregate_dict["availability_zone"] == old_az:
+                hosts_list = aggregate_dict["hosts"]
+                if host is not None:
+                    if host in hosts_list:
+                        az_check["zone_check"] = True
+                        available_compute_id = self.check_compute_availability(
+                            host, server_flavor_details
+                        )
+                        if available_compute_id is not None:
+                            az_check["compute_availability"] = available_compute_id
+                else:
+                    for check_host in hosts_list:
+                        if check_host != old_host:
+                            available_compute_id = self.check_compute_availability(
+                                check_host, server_flavor_details
+                            )
+                            if available_compute_id is not None:
+                                az_check["zone_check"] = True
+                                az_check["compute_availability"] = available_compute_id
+                                break
+                    else:
+                        az_check["zone_check"] = True
+        return az_check
+
+    def migrate_instance(self, vm_id, compute_host=None):
+        """
+        Migrate a VDU to another compute host
+        param:
+            vm_id: ID of the instance
+            compute_host: host to migrate the VDU to
+        """
+        self._reload_connection()
+        vm_state = False
+        instance_state = self.get_vdu_state(vm_id)
+        server_flavor_id = instance_state[1]
+        server_hypervisor_name = instance_state[2]
+        server_availability_zone = instance_state[3]
+        try:
+            server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+            server_flavor_details = [
+                server_flavor["ram"],
+                server_flavor["disk"],
+                server_flavor["vcpus"],
+            ]
+            if compute_host == server_hypervisor_name:
+                raise vimconn.VimConnException(
+                    "Unable to migrate instance '{}' to the same host '{}'".format(
+                        vm_id, compute_host
+                    ),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+            az_status = self.check_availability_zone(
+                server_availability_zone,
+                server_flavor_details,
+                server_hypervisor_name,
+                compute_host,
+            )
+            availability_zone_check = az_status["zone_check"]
+            available_compute_id = az_status.get("compute_availability")
+
+            if availability_zone_check is False:
+                raise vimconn.VimConnException(
+                    "Unable to migrate instance '{}' to a different availability zone".format(
+                        vm_id
+                    ),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
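+            # Live-migrate only when a concrete target host with enough free
+            # resources was found in the same availability zone.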
+            if available_compute_id is not None:
+                self.nova.servers.live_migrate(
+                    server=vm_id,
+                    host=available_compute_id,
+                    block_migration=True,
+                    disk_over_commit=False,
+                )
+                state = "MIGRATING"
+                changed_compute_host = ""
+                if state == "MIGRATING":
+                    vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+                    changed_compute_host = self.get_vdu_state(vm_id)[2]
+                if vm_state and changed_compute_host == available_compute_id:
+                    self.logger.debug(
+                        "Instance '{}' migrated to the new compute host '{}'".format(
+                            vm_id, changed_compute_host
+                        )
+                    )
+                    return state, available_compute_id
+                else:
+                    raise vimconn.VimConnException(
+                        "Migration Failed. Instance '{}' not moved to the new host {}".format(
+                            vm_id, available_compute_id
+                        ),
+                        http_code=vimconn.HTTP_Bad_Request,
+                    )
+            else:
+                raise vimconn.VimConnException(
+                    "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+                        available_compute_id
+                    ),
+                    http_code=vimconn.HTTP_Bad_Request,
+                )
+        except (
+            nvExceptions.BadRequest,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+        ) as e:
+            self._format_exception(e)