Fixes bug 2026: Persistent volumes to be created in the same AZ as the VM that will...
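
The availability zone for the VM is now resolved before the Cinder volumes are
created, so every persistent volume is placed in the same AZ as the VM it will
be attached to, and the first server group id received in affinity_group_list
is passed to Nova as a scheduler hint. A minimal sketch of the volume part of
the change, using the python-cinderclient calls already present in this
connector (variable names as in new_vminstance):

    # resolve the VM availability zone first ...
    vm_av_zone = self._get_vm_availability_zone(
        availability_zone_index, availability_zone_list
    )
    # ... and reuse it when creating each additional volume
    volume = self.cinder.volumes.create(
        size=disk["size"],
        name=name + "_vd" + chr(base_disk_index),
        availability_zone=vm_av_zone,
    )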
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
index 1083e39..3d54903 100644 (file)
@@ -1681,6 +1681,7 @@ class vimconnector(vimconn.VimConnector):
         start,
         image_id,
         flavor_id,
+        affinity_group_list,
         net_list,
         cloud_config=None,
         disk_list=None,
@@ -1690,7 +1691,9 @@ class vimconnector(vimconn.VimConnector):
         """Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
-            image_id,flavor_id: iamge and flavor uuid
+            image_id,flavor_id: image and flavor uuid
+            affinity_group_list: list of affinity groups, each one is a dictionary.
+                Ignored if empty.
             net_list: list of interfaces, each one is a dictionary with:
                 name:
                 net_id: network uuid to connect
@@ -1862,8 +1865,15 @@ class vimconnector(vimconn.VimConnector):
             # cloud config
             config_drive, userdata = self._create_user_data(cloud_config)
 
+            # get availability Zone
+            vm_av_zone = self._get_vm_availability_zone(
+                availability_zone_index, availability_zone_list
+            )
+
             # Create additional volumes in case these are present in disk_list
+            existing_vim_volumes = []
             base_disk_index = ord("b")
+            boot_volume_id = None
             if disk_list:
                 block_device_mapping = {}
                 for disk in disk_list:
@@ -1871,17 +1881,24 @@ class vimconnector(vimconn.VimConnector):
                         block_device_mapping["_vd" + chr(base_disk_index)] = disk[
                             "vim_id"
                         ]
+                        existing_vim_volumes.append({"id": disk["vim_id"]})
                     else:
                         if "image_id" in disk:
+                            base_disk_index = ord("a")
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
                                 imageRef=disk["image_id"],
+                                # Make sure the volume is in the same AZ as the VM it attaches to
+                                availability_zone=vm_av_zone,
                             )
+                            boot_volume_id = volume.id
                         else:
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
+                                # Make sure the volume is in the same AZ as the VM it attaches to
+                                availability_zone=vm_av_zone,
                             )
 
                         created_items["volume:" + str(volume.id)] = True
@@ -1903,22 +1920,39 @@ class vimconnector(vimconn.VimConnector):
                     time.sleep(5)
                     elapsed_time += 5
 
+                # Wait until existing volumes in the VIM reach "available" status
+                while elapsed_time < volume_timeout:
+                    for volume in existing_vim_volumes:
+                        if self.cinder.volumes.get(volume["id"]).status != "available":
+                            break
+                    else:  # all ready: break from while
+                        break
+
+                    time.sleep(5)
+                    elapsed_time += 5
+
                 # If we exceeded the timeout rollback
                 if elapsed_time >= volume_timeout:
                     raise vimconn.VimConnException(
                         "Timeout creating volumes for instance " + name,
                         http_code=vimconn.HTTP_Request_Timeout,
                     )
+                if boot_volume_id:
+                    self.cinder.volumes.set_bootable(boot_volume_id, True)
 
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
-                availability_zone_index, availability_zone_list
-            )
+            # Manage affinity groups/server groups
+            server_group_id = None
+            scheduler_hints = {}
+
+            if affinity_group_list:
+                # Only the first id in the list is used (OpenStack restriction)
+                server_group_id = affinity_group_list[0]["affinity_group_id"]
+                scheduler_hints["group"] = server_group_id
 
             self.logger.debug(
                 "nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
                 "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
-                "block_device_mapping={})".format(
+                "block_device_mapping={}, server_group={})".format(
                     name,
                     image_id,
                     flavor_id,
@@ -1929,6 +1963,7 @@ class vimconnector(vimconn.VimConnector):
                     userdata,
                     config_drive,
                     block_device_mapping,
+                    server_group_id,
                 )
             )
             server = self.nova.servers.create(
@@ -1943,6 +1978,7 @@ class vimconnector(vimconn.VimConnector):
                 userdata=userdata,
                 config_drive=config_drive,
                 block_device_mapping=block_device_mapping,
+                scheduler_hints=scheduler_hints,
             )  # , description=description)
 
             vm_start_time = time.time()
@@ -2222,7 +2258,7 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
-    def delete_vminstance(self, vm_id, created_items=None):
+    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
         """Removes a VM instance from VIM. Returns the old identifier"""
         # print "osconnector: Getting VM from VIM"
         if created_items is None:
@@ -2272,8 +2308,9 @@ class vimconnector(vimconn.VimConnector):
                             if self.cinder.volumes.get(k_id).status != "available":
                                 keep_waiting = True
                             else:
-                                self.cinder.volumes.delete(k_id)
-                                created_items[k] = None
+                                if not volumes_to_hold or k_id not in volumes_to_hold:
+                                    self.cinder.volumes.delete(k_id)
+                                    created_items[k] = None
                         elif k_item == "floating_ip":  # floating ip
                             self.neutron.delete_floatingip(k_id)
                             created_items[k] = None
@@ -3444,3 +3481,60 @@ class vimconnector(vimconn.VimConnector):
             classification_dict[classification_id] = classification
 
         return classification_dict
+
+    def new_affinity_group(self, affinity_group_data):
+        """Adds a server group to VIM
+            affinity_group_data contains a dictionary with the following keys:
+                name: name in VIM for the server group
+                type: affinity or anti-affinity
+                scope: Only nfvi-node allowed
+        Returns the server group identifier"""
+        self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+
+        try:
+            name = affinity_group_data["name"]
+            policy = affinity_group_data["type"]
+
+            self._reload_connection()
+            new_server_group = self.nova.server_groups.create(name, policy)
+
+            return new_server_group.id
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            ConnectionError,
+            KeyError,
+        ) as e:
+            self._format_exception(e)
+
+    def get_affinity_group(self, affinity_group_id):
+        """Obtain server group details from the VIM. Returns the server group detais as a dict"""
+        self.logger.debug("Getting flavor '%s'", affinity_group_id)
+        try:
+            self._reload_connection()
+            server_group = self.nova.server_groups.find(id=affinity_group_id)
+
+            return server_group.to_dict()
+        except (
+            nvExceptions.NotFound,
+            nvExceptions.ClientException,
+            ksExceptions.ClientException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
+    def delete_affinity_group(self, affinity_group_id):
+        """Deletes a server group from the VIM. Returns the old affinity_group_id"""
+        self.logger.debug("Deleting server group '%s'", affinity_group_id)
+        try:
+            self._reload_connection()
+            self.nova.server_groups.delete(affinity_group_id)
+
+            return affinity_group_id
+        except (
+            nvExceptions.NotFound,
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
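
A short usage sketch of the server group helpers added above; "vim" stands for
an already constructed vimconnector instance and the literal values are only
illustrative, not part of this change:

    # create an anti-affinity server group and fetch its details
    sg_id = vim.new_affinity_group(
        {"name": "vnf-placement", "type": "anti-affinity", "scope": "nfvi-node"}
    )
    details = vim.get_affinity_group(sg_id)  # server group data as a dict

    # the id is handed to new_vminstance through affinity_group_list,
    # e.g. affinity_group_list=[{"affinity_group_id": sg_id}]

    vim.delete_affinity_group(sg_id)  # returns sg_id once the group is removed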