Fixes bug 2026: Persistent volumes to be created in the same AZ as the VM that will...
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
index a15a53c..3d54903 100644 (file)
@@ -1865,8 +1865,15 @@ class vimconnector(vimconn.VimConnector):
             # cloud config
             config_drive, userdata = self._create_user_data(cloud_config)
 
+            # Get the availability zone for the VM
+            vm_av_zone = self._get_vm_availability_zone(
+                availability_zone_index, availability_zone_list
+            )
+
             # Create additional volumes in case these are present in disk_list
+            existing_vim_volumes = []
             base_disk_index = ord("b")
+            boot_volume_id = None
             if disk_list:
                 block_device_mapping = {}
                 for disk in disk_list:
@@ -1874,17 +1881,24 @@ class vimconnector(vimconn.VimConnector):
                         block_device_mapping["_vd" + chr(base_disk_index)] = disk[
                             "vim_id"
                         ]
+                        existing_vim_volumes.append({"id": disk["vim_id"]})
                     else:
                         if "image_id" in disk:
+                            base_disk_index = ord("a")
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
                                 imageRef=disk["image_id"],
+                                # Create the volume in the same AZ as the VM it will be attached to
+                                availability_zone=vm_av_zone,
                             )
+                            boot_volume_id = volume.id
                         else:
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
+                                # Create the volume in the same AZ as the VM it will be attached to
+                                availability_zone=vm_av_zone,
                             )
 
                         created_items["volume:" + str(volume.id)] = True
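
Reviewer note: availability_zone is a standard argument of python-cinderclient's volumes.create(), so each new volume is placed directly in the VM's zone instead of the backend default. A minimal standalone sketch of the same call, assuming an already-authenticated keystoneauth1 session; the client construction, size, names and zone below are placeholders, not part of the patch:

    # Illustration only: create a Cinder volume pinned to a given availability zone.
    from cinderclient import client as cinder_client

    # "keystone_session" is assumed to be an existing keystoneauth1 Session.
    cinder = cinder_client.Client("3", session=keystone_session)

    volume = cinder.volumes.create(
        size=10,                       # size in GiB
        name="demo_vdb",               # hypothetical volume name
        availability_zone="nova-az1",  # hypothetical zone, same as the target VM
    )
    print(volume.id, volume.status)

Passing imageRef as well, as the patch does for the boot disk, makes Cinder build the volume from a Glance image.
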
@@ -1906,17 +1920,25 @@ class vimconnector(vimconn.VimConnector):
                     time.sleep(5)
                     elapsed_time += 5
 
+                # Wait until the pre-existing VIM volumes reach "available" status
+                while elapsed_time < volume_timeout:
+                    for volume in existing_vim_volumes:
+                        if self.cinder.volumes.get(volume["id"]).status != "available":
+                            break
+                    else:  # all ready: break from while
+                        break
+
+                    time.sleep(5)
+                    elapsed_time += 5
+
                 # If we exceeded the timeout rollback
                 if elapsed_time >= volume_timeout:
                     raise vimconn.VimConnException(
                         "Timeout creating volumes for instance " + name,
                         http_code=vimconn.HTTP_Request_Timeout,
                     )
-
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
-                availability_zone_index, availability_zone_list
-            )
+                if boot_volume_id:
+                    self.cinder.volumes.set_bootable(boot_volume_id, True)
 
             # Manage affinity groups/server groups
             server_group_id = None
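
Reviewer note: the added wait loop relies on Python's for/else, where the else branch runs only when the for loop completes without break, i.e. when every pre-existing volume is already "available". A standalone sketch of the same polling pattern; the helper name, timeout and interval are illustrative, not taken from the patch:

    import time


    def wait_for_volumes_available(cinder, volume_ids, timeout=600, interval=5):
        """Poll Cinder until every volume in volume_ids is 'available'.

        Illustrative helper only; the patch inlines this logic and reuses the
        elapsed_time already spent waiting for the newly created volumes.
        """
        elapsed = 0
        while elapsed < timeout:
            for vol_id in volume_ids:
                if cinder.volumes.get(vol_id).status != "available":
                    break  # this volume is not ready yet; sleep and retry
            else:
                return True  # for loop finished without break: all volumes ready

            time.sleep(interval)
            elapsed += interval

        return False  # timed out

The subsequent cinder.volumes.set_bootable(boot_volume_id, True) call then marks the image-backed root volume as bootable so the instance can boot from it.
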
@@ -2236,7 +2258,7 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
-    def delete_vminstance(self, vm_id, created_items=None):
+    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
         """Removes a VM instance from VIM. Returns the old identifier"""
         # print "osconnector: Getting VM from VIM"
         if created_items is None:
@@ -2286,8 +2308,9 @@ class vimconnector(vimconn.VimConnector):
                             if self.cinder.volumes.get(k_id).status != "available":
                                 keep_waiting = True
                             else:
-                                self.cinder.volumes.delete(k_id)
-                                created_items[k] = None
+                                if k_id not in (volumes_to_hold or []):
+                                    self.cinder.volumes.delete(k_id)
+                                    created_items[k] = None
                         elif k_item == "floating_ip":  # floating ip
                             self.neutron.delete_floatingip(k_id)
                             created_items[k] = None
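
Reviewer note: with the extended signature, callers can protect persistent volumes from cleanup by passing their Cinder IDs in volumes_to_hold; everything else recorded in created_items is still deleted once it reaches "available" status. A hypothetical invocation, where the connector instance, UUIDs and created_items content are placeholders, not from the patch:

    # Placeholders for illustration only.
    created_items = {
        "volume:11111111-aaaa-4bbb-8ccc-222222222222": True,
        "volume:33333333-dddd-4eee-8fff-444444444444": True,
    }
    volumes_to_hold = ["33333333-dddd-4eee-8fff-444444444444"]  # keep this volume

    # "vim_conn" is assumed to be an already-constructed vimconnector instance.
    vim_conn.delete_vminstance(
        vm_id="55555555-0000-4aaa-8bbb-666666666666",
        created_items=created_items,
        volumes_to_hold=volumes_to_hold,
    )

The held volume is left in Cinder untouched and can be reattached later.
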