Fixes bug 2026: Persistent volumes to be created in the same AZ as the VM that will...
[osm/RO.git] RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index 8f96f6b..3d54903 100644
@@ -1865,7 +1865,13 @@ class vimconnector(vimconn.VimConnector):
             # cloud config
             config_drive, userdata = self._create_user_data(cloud_config)
 
+            # get availability Zone
+            vm_av_zone = self._get_vm_availability_zone(
+                availability_zone_index, availability_zone_list
+            )
+
             # Create additional volumes in case these are present in disk_list
+            existing_vim_volumes = []
             base_disk_index = ord("b")
             boot_volume_id = None
             if disk_list:
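The availability-zone lookup is moved above the disk-handling block so that `vm_av_zone` is already resolved when the Cinder volumes are created below. The helper itself is not part of this patch; as a rough, assumed sketch of what such a selection routine does (not the actual OSM implementation), it would pick an entry from the configured zone list and fall back to a connector-level default:

```python
# Illustrative sketch only; the real _get_vm_availability_zone is defined
# elsewhere in vimconn_openstack.py and may differ in detail.
def _get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
    if not availability_zone_list:
        # No AZ constraints configured: fall back to a connector default (hypothetical attribute).
        return self.availability_zone
    if availability_zone_index is not None:
        # An explicit index selects one zone from the list coming from the descriptor.
        return availability_zone_list[availability_zone_index]
    # Otherwise take the first configured zone.
    return availability_zone_list[0]
```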
@@ -1875,6 +1881,7 @@ class vimconnector(vimconn.VimConnector):
                         block_device_mapping["_vd" + chr(base_disk_index)] = disk[
                             "vim_id"
                         ]
+                        existing_vim_volumes.append({"id": disk["vim_id"]})
                     else:
                         if "image_id" in disk:
                             base_disk_index = ord("a")
@@ -1882,12 +1889,16 @@ class vimconnector(vimconn.VimConnector):
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
                                 imageRef=disk["image_id"],
+                                # Make sure volume is in the same AZ as the VM to be attached to
+                                availability_zone=vm_av_zone,
                             )
                             boot_volume_id = volume.id
                         else:
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
+                                # Make sure volume is in the same AZ as the VM to be attached to
+                                availability_zone=vm_av_zone,
                             )
 
                         created_items["volume:" + str(volume.id)] = True
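Isolated from the surrounding control flow, the functional change in both `cinder.volumes.create()` calls is the extra `availability_zone` argument, which pins each new volume to the zone the VM will be scheduled into. A minimal stand-alone sketch with python-cinderclient (credentials, URLs and names below are placeholders, not taken from this patch):

```python
from keystoneauth1 import identity, session
from cinderclient import client as cinder_client

# Placeholder credentials; the connector builds its session from the VIM account config.
auth = identity.Password(
    auth_url="https://keystone.example:5000/v3",
    username="admin",
    password="secret",
    project_name="demo",
    user_domain_id="default",
    project_domain_id="default",
)
cinder = cinder_client.Client("3", session=session.Session(auth=auth))

vm_av_zone = "nova"  # example value; in the connector this comes from _get_vm_availability_zone()

# Passing availability_zone makes Cinder place the volume in the same AZ as the VM,
# so the later attach does not hit cross-AZ restrictions.
volume = cinder.volumes.create(
    size=10,  # GiB
    name="myvm_vdb",
    availability_zone=vm_av_zone,
)
```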
@@ -1909,6 +1920,17 @@ class vimconnector(vimconn.VimConnector):
                     time.sleep(5)
                     elapsed_time += 5
 
+                # Wait until volumes that already exist in the VIM have status "available"
+                while elapsed_time < volume_timeout:
+                    for volume in existing_vim_volumes:
+                        if self.cinder.volumes.get(volume["id"]).status != "available":
+                            break
+                    else:  # all ready: break from while
+                        break
+
+                    time.sleep(5)
+                    elapsed_time += 5
+
                 # If we exceeded the timeout rollback
                 if elapsed_time >= volume_timeout:
                     raise vimconn.VimConnException(
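The added wait loop shares the `elapsed_time` counter with the loop above it, so a single `volume_timeout` budget covers both newly created and pre-existing volumes. It relies on Python's `for`/`else`: the `else` branch runs only when the `for` loop finishes without `break`, i.e. when every volume already reported `available`. A stripped-down version of the same polling pattern (the client and ids are carried over from the sketch above, purely for illustration):

```python
import time

existing_vim_volumes = [{"id": "11111111-2222-3333-4444-555555555555"}]  # example vim_id values
volume_timeout = 600  # seconds; example budget
elapsed_time = 0

while elapsed_time < volume_timeout:
    for vol in existing_vim_volumes:
        if cinder.volumes.get(vol["id"]).status != "available":
            break  # this volume is not ready yet; sleep and re-check all of them
    else:
        break  # the for loop ran to completion: every volume is available
    time.sleep(5)
    elapsed_time += 5

if elapsed_time >= volume_timeout:
    raise TimeoutError("Timeout waiting for existing volumes to become available")
```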
@@ -1918,11 +1940,6 @@ class vimconnector(vimconn.VimConnector):
                 if boot_volume_id:
                     self.cinder.volumes.set_bootable(boot_volume_id, True)
 
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
-                availability_zone_index, availability_zone_list
-            )
-
             # Manage affinity groups/server groups
             server_group_id = None
             scheduller_hints = {}
@@ -2241,7 +2258,7 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
-    def delete_vminstance(self, vm_id, created_items=None):
+    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
         """Removes a VM instance from VIM. Returns the old identifier"""
         # print "osconnector: Getting VM from VIM"
         if created_items is None:
@@ -2291,8 +2308,9 @@ class vimconnector(vimconn.VimConnector):
                             if self.cinder.volumes.get(k_id).status != "available":
                                 keep_waiting = True
                             else:
-                                self.cinder.volumes.delete(k_id)
-                                created_items[k] = None
+                                if k_id not in volumes_to_hold:
+                                    self.cinder.volumes.delete(k_id)
+                                    created_items[k] = None
                         elif k_item == "floating_ip":  # floating ip
                             self.neutron.delete_floatingip(k_id)
                             created_items[k] = None
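`delete_vminstance()` now accepts an optional `volumes_to_hold` list: any created volume whose id appears in it is left in place instead of deleted, so persistent volumes can survive the VM they were attached to. The membership test `k_id not in volumes_to_hold` assumes a list is actually passed; a hypothetical caller-side sketch (only `delete_vminstance` itself comes from this patch) that also covers the `None` default:

```python
# Hypothetical wrapper; vim_conn is a vimconnector instance, the other names are examples.
def tear_down_vm(vim_conn, vm_id, created_items, persistent_volume_ids=None):
    # Always hand over a list so the "k_id not in volumes_to_hold" check inside
    # delete_vminstance never runs against None.
    volumes_to_hold = list(persistent_volume_ids or [])
    return vim_conn.delete_vminstance(
        vm_id,
        created_items=created_items,
        volumes_to_hold=volumes_to_hold,
    )
```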