Bug 2029 mempage-size is ignored
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
index 8f96f6b..ab9ef30 100644 (file)
@@ -1349,6 +1349,26 @@ class vimconnector(vimconn.VimConnector):
                                 extended.get("disk-io-quota"), "disk_io", extra_specs
                             )
 
+                        # Set the mempage size as specified in the descriptor
+                        if extended.get("mempage-size"):
+                            if extended.get("mempage-size") == "LARGE":
+                                extra_specs["hw:mem_page_size"] = "large"
+                            elif extended.get("mempage-size") == "SMALL":
+                                extra_specs["hw:mem_page_size"] = "small"
+                            elif extended.get("mempage-size") == "SIZE_2MB":
+                                extra_specs["hw:mem_page_size"] = "2MB"
+                            elif extended.get("mempage-size") == "SIZE_1GB":
+                                extra_specs["hw:mem_page_size"] = "1GB"
+                            elif extended.get("mempage-size") == "PREFER_LARGE":
+                                extra_specs["hw:mem_page_size"] = "any"
+                            else:
+                                # NBI validations should make this branch unreachable.
+                                # If this message is logged, review the NBI validations.
+                                self.logger.debug(
+                                    "Invalid mempage-size %s. Will be ignored",
+                                    extended.get("mempage-size"),
+                                )
+
                     # create flavor
                     new_flavor = self.nova.flavors.create(
                         name=name,
@@ -1865,7 +1885,13 @@ class vimconnector(vimconn.VimConnector):
             # cloud config
             config_drive, userdata = self._create_user_data(cloud_config)
 
+            # Get the availability zone (needed before volume creation below)
+            vm_av_zone = self._get_vm_availability_zone(
+                availability_zone_index, availability_zone_list
+            )
+
             # Create additional volumes in case these are present in disk_list
+            existing_vim_volumes = []
             base_disk_index = ord("b")
             boot_volume_id = None
             if disk_list:
@@ -1875,6 +1901,7 @@ class vimconnector(vimconn.VimConnector):
                         block_device_mapping["_vd" + chr(base_disk_index)] = disk[
                             "vim_id"
                         ]
+                        existing_vim_volumes.append({"id": disk["vim_id"]})
                     else:
                         if "image_id" in disk:
                             base_disk_index = ord("a")
@@ -1882,12 +1909,16 @@ class vimconnector(vimconn.VimConnector):
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
                                 imageRef=disk["image_id"],
+                                # Make sure volume is in the same AZ as the VM to be attached to
+                                availability_zone=vm_av_zone,
                             )
                             boot_volume_id = volume.id
                         else:
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
                                 name=name + "_vd" + chr(base_disk_index),
+                                # Make sure volume is in the same AZ as the VM to be attached to
+                                availability_zone=vm_av_zone,
                             )
 
                         created_items["volume:" + str(volume.id)] = True
@@ -1909,6 +1940,17 @@ class vimconnector(vimconn.VimConnector):
                     time.sleep(5)
                     elapsed_time += 5
 
+                # Wait until pre-existing VIM volumes reach "available" status
+                while elapsed_time < volume_timeout:
+                    for volume in existing_vim_volumes:
+                        if self.cinder.volumes.get(volume["id"]).status != "available":
+                            break
+                    else:  # all ready: break from while
+                        break
+
+                    time.sleep(5)
+                    elapsed_time += 5
+
                 # If we exceeded the timeout rollback
                 if elapsed_time >= volume_timeout:
                     raise vimconn.VimConnException(
@@ -1918,11 +1960,6 @@ class vimconnector(vimconn.VimConnector):
                 if boot_volume_id:
                     self.cinder.volumes.set_bootable(boot_volume_id, True)
 
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
-                availability_zone_index, availability_zone_list
-            )
-
             # Manage affinity groups/server groups
             server_group_id = None
             scheduller_hints = {}
@@ -2241,7 +2278,7 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
-    def delete_vminstance(self, vm_id, created_items=None):
+    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
         """Removes a VM instance from VIM. Returns the old identifier"""
         # print "osconnector: Getting VM from VIM"
         if created_items is None:
@@ -2291,8 +2328,9 @@ class vimconnector(vimconn.VimConnector):
                             if self.cinder.volumes.get(k_id).status != "available":
                                 keep_waiting = True
                             else:
-                                self.cinder.volumes.delete(k_id)
-                                created_items[k] = None
+                                if k_id not in volumes_to_hold:
+                                    self.cinder.volumes.delete(k_id)
+                                    created_items[k] = None
                         elif k_item == "floating_ip":  # floating ip
                             self.neutron.delete_floatingip(k_id)
                             created_items[k] = None