extended.get("disk-io-quota"), "disk_io", extra_specs
)
+ # Set the mempage size as specified in the descriptor
+ if extended.get("mempage-size"):
+ if extended.get("mempage-size") == "LARGE":
+ extra_specs["hw:mem_page_size"] = "large"
+ elif extended.get("mempage-size") == "SMALL":
+ extra_specs["hw:mem_page_size"] = "small"
+ elif extended.get("mempage-size") == "SIZE_2MB":
+ extra_specs["hw:mem_page_size"] = "2MB"
+ elif extended.get("mempage-size") == "SIZE_1GB":
+ extra_specs["hw:mem_page_size"] = "1GB"
+ elif extended.get("mempage-size") == "PREFER_LARGE":
+ extra_specs["hw:mem_page_size"] = "any"
+ else:
+ # The validations in NBI should make reaching here not possible.
+ # If this message is shown, check validations
+ self.logger.debug(
+ "Invalid mempage-size %s. Will be ignored",
+ extended.get("mempage-size"),
+ )
+
# create flavor
new_flavor = self.nova.flavors.create(
name=name,
            # cloud config
            config_drive, userdata = self._create_user_data(cloud_config)
+            # get availability Zone
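+            # (resolved before volume creation so that new volumes are placed
+            # in the same AZ as the VM they will attach to)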
+            vm_av_zone = self._get_vm_availability_zone(
+                availability_zone_index, availability_zone_list
+            )
+
            # Create additional volumes when present in disk_list
+            existing_vim_volumes = []
            base_disk_index = ord("b")
+            boot_volume_id = None
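+            # existing_vim_volumes collects volumes passed in by vim_id (created
+            # outside this call) so their status can be polled below;
+            # boot_volume_id remembers the persistent root volume, if any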
            if disk_list:
                block_device_mapping = {}
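+                # device-name key -> volume id, consumed by the
+                # nova.servers.create(block_device_mapping=...) call later on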
                for disk in disk_list:
                    if disk.get("vim_id"):
                        block_device_mapping["_vd" + chr(base_disk_index)] = disk[
                            "vim_id"
                        ]
+                        existing_vim_volumes.append({"id": disk["vim_id"]})
                    else:
                        if "image_id" in disk:
+                            base_disk_index = ord("a")
                            volume = self.cinder.volumes.create(
                                size=disk["size"],
                                name=name + "_vd" + chr(base_disk_index),
                                imageRef=disk["image_id"],
+                                # Make sure volume is in the same AZ as the VM to be attached to
+                                availability_zone=vm_av_zone,
                            )
+                            boot_volume_id = volume.id
                        else:
                            volume = self.cinder.volumes.create(
                                size=disk["size"],
                                name=name + "_vd" + chr(base_disk_index),
+                                # Make sure volume is in the same AZ as the VM to be attached to
+                                availability_zone=vm_av_zone,
                            )
                        created_items["volume:" + str(volume.id)] = True
                    time.sleep(5)
                    elapsed_time += 5
+                # Wait until pre-existing VIM volumes reach "available" status
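+                # (elapsed_time carries over from the loop above, so both waits
+                # share the same volume_timeout budget)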
+                while elapsed_time < volume_timeout:
+                    for volume in existing_vim_volumes:
+                        if self.cinder.volumes.get(volume["id"]).status != "available":
+                            break
+                    else:  # all ready: break from while
+                        break
+
+                    time.sleep(5)
+                    elapsed_time += 5
+
                # If we exceeded the timeout rollback
                if elapsed_time >= volume_timeout:
                    raise vimconn.VimConnException(
                        "Timeout creating volumes for instance " + name,
                        http_code=vimconn.HTTP_Request_Timeout,
                    )
-
-            # get availability Zone
-            vm_av_zone = self._get_vm_availability_zone(
-                availability_zone_index, availability_zone_list
-            )
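+                # flag the image-backed root volume as bootable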
+                if boot_volume_id:
+                    self.cinder.volumes.set_bootable(boot_volume_id, True)
            # Manage affinity groups/server groups
            server_group_id = None
        ) as e:
            self._format_exception(e)
-    def delete_vminstance(self, vm_id, created_items=None):
+    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
        """Removes a VM instance from VIM. Returns the old identifier"""
        # print "osconnector: Getting VM from VIM"
        if created_items is None:
            created_items = {}
+        if volumes_to_hold is None:
+            volumes_to_hold = []
                            if self.cinder.volumes.get(k_id).status != "available":
                                keep_waiting = True
                            else:
-                                self.cinder.volumes.delete(k_id)
-                                created_items[k] = None
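+                                # volumes listed in volumes_to_hold (e.g. persistent
+                                # volumes that must outlive the VM) are left in place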
+                                if k_id not in volumes_to_hold:
+                                    self.cinder.volumes.delete(k_id)
+                                    created_items[k] = None
                        elif k_item == "floating_ip":  # floating ip
                            self.neutron.delete_floatingip(k_id)
                            created_items[k] = None