# cloud config
config_drive, userdata = self._create_user_data(cloud_config)
+ # Get availability zone
+ vm_av_zone = self._get_vm_availability_zone(
+ availability_zone_index, availability_zone_list
+ )
+
# Create additional volumes in case these are present in disk_list
+ existing_vim_volumes = []
base_disk_index = ord("b")
boot_volume_id = None
if disk_list:
block_device_mapping["_vd" + chr(base_disk_index)] = disk[
"vim_id"
]
+ existing_vim_volumes.append({"id": disk["vim_id"]})
else:
if "image_id" in disk:
base_disk_index = ord("a")
size=disk["size"],
name=name + "_vd" + chr(base_disk_index),
imageRef=disk["image_id"],
+ # Make sure volume is in the same AZ as the VM to be attached to
+ availability_zone=vm_av_zone,
)
boot_volume_id = volume.id
else:
volume = self.cinder.volumes.create(
size=disk["size"],
name=name + "_vd" + chr(base_disk_index),
+ # Make sure volume is in the same AZ as the VM to be attached to
+ availability_zone=vm_av_zone,
)
created_items["volume:" + str(volume.id)] = True
time.sleep(5)
elapsed_time += 5
+ # Wait until all existing volumes in the VIM reach status "available"
+ while elapsed_time < volume_timeout:
+ for volume in existing_vim_volumes:
+ if self.cinder.volumes.get(volume["id"]).status != "available":
+ break
+ else: # all ready: break from while
+ break
+
+ time.sleep(5)
+ elapsed_time += 5
+
# If we exceeded the timeout rollback
if elapsed_time >= volume_timeout:
raise vimconn.VimConnException(
if boot_volume_id:
self.cinder.volumes.set_bootable(boot_volume_id, True)
- # get availability Zone
- vm_av_zone = self._get_vm_availability_zone(
- availability_zone_index, availability_zone_list
- )
-
# Manage affinity groups/server groups
server_group_id = None
scheduller_hints = {}
) as e:
self._format_exception(e)
- def delete_vminstance(self, vm_id, created_items=None):
+ def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
"""Removes a VM instance from VIM. Returns the old identifier"""
# print "osconnector: Getting VM from VIM"
if created_items is None:
if self.cinder.volumes.get(k_id).status != "available":
keep_waiting = True
else:
- self.cinder.volumes.delete(k_id)
- created_items[k] = None
+ if k_id not in volumes_to_hold:
+ self.cinder.volumes.delete(k_id)
+ created_items[k] = None
elif k_item == "floating_ip": # floating ip
self.neutron.delete_floatingip(k_id)
created_items[k] = None