ksExceptions.BadRequest,
),
):
+ if message_error == "OS-EXT-SRV-ATTR:host":
+ tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
+ raise vimconn.VimConnInsufficientCredentials(
+ type(exception).__name__ + ": " + message_error + tip
+ )
raise vimconn.VimConnException(
type(exception).__name__ + ": " + message_error
)
+
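A hedged sketch of how calling code might react to the new exception type; conn, vm_id and the fallback policy below are illustrative, only vimconn.VimConnInsufficientCredentials itself comes from this patch.

    # "conn" and "vm_id" are hypothetical; the patch only guarantees that
    # VimConnInsufficientCredentials is raised when the admin-only
    # "OS-EXT-SRV-ATTR:host" attribute triggers the error path above.
    try:
        vdu_data = conn.get_vdu_state(vm_id, host_is_required=True)
    except vimconn.VimConnInsufficientCredentials:
        # Non-admin credentials: retry without requiring the host attribute.
        vdu_data = conn.get_vdu_state(vm_id, host_is_required=False)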
elif isinstance(
exception,
(
"Created volume is not valid, does not have id attribute."
)
+ block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+ if disk.get("multiattach"): # multiattach volumes do not belong to VDUs
+ return
volume_txt = "volume:" + str(volume.id)
if disk.get("keep"):
volume_txt += ":keep"
created_items[volume_txt] = True
- block_device_mapping["vd" + chr(base_disk_index)] = volume.id
def new_shared_volumes(self, shared_volume_data) -> (str, str):
try:
volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
if volumes.get(disk["name"]):
sv_id = volumes[disk["name"]]
- volume = self.cinder.volumes.get(sv_id)
- self.update_block_device_mapping(
- volume=volume,
- block_device_mapping=block_device_mapping,
- base_disk_index=base_disk_index,
- disk=disk,
- created_items=created_items,
+ max_retries = 3
+ vol_status = ""
+ # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
+ while max_retries:
+ max_retries -= 1
+ volume = self.cinder.volumes.get(sv_id)
+ vol_status = volume.status
+ if volume.status not in ("in-use", "available"):
+ time.sleep(5)
+ continue
+ self.update_block_device_mapping(
+ volume=volume,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ disk=disk,
+ created_items=created_items,
+ )
+ return
+ raise vimconn.VimConnException(
+ "Shared volume is not prepared, status is: {}".format(vol_status),
+ http_code=vimconn.HTTP_Internal_Server_Error,
)
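The loop above gives the shared volume up to max_retries polls (3 x 5 s, roughly 15 seconds) to leave the transient "reserved" state. A minimal, generic sketch of the same poll-or-fail pattern, with all names chosen for illustration:

    import time

    def wait_until_attachable(cinder, volume_id, retries=3, interval=5):
        """Illustrative helper: poll a Cinder volume until it can be attached."""
        status = ""
        while retries:
            retries -= 1
            status = cinder.volumes.get(volume_id).status
            if status in ("in-use", "available"):
                return status
            time.sleep(interval)
        raise RuntimeError("volume {} not ready, status: {}".format(volume_id, status))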
def _prepare_non_root_persistent_volumes(
Args:
shared_volume_vim_id (str): ID of shared volume in VIM
"""
+ elapsed_time = 0
try:
- if self.cinder.volumes.get(shared_volume_vim_id).status != "available":
- return True
+ while elapsed_time < server_timeout:
+ vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
+ if vol_status == "available":
+ self.cinder.volumes.delete(shared_volume_vim_id)
+ return True
- else:
- self.cinder.volumes.delete(shared_volume_vim_id)
+ time.sleep(5)
+ elapsed_time += 5
+
+ if elapsed_time >= server_timeout:
+ raise vimconn.VimConnException(
+ "Timeout waiting for volume "
+ + shared_volume_vim_id
+ + " to be available",
+ http_code=vimconn.HTTP_Request_Timeout,
+ )
except Exception as e:
self.logger.error(
"Error deleting volume: {}: {}".format(type(e).__name__, e)
)
+ self._format_exception(e)
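delete_shared_volumes now waits for the volume to become "available" instead of giving up after a single check, polling every 5 seconds until server_timeout is reached. A small worked example of the resulting bound, assuming a module-level server_timeout of 600 seconds (an assumption; check the constant defined in the file):

    # Assumed timeout constant; the poll interval matches the loop above.
    server_timeout = 600                          # seconds
    poll_interval = 5                             # seconds
    max_polls = server_timeout // poll_interval   # 120 status checks before HTTP_Request_Timeout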
def _delete_volumes_by_id_wth_cinder(
self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
):
except Exception as e:
self._format_exception(e)
- def get_vdu_state(self, vm_id):
- """
- Getting the state of a vdu
- param:
- vm_id: ID of an instance
+ def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+ """Get the state of a VDU.
+ Args:
+ vm_id (str): ID of an instance
+ host_is_required (bool): If True, a KeyError is raised when the host info is missing
+ from server_dict; it is missing whenever the VIM account is non-admin.
+ Returns:
+ vdu_data (list): VDU details: state, flavor id, host info, availability zone
+ """
self.logger.debug("Getting the status of VM")
self.logger.debug("VIM VM ID %s", vm_id)
- self._reload_connection()
- server_dict = self._find_nova_server(vm_id)
- vdu_data = [
- server_dict["status"],
- server_dict["flavor"]["id"],
- server_dict["OS-EXT-SRV-ATTR:host"],
- server_dict["OS-EXT-AZ:availability_zone"],
- ]
- self.logger.debug("vdu_data %s", vdu_data)
- return vdu_data
+ try:
+ self._reload_connection()
+ server_dict = self._find_nova_server(vm_id)
+ srv_attr = "OS-EXT-SRV-ATTR:host"
+ host_info = (
+ server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+ )
+ vdu_data = [
+ server_dict["status"],
+ server_dict["flavor"]["id"],
+ host_info,
+ server_dict["OS-EXT-AZ:availability_zone"],
+ ]
+ self.logger.debug("vdu_data %s", vdu_data)
+ return vdu_data
+
+ except Exception as e:
+ self._format_exception(e)
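A usage sketch of the extended signature; conn stands for an already constructed vimconnector and vm_id for an existing instance id, both placeholders:

    # Admin credentials: require the host attribute (errors are routed through _format_exception).
    status, flavor_id, host, av_zone = conn.get_vdu_state(vm_id, host_is_required=True)

    # Non-admin credentials: host is simply None instead of raising.
    status, flavor_id, host, av_zone = conn.get_vdu_state(vm_id)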
def check_compute_availability(self, host, server_flavor_details):
self._reload_connection()
"""
self._reload_connection()
vm_state = False
- instance_state = self.get_vdu_state(vm_id)
+ instance_state = self.get_vdu_state(vm_id, host_is_required=True)
server_flavor_id = instance_state[1]
server_hypervisor_name = instance_state[2]
server_availability_zone = instance_state[3]
http_code=vimconn.HTTP_Bad_Request,
)
if available_compute_id is not None:
+ # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
self.nova.servers.live_migrate(
server=vm_id,
host=available_compute_id,
block_migration=True,
- disk_over_commit=False,
)
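# For reference (names as above, values illustrative): with compute API microversion
# >= 2.25 the os-migrateLive action no longer accepts disk_over_commit, and
# block_migration may also be passed as "auto"; older microversions still expect it:
#   microversion <  2.25: nova.servers.live_migrate(server=vm_id, host=target, block_migration=True, disk_over_commit=False)
#   microversion >= 2.25: nova.servers.live_migrate(server=vm_id, host=target, block_migration=True)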
state = "MIGRATING"
changed_compute_host = ""
if state == "MIGRATING":
vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
- changed_compute_host = self.get_vdu_state(vm_id)[2]
+ changed_compute_host = self.get_vdu_state(
+ vm_id, host_is_required=True
+ )[2]
if vm_state and changed_compute_host == available_compute_id:
self.logger.debug(
"Instance '{}' migrated to the new compute host '{}'".format(