version = self.config.get("microversion")
if not version:
- version = "2.1"
+ version = "2.60"
# addedd region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
# Titanium cloud and StarlingX
endpoint_type=self.endpoint_type,
region_name=region_name,
)
- self.cinder = self.session["cinder"] = cClient.Client(
- 2,
- session=sess,
- endpoint_type=self.endpoint_type,
- region_name=region_name,
- )
+
+ if sess.get_all_version_data(service_type="volumev2"):
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 2,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
+ else:
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 3,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
try:
self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
ksExceptions.BadRequest,
),
):
+ if message_error == "OS-EXT-SRV-ATTR:host":
+ tip = " (If the user does not have non-admin credentials, this attribute will be missing)"
+ raise vimconn.VimConnInsufficientCredentials(
+ type(exception).__name__ + ": " + message_error + tip
+ )
raise vimconn.VimConnException(
type(exception).__name__ + ": " + message_error
)
+
elif isinstance(
exception,
(
"Not found security group {} for this tenant".format(sg)
)
+ def _find_nova_server(self, vm_id):
+ """
+ Returns the VM instance from Openstack and completes it with flavor ID
+ Do not call nova.servers.find directly, as it does not return flavor ID with microversion>=2.47
+ """
+ try:
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
+ # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+ server_dict = server.to_dict()
+ try:
+ # With microversion >= 2.47 the embedded "flavor" carries only
+ # "original_name"; resolve the flavor ID by name so callers that
+ # read server_dict["flavor"]["id"] keep working.
+ server_dict["flavor"]["id"] = self.nova.flavors.find(
+ name=server_dict["flavor"]["original_name"]
+ ).id
+ except nClient.exceptions.NotFound as e:
+ # Flavor no longer present (e.g. deleted after VM creation): log and
+ # return the server dict without a resolved flavor id instead of failing.
+ self.logger.warning(str(e.message))
+ return server_dict
+ except (
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ConnectionError,
+ ) as e:
+ # NOTE(review): _format_exception is expected to map client errors to
+ # vimconn exceptions and re-raise — confirm against its definition.
+ self._format_exception(e)
+
def check_vim_connectivity(self):
+ # Connectivity probe only: a successful network list proves both credentials
+ # and endpoint reachability; the returned list itself is discarded.
# just get network list to check connectivity and credentials
self.get_network_list(filter_dict={})
if not ip_profile.get("subnet_address"):
# Fake subnet is required
- subnet_rand = random.randint(0, 255)
+ subnet_rand = random.SystemRandom().randint(0, 255)
ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
if "ip_version" not in ip_profile:
ip_str = str(netaddr.IPAddress(ip_int))
subnet["allocation_pools"][0]["end"] = ip_str
+ if (
+ ip_profile.get("ipv6_address_mode")
+ and ip_profile["ip_version"] != "IPv4"
+ ):
+ subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+ # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+ # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+ subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
# self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
self.neutron.create_subnet({"subnet": subnet})
extra_specs (dict): Extra specs dict to be updated
"""
- # If there is not any numa, numas_nodes equals to 0.
- if not numa_nodes:
- extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
-
# If there are several numas, we do not define specific affinity.
extra_specs["vmware:latency_sensitivity_level"] = "high"
if net.get("mac_address"):
port_dict["mac_address"] = net["mac_address"]
- if net.get("ip_address"):
- port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+ ip_dual_list = []
+ if ip_list := net.get("ip_address"):
+ if not isinstance(ip_list, list):
+ ip_list = [ip_list]
+ for ip in ip_list:
+ ip_dict = {"ip_address": ip}
+ ip_dual_list.append(ip_dict)
+ port_dict["fixed_ips"] = ip_dual_list
# TODO add "subnet_id": <subnet_id>
def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
"""
new_port = self.neutron.create_port({"port": port_dict})
created_items["port:" + str(new_port["port"]["id"])] = True
- net["mac_adress"] = new_port["port"]["mac_address"]
+ net["mac_address"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
return new_port
"Created volume is not valid, does not have id attribute."
)
+ block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+ if disk.get("multiattach"): # multiattach volumes do not belong to VDUs
+ return
volume_txt = "volume:" + str(volume.id)
if disk.get("keep"):
volume_txt += ":keep"
created_items[volume_txt] = True
- block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+
+ def new_shared_volumes(self, shared_volume_data) -> (str, str):
+ """Create a shared (multiattach) volume in Cinder.
+ Args:
+ shared_volume_data (dict): Must contain "size" (GiB) and "name" keys.
+ Returns:
+ tuple: (volume name, volume id) of the newly created volume.
+ """
+ try:
+ # "multiattach" volume type lets the same volume be attached to several
+ # VMs (see _prepare_shared_volumes, which attaches it per VDU).
+ volume = self.cinder.volumes.create(
+ size=shared_volume_data["size"],
+ name=shared_volume_data["name"],
+ volume_type="multiattach",
+ )
+ return (volume.name, volume.id)
+ except (ConnectionError, KeyError) as e:
+ # KeyError covers a missing "size"/"name"; both are mapped to vimconn errors.
+ self._format_exception(e)
+
+ def _prepare_shared_volumes(
+ self,
+ name: str,
+ disk: dict,
+ base_disk_index: int,
+ block_device_mapping: dict,
+ existing_vim_volumes: list,
+ created_items: dict,
+ ):
+ """Attach an already-created shared (multiattach) volume to the VM being built.
+ Looks up the volume by disk["name"], waits until its status allows attachment
+ and fills block_device_mapping via update_block_device_mapping.
+ Raises:
+ VimConnException: if the volume does not reach "in-use"/"available" in time.
+ """
+ # Map existing volume names to ids; the shared volume is searched by name.
+ volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
+ if volumes.get(disk["name"]):
+ sv_id = volumes[disk["name"]]
+ max_retries = 3
+ vol_status = ""
+ # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
+ while max_retries:
+ max_retries -= 1
+ volume = self.cinder.volumes.get(sv_id)
+ vol_status = volume.status
+ if volume.status not in ("in-use", "available"):
+ # Transient state (e.g. "reserved"): wait and re-check, 3 tries x 5s.
+ time.sleep(5)
+ continue
+ self.update_block_device_mapping(
+ volume=volume,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ disk=disk,
+ created_items=created_items,
+ )
+ return
+ # Retries exhausted without the volume becoming attachable.
+ raise vimconn.VimConnException(
+ "Shared volume is not prepared, status is: {}".format(vol_status),
+ http_code=vimconn.HTTP_Internal_Server_Error,
+ )
def _prepare_non_root_persistent_volumes(
self,
# Non-root persistent volumes
# Disk may include only vim_volume_id or only vim_id."
key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
if disk.get(key_id):
# Use existing persistent volume
block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
existing_vim_volumes.append({"id": disk[key_id]})
-
else:
- # Create persistent volume
+ volume_name = f"{name}vd{chr(base_disk_index)}"
volume = self.cinder.volumes.create(
size=disk["size"],
- name=name + "vd" + chr(base_disk_index),
+ name=volume_name,
# Make sure volume is in the same AZ as the VM to be attached to
availability_zone=vm_av_zone,
)
elapsed_time (int): Time spent while waiting
"""
-
while elapsed_time < volume_timeout:
for created_item in created_items:
v, volume_id = (
created_item.split(":")[1],
)
if v == "volume":
- if self.cinder.volumes.get(volume_id).status != "available":
+ volume = self.cinder.volumes.get(volume_id)
+ if (
+ volume.volume_type == "multiattach"
+ and volume.status == "in-use"
+ ):
+ return elapsed_time
+ elif volume.status != "available":
break
else:
# All ready: break from while
while elapsed_time < volume_timeout:
for volume in existing_vim_volumes:
- if self.cinder.volumes.get(volume["id"]).status != "available":
+ v = self.cinder.volumes.get(volume["id"])
+ if v.volume_type == "multiattach" and v.status == "in-use":
+ return elapsed_time
+ elif v.status != "available":
break
else: # all ready: break from while
break
base_disk_index = ord("b")
boot_volume_id = None
elapsed_time = 0
-
for disk in disk_list:
if "image_id" in disk:
# Root persistent volume
existing_vim_volumes=existing_vim_volumes,
created_items=created_items,
)
+ elif disk.get("multiattach"):
+ self._prepare_shared_volumes(
+ name=name,
+ disk=disk,
+ base_disk_index=base_disk_index,
+ block_device_mapping=block_device_mapping,
+ existing_vim_volumes=existing_vim_volumes,
+ created_items=created_items,
+ )
else:
# Non-root persistent volume
self._prepare_non_root_persistent_volumes(
server_group_id,
)
)
-
# Create VM
server = self.nova.servers.create(
name=name,
def get_vminstance(self, vm_id):
"""Returns the VM instance information from VIM"""
- # self.logger.debug("Getting VM from VIM")
- try:
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
- # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
- return server.to_dict()
- except (
- ksExceptions.ClientException,
- nvExceptions.ClientException,
- nvExceptions.NotFound,
- ConnectionError,
- ) as e:
- self._format_exception(e)
+ # Delegate to _find_nova_server so the flavor id is resolved even with
+ # Nova microversion >= 2.47, where servers.find no longer returns it.
+ return self._find_nova_server(vm_id)
def get_vminstance_console(self, vm_id, console_type="vnc"):
"""
k_id (str): Port id in the VIM
"""
try:
- port_dict = self.neutron.list_ports()
- existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
-
- if k_id in existing_ports:
- self.neutron.delete_port(k_id)
+ self.neutron.delete_port(k_id)
except Exception as e:
self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+ def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
+ """Cinder delete volume by id.
+ Args:
+ shared_volume_vim_id (str): ID of shared volume in VIM
+ Returns:
+ bool: True when the volume has been deleted.
+ Raises:
+ VimConnException: on timeout waiting for the volume to become "available".
+ """
+ elapsed_time = 0
+ try:
+ # Poll every 5s: Cinder only accepts the delete once the volume is
+ # "available" (i.e. detached from every VM).
+ # NOTE(review): server_timeout is defined elsewhere in this module — confirm.
+ while elapsed_time < server_timeout:
+ vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
+ if vol_status == "available":
+ self.cinder.volumes.delete(shared_volume_vim_id)
+ return True
+
+ time.sleep(5)
+ elapsed_time += 5
+
+ if elapsed_time >= server_timeout:
+ raise vimconn.VimConnException(
+ "Timeout waiting for volume "
+ + shared_volume_vim_id
+ + " to be available",
+ http_code=vimconn.HTTP_Request_Timeout,
+ )
+
+ except Exception as e:
+ # Log, then map to a vimconn exception (includes the timeout raised above).
+ self.logger.error(
+ "Error deleting volume: {}: {}".format(type(e).__name__, e)
+ )
+ self._format_exception(e)
+
def _delete_volumes_by_id_wth_cinder(
self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
) -> bool:
try:
k_item, k_id = self._get_item_name_id(k)
-
if k_item == "volume":
unavailable_vol = self._delete_volumes_by_id_wth_cinder(
k, k_id, volumes_to_hold, created_items
) as e:
self._format_exception(e)
- def get_vdu_state(self, vm_id):
- """
- Getting the state of a vdu
- param:
- vm_id: ID of an instance
+ def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+ """Getting the state of a VDU.
+ Args:
+ vm_id (str): ID of an instance
+ host_is_required (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
+ and if this is set to True, it raises KeyError.
+ Returns:
+ vdu_data (list): VDU details including state, flavor, host_info, AZ
"""
self.logger.debug("Getting the status of VM")
self.logger.debug("VIM VM ID %s", vm_id)
- self._reload_connection()
- server = self.nova.servers.find(id=vm_id)
- server_dict = server.to_dict()
- vdu_data = [
- server_dict["status"],
- server_dict["flavor"]["id"],
- server_dict["OS-EXT-SRV-ATTR:host"],
- server_dict["OS-EXT-AZ:availability_zone"],
- ]
- self.logger.debug("vdu_data %s", vdu_data)
- return vdu_data
+ try:
+ self._reload_connection()
+ server_dict = self._find_nova_server(vm_id)
+ srv_attr = "OS-EXT-SRV-ATTR:host"
+ # Non-admin accounts do not get "OS-EXT-SRV-ATTR:host": .get() keeps it
+ # optional (None), while indexing raises KeyError when the caller
+ # explicitly requires the host (host_is_required=True).
+ host_info = (
+ server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+ )
+ vdu_data = [
+ server_dict["status"],
+ server_dict["flavor"]["id"],
+ host_info,
+ server_dict["OS-EXT-AZ:availability_zone"],
+ ]
+ self.logger.debug("vdu_data %s", vdu_data)
+ return vdu_data
+
+ except Exception as e:
+ # Includes the KeyError raised when host_is_required and host is missing.
+ self._format_exception(e)
def check_compute_availability(self, host, server_flavor_details):
self._reload_connection()
"""
self._reload_connection()
vm_state = False
- instance_state = self.get_vdu_state(vm_id)
+ instance_state = self.get_vdu_state(vm_id, host_is_required=True)
server_flavor_id = instance_state[1]
server_hypervisor_name = instance_state[2]
server_availability_zone = instance_state[3]
http_code=vimconn.HTTP_Bad_Request,
)
if available_compute_id is not None:
+ # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
self.nova.servers.live_migrate(
server=vm_id,
host=available_compute_id,
block_migration=True,
- disk_over_commit=False,
)
state = "MIGRATING"
changed_compute_host = ""
if state == "MIGRATING":
vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
- changed_compute_host = self.get_vdu_state(vm_id)[2]
+ changed_compute_host = self.get_vdu_state(
+ vm_id, host_is_required=True
+ )[2]
if vm_state and changed_compute_host == available_compute_id:
self.logger.debug(
"Instance '{}' migrated to the new compute host '{}'".format(
self.__wait_for_vm(vm_id, "ACTIVE")
instance_status = self.get_vdu_state(vm_id)[0]
return instance_status
+
+ def get_monitoring_data(self):
+ """Collect all servers and all ports from the VIM for monitoring purposes.
+ Returns:
+ tuple: (Nova server list with flavor ids resolved, Neutron ports listing)
+ """
+ try:
+ self.logger.debug("Getting servers and ports data from Openstack VIMs.")
+ self._reload_connection()
+ all_servers = self.nova.servers.list(detailed=True)
+ try:
+ # Resolve flavor ids by name, as done in _find_nova_server (the
+ # embedded flavor entry only carries "original_name" on newer microversions).
+ for server in all_servers:
+ server.flavor["id"] = self.nova.flavors.find(
+ name=server.flavor["original_name"]
+ ).id
+ except nClient.exceptions.NotFound as e:
+ # A deleted flavor is not fatal for monitoring; log and continue.
+ self.logger.warning(str(e.message))
+ all_ports = self.neutron.list_ports()
+ return all_servers, all_ports
+ except (
+ vimconn.VimConnException,
+ vimconn.VimConnNotFoundException,
+ vimconn.VimConnConnectionException,
+ ) as e:
+ # Re-wrap with monitoring context so the caller can tell where it failed.
+ raise vimconn.VimConnException(
+ f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+ )