version = self.config.get("microversion")
if not version:
- version = "2.1"
+ version = "2.60"
# added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
# Titanium cloud and StarlingX
endpoint_type=self.endpoint_type,
region_name=region_name,
)
- self.cinder = self.session["cinder"] = cClient.Client(
- 2,
- session=sess,
- endpoint_type=self.endpoint_type,
- region_name=region_name,
- )
+
+ if sess.get_all_version_data(service_type="volumev2"):
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 2,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
+ else:
+ self.cinder = self.session["cinder"] = cClient.Client(
+ 3,
+ session=sess,
+ endpoint_type=self.endpoint_type,
+ region_name=region_name,
+ )
try:
self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id()
if not ip_profile.get("subnet_address"):
# Fake subnet is required
- subnet_rand = random.randint(0, 255)
+ subnet_rand = random.SystemRandom().randint(0, 255)
ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
if "ip_version" not in ip_profile:
ip_str = str(netaddr.IPAddress(ip_int))
subnet["allocation_pools"][0]["end"] = ip_str
+ if (
+ ip_profile.get("ipv6_address_mode")
+ and ip_profile["ip_version"] != "IPv4"
+ ):
+ subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+ # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+ # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+ subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
# self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
self.neutron.create_subnet({"subnet": subnet})
extra_specs (dict): Extra specs dict to be updated
"""
- # If there is not any numa, numas_nodes equals to 0.
- if not numa_nodes:
- extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
-
# If there are several numas, we do not define specific affinity.
extra_specs["vmware:latency_sensitivity_level"] = "high"
if net.get("mac_address"):
port_dict["mac_address"] = net["mac_address"]
- if net.get("ip_address"):
- port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
+ ip_dual_list = []
+ if ip_list := net.get("ip_address"):
+ if not isinstance(ip_list, list):
+ ip_list = [ip_list]
+ for ip in ip_list:
+ ip_dict = {"ip_address": ip}
+ ip_dual_list.append(ip_dict)
+ port_dict["fixed_ips"] = ip_dual_list
# TODO add "subnet_id": <subnet_id>
def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
"""
new_port = self.neutron.create_port({"port": port_dict})
created_items["port:" + str(new_port["port"]["id"])] = True
- net["mac_adress"] = new_port["port"]["mac_address"]
+ net["mac_address"] = new_port["port"]["mac_address"]
net["vim_id"] = new_port["port"]["id"]
return new_port
created_items[volume_txt] = True
block_device_mapping["vd" + chr(base_disk_index)] = volume.id
def new_shared_volumes(self, shared_volume_data) -> "tuple[str, str]":
    """Create a multiattach ("shared") volume in Cinder.

    Args:
        shared_volume_data (dict): must contain "size" (volume size) and
            "name" (volume name) for the volume to be created.

    Returns:
        tuple[str, str]: (volume name, volume id) of the created volume.

    Raises:
        Whatever self._format_exception() translates a ConnectionError or
        KeyError (missing "size"/"name" key) into.
    """
    try:
        # The "multiattach" volume type lets the same volume be attached
        # to several VMs at once (shared disk between VDUs).
        volume = self.cinder.volumes.create(
            size=shared_volume_data["size"],
            name=shared_volume_data["name"],
            volume_type="multiattach",
        )
        return volume.name, volume.id
    except (ConnectionError, KeyError) as e:
        self._format_exception(e)
+
+ def _prepare_shared_volumes(
+ self,
+ name: str,
+ disk: dict,
+ base_disk_index: int,
+ block_device_mapping: dict,
+ existing_vim_volumes: list,
+ created_items: dict,
+ ):
+ volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
+ if volumes.get(disk["name"]):
+ sv_id = volumes[disk["name"]]
+ volume = self.cinder.volumes.get(sv_id)
+ self.update_block_device_mapping(
+ volume=volume,
+ block_device_mapping=block_device_mapping,
+ base_disk_index=base_disk_index,
+ disk=disk,
+ created_items=created_items,
+ )
+
def _prepare_non_root_persistent_volumes(
self,
name: str,
# Non-root persistent volumes
# Disk may include only vim_volume_id or only vim_id."
key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
if disk.get(key_id):
# Use existing persistent volume
block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
existing_vim_volumes.append({"id": disk[key_id]})
-
else:
- # Create persistent volume
+ volume_name = f"{name}vd{chr(base_disk_index)}"
volume = self.cinder.volumes.create(
size=disk["size"],
- name=name + "vd" + chr(base_disk_index),
+ name=volume_name,
# Make sure volume is in the same AZ as the VM to be attached to
availability_zone=vm_av_zone,
)
elapsed_time (int): Time spent while waiting
"""
-
while elapsed_time < volume_timeout:
for created_item in created_items:
v, volume_id = (
created_item.split(":")[1],
)
if v == "volume":
- if self.cinder.volumes.get(volume_id).status != "available":
+ volume = self.cinder.volumes.get(volume_id)
+ if (
+ volume.volume_type == "multiattach"
+ and volume.status == "in-use"
+ ):
+ return elapsed_time
+ elif volume.status != "available":
break
else:
# All ready: break from while
while elapsed_time < volume_timeout:
for volume in existing_vim_volumes:
- if self.cinder.volumes.get(volume["id"]).status != "available":
+ v = self.cinder.volumes.get(volume["id"])
+ if v.volume_type == "multiattach" and v.status == "in-use":
+ return elapsed_time
+ elif v.status != "available":
break
else: # all ready: break from while
break
base_disk_index = ord("b")
boot_volume_id = None
elapsed_time = 0
-
for disk in disk_list:
if "image_id" in disk:
# Root persistent volume
existing_vim_volumes=existing_vim_volumes,
created_items=created_items,
)
+ elif disk.get("multiattach"):
+ self._prepare_shared_volumes(
+ name=name,
+ disk=disk,
+ base_disk_index=base_disk_index,
+ block_device_mapping=block_device_mapping,
+ existing_vim_volumes=existing_vim_volumes,
+ created_items=created_items,
+ )
else:
# Non-root persistent volume
self._prepare_non_root_persistent_volumes(
server_group_id,
)
)
-
# Create VM
server = self.nova.servers.create(
name=name,
except Exception as e:
self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
    """Delete a shared (multiattach) volume by id, unless it is still in use.

    Args:
        shared_volume_vim_id (str): ID of shared volume in VIM

    Returns:
        bool: True when the volume is not in the "available" state and was
            therefore kept; False when it was deleted (or deletion failed
            and the error was logged).
    """
    try:
        if self.cinder.volumes.get(shared_volume_vim_id).status != "available":
            # Volume still attached (or otherwise not deletable): keep it.
            return True
        self.cinder.volumes.delete(shared_volume_vim_id)
        return False
    except Exception as e:
        # Best-effort deletion: log and report "not kept" instead of raising.
        self.logger.error(
            "Error deleting volume: {}: {}".format(type(e).__name__, e)
        )
        return False
+
def _delete_volumes_by_id_wth_cinder(
self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
) -> bool:
try:
k_item, k_id = self._get_item_name_id(k)
-
if k_item == "volume":
unavailable_vol = self._delete_volumes_by_id_wth_cinder(
k, k_id, volumes_to_hold, created_items
self.__wait_for_vm(vm_id, "ACTIVE")
instance_status = self.get_vdu_state(vm_id)[0]
return instance_status
+
def get_monitoring_data(self):
    """Collect raw monitoring data from the VIM.

    Returns:
        tuple: (detailed Nova server list, Neutron ports listing).

    Raises:
        vimconn.VimConnException: when the underlying VIM queries fail.
    """
    try:
        self.logger.debug("Getting servers and ports data from Openstack VIMs.")
        self._reload_connection()
        servers = self.nova.servers.list(detailed=True)
        ports = self.neutron.list_ports()
    except (
        vimconn.VimConnException,
        vimconn.VimConnNotFoundException,
        vimconn.VimConnConnectionException,
    ) as e:
        raise vimconn.VimConnException(
            f"Exception in monitoring while getting VMs and ports status: {str(e)}"
        )
    return servers, ports