From 6af6f6d140491bf9881dbab4d634ded6e43ffd44 Mon Sep 17 00:00:00 2001 From: lloretgalleg Date: Tue, 4 Mar 2025 11:37:14 +0000 Subject: [PATCH] Feature 11051: VIM plugin for VMware vCenter Change-Id: If63f0625725da9141585bef675224258bbd047cc Signed-off-by: lloretgalleg --- Dockerfile.local | 15 +- RO-VIM-vcenter/osm_rovim_vcenter/__init__.py | 15 + .../osm_rovim_vcenter/nsx_client.py | 128 ++ .../osm_rovim_vcenter/vcenter_config.py | 279 ++++ .../osm_rovim_vcenter/vcenter_ipmanager.py | 365 +++++ .../osm_rovim_vcenter/vcenter_network.py | 281 ++++ .../osm_rovim_vcenter/vcenter_util.py | 163 ++ .../osm_rovim_vcenter/vcenter_vms.py | 1147 +++++++++++++ .../osm_rovim_vcenter/vim_helper.py | 94 ++ .../osm_rovim_vcenter/vimconn_vcenter.py | 1442 +++++++++++++++++ RO-VIM-vcenter/requirements.in | 21 + RO-VIM-vcenter/setup.py | 59 + RO-VIM-vcenter/stdeb.cfg | 19 + devops-stages/stage-build.sh | 1 + tox.ini | 15 + 15 files changed, 4042 insertions(+), 2 deletions(-) create mode 100644 RO-VIM-vcenter/osm_rovim_vcenter/__init__.py create mode 100644 RO-VIM-vcenter/osm_rovim_vcenter/nsx_client.py create mode 100644 RO-VIM-vcenter/osm_rovim_vcenter/vcenter_config.py create mode 100644 RO-VIM-vcenter/osm_rovim_vcenter/vcenter_ipmanager.py create mode 100644 RO-VIM-vcenter/osm_rovim_vcenter/vcenter_network.py create mode 100644 RO-VIM-vcenter/osm_rovim_vcenter/vcenter_util.py create mode 100644 RO-VIM-vcenter/osm_rovim_vcenter/vcenter_vms.py create mode 100644 RO-VIM-vcenter/osm_rovim_vcenter/vim_helper.py create mode 100644 RO-VIM-vcenter/osm_rovim_vcenter/vimconn_vcenter.py create mode 100644 RO-VIM-vcenter/requirements.in create mode 100644 RO-VIM-vcenter/setup.py create mode 100644 RO-VIM-vcenter/stdeb.cfg diff --git a/Dockerfile.local b/Dockerfile.local index 1aea781f..b3ebca3f 100644 --- a/Dockerfile.local +++ b/Dockerfile.local @@ -21,8 +21,11 @@ WORKDIR /build RUN DEBIAN_FRONTEND=noninteractive apt-get --yes update && \ DEBIAN_FRONTEND=noninteractive apt-get --yes install \ gcc python3 python3-dev python3-venv python3-pip \ - python3-setuptools curl git genisoimage netbase && \ - python3 -m pip install -U pip build + python3-setuptools curl git genisoimage netbase libmagic1 file && \ + apt-get clean && rm -rf /var/lib/apt/lists/* + +# Upgrade pip and install build tools +RUN python3 -m pip install -U pip build COPY . 
/build @@ -46,6 +49,9 @@ RUN python3 -m build /build/RO-VIM-vmware && \ RUN python3 -m build /build/RO-VIM-openstack && \ python3 -m pip install /build/RO-VIM-openstack/dist/*.whl +RUN python3 -m build /build/RO-VIM-vcenter && \ + python3 -m pip install /build/RO-VIM-vcenter/dist/*.whl + RUN python3 -m build /build/RO-VIM-openvim && \ python3 -m pip install /build/RO-VIM-openvim/dist/*.whl @@ -95,6 +101,11 @@ COPY --from=INSTALL /usr/local/lib/python3.10/dist-packages /usr/local/lib/pyth COPY --from=INSTALL /usr/bin/genisoimage /usr/bin/genisoimage COPY --from=INSTALL /etc/protocols /etc/protocols +# Copy libmagic.so.1 and dependencies +COPY --from=INSTALL /usr/lib/x86_64-linux-gnu/libmagic.so.1 /usr/lib/x86_64-linux-gnu/ +COPY --from=INSTALL /usr/lib/x86_64-linux-gnu/libmagic.so.1.* /usr/lib/x86_64-linux-gnu/ +COPY --from=INSTALL /usr/share/misc/magic.mgc /usr/share/misc/magic.mgc + VOLUME /var/log/osm EXPOSE 9090 diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/__init__.py b/RO-VIM-vcenter/osm_rovim_vcenter/__init__.py new file mode 100644 index 00000000..676bd0e9 --- /dev/null +++ b/RO-VIM-vcenter/osm_rovim_vcenter/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright {2025} Indra +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/nsx_client.py b/RO-VIM-vcenter/osm_rovim_vcenter/nsx_client.py new file mode 100644 index 00000000..364435eb --- /dev/null +++ b/RO-VIM-vcenter/osm_rovim_vcenter/nsx_client.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- +# Copyright 2025 Indra +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
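For orientation, the NsxClient class added below wraps a single REST pattern: an authenticated GET against the NSX Policy API. A minimal standalone sketch of that pattern, with hypothetical manager URL, credentials and segment path (not taken from a real deployment):

    import requests
    from requests.auth import HTTPBasicAuth

    # Hypothetical values; replace with a real NSX manager and segment
    nsx_url = "https://nsx-manager.example.com"
    segment_path = "/infra/segments/example-segment"

    # Same request shape as NsxClient._process_http_get_request below
    response = requests.get(
        f"{nsx_url}/policy/api/v1{segment_path}",
        headers={"Accept": "application/json", "Content-Type": "application/json"},
        auth=HTTPBasicAuth("admin", "secret"),
        verify=False,  # or a CA bundle path, as the class also accepts
    )
    response.raise_for_status()
    print(response.json().get("subnets"))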
+
+"""
+Utility class to deal with NSX in vcenter
+"""
+import logging
+import os
+
+from osm_ro_plugin import vimconn
+import requests
+from requests.auth import HTTPBasicAuth
+
+
+class NsxClient:
+    """
+    Class that handles interactions with vcenter NSX
+    """
+
+    NSX_POLICY_V1_API_PREFIX = "/policy/api/v1"
+
+    def __init__(
+        self, nsx_manager_url, user, password, verify_ssl=False, log_level=None
+    ):
+        self.nsx_manager_url = nsx_manager_url
+        self.user = user
+        self.password = password
+        self.verify_ssl = verify_ssl
+
+        self.logger = logging.getLogger("ro.vim.vcenter.nsx")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+        self.logger.info("verify_ssl: %s", self.verify_ssl)
+        if not self.verify_ssl:
+            self.logger.info("Insecure access to nsx is configured")
+
+    def get_nsx_segment_dhcp_config(self, segment_path):
+        """
+        Obtain nsx subnet config from segment path
+        """
+        self.logger.debug("Obtain nsx segment dhcp configuration: %s", segment_path)
+        url = f"{self.nsx_manager_url}{self.NSX_POLICY_V1_API_PREFIX}{segment_path}"
+        response_json = self._process_http_get_request(url)
+        subnets = response_json.get("subnets")
+        self.logger.debug("Subnets recovered: %s", subnets)
+        return subnets
+
+    def _process_http_get_request(self, get_request_url):
+        headers = {
+            "Accept": "application/json",
+            "Content-Type": "application/json",
+        }
+
+        auth = self._get_auth()
+        if isinstance(auth, dict):  # Token-based or API-key authentication
+            headers.update(auth)
+
+        try:
+            response = requests.get(
+                get_request_url,
+                headers=headers,
+                auth=auth if not isinstance(auth, dict) else None,
+                verify=self.verify_ssl,
+            )
+            if not response.ok:
+                raise vimconn.VimConnException(
+                    f"Error nsx get request, text: {response.text}",
+                    http_code=response.status_code,
+                )
+            return response.json()
+        except requests.RequestException as e:
+            self.logger.error(
+                "Error nsx get request, url: %s, error: %s", get_request_url, e
+            )
+            raise vimconn.VimConnException(
+                f"Error nsx get request, url: {get_request_url}, error: {str(e)}"
+            ) from e
+
+    def _get_auth(self):
+        # Obtain authentication; for the moment only basic authentication is used,
+        # it could be extended to support other authentication methods
+        return HTTPBasicAuth(self.user, self.password)
+
+
+if __name__ == "__main__":
+    # Init logger
+    log_format = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(funcName)s(): %(message)s"
+    logging.basicConfig(
+        level=logging.DEBUG,  # Set the logging level
+        format=log_format,  # Set the log message format
+        datefmt="%Y-%m-%dT%H:%M:%S",
+        handlers=[
+            logging.StreamHandler(),  # Log to the console
+        ],
+    )
+    logger = logging.getLogger("ro.vim.vmware.test_nsx")
+    logger.setLevel(level=logging.DEBUG)
+
+    test_nsx_url = os.getenv("NSX_URL")
+    test_nsx_user = os.getenv("NSX_USER")
+    test_nsx_password = os.getenv("NSX_PASSWORD")
+    if os.getenv("NSX_CACERT"):
+        test_verify_ssl = os.getenv("NSX_CACERT")
+    else:
+        test_verify_ssl = False
+
+    logger.debug("Create nsx client")
+    nsx_client = NsxClient(
+        test_nsx_url,
+        test_nsx_user,
+        test_nsx_password,
+        verify_ssl=test_verify_ssl,
+        log_level="DEBUG",
+    )
+    test_segment_path = "/infra/segments/b5a27856-e7ef-49ab-a09e-e4d3416db3d2"
+    nsx_client.get_nsx_segment_dhcp_config(test_segment_path)
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_config.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_config.py
new file mode 100644
index 00000000..76bab58f
--- /dev/null
+++ b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_config.py
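The "subnets" value returned by get_nsx_segment_dhcp_config() above is consumed later in this patch by VCenterIpManager to derive the gateway, netmask and DNS servers of a segment. A sketch of the expected payload shape, with illustrative values only:

    subnets = [
        {
            "gateway_address": "192.168.228.1/24",
            "network": "192.168.228.0/24",
            "dhcp_config": {"dns_servers": ["8.8.8.8", "8.8.4.4"]},
        }
    ]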
@@ -0,0 +1,279 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Utility class to get configuration information in vcenter.
+It should be used to get information about datacenters, datastores,
+clusters and other configuration objects in vcenter
+"""
+import logging
+
+from osm_ro_plugin import vimconn
+from osm_rovim_vcenter import vcenter_util as vcutil
+from pyVmomi import vim
+
+
+DEFAULT_BASE_FOLDER_NAME = "OSM"
+DEFAULT_IMAGES_FOLDER_NAME = "OSM-Images"
+DEFAULT_INSTANCES_FOLDER_NAME = "OSM-Instances"
+
+
+class VCenterConfig:
+    """
+    Class used to handle vcenter configuration, used to recover
+    basic objects information: cluster, datastore, etc
+    """
+
+    def __init__(
+        self,
+        availability_zones,
+        tenant_id,
+        tenant_name,
+        distributed_switches_names=None,
+        datastore_name=None,
+        log_level=None,
+    ):
+
+        if isinstance(availability_zones, str):
+            self.availability_zones_names = [availability_zones]
+        else:
+            self.availability_zones_names = availability_zones
+
+        self.distributed_switches_names = None
+        if isinstance(distributed_switches_names, str):
+            self.distributed_switches_names = [distributed_switches_names]
+        else:
+            self.distributed_switches_names = distributed_switches_names
+
+        self.datastore_name = datastore_name
+        self.tenant_id = tenant_id
+        self.tenant_name = tenant_name
+        self.datacenter_name = None
+
+        # Initialize vim availability zones to None, it will be set the first time it
+        # is recovered
+        self.vim_availability_zones = None
+
+        # Configuration of folders
+        self.base_folder_name = DEFAULT_BASE_FOLDER_NAME
+        self.images_folder_name = DEFAULT_IMAGES_FOLDER_NAME
+        self.instances_folder_name = DEFAULT_INSTANCES_FOLDER_NAME
+
+        self.logger = logging.getLogger("ro.vim.vcenter.config")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+    def get_dvs_names(self, session):
+        """
+        Obtains the distributed switches names: if they are configured, just returns
+        the list. If the distributed switches names are not configured, it recovers
+        the names from the distributed switches available for the cluster
+        """
+        dvs_names = self.distributed_switches_names
+        if not dvs_names:
+            self.logger.debug(
+                "Recover distributed switches names from cluster configuration"
+            )
+            self.logger.warning(
+                "Method to get distributed switches names from cluster not "
+                "implemented"
+            )
+            dvs_names = []
+        return dvs_names
+
+    def get_images_folder(self, session):
+        """
+        Obtain OSM images folder
+        """
+        # Improvement: take into account the tenant_id
+        base_folder = vcutil.get_vcenter_folder(session, self.base_folder_name)
+        if not base_folder:
+            raise vimconn.VimConnNotFoundException(
+                "base folder for current tenant not found"
+            )
+
+        # Get images folder (inside the osm base folder)
+        images_folder = vcutil.get_vcenter_folder(
+            session, self.images_folder_name, base_folder
+        )
+        if not images_folder:
+            raise vimconn.VimConnNotFoundException(
+                "images folder for current tenant not found"
+            )
+
+        return images_folder
+
+    def get_instances_folder(self, session):
+        """
+        Obtain OSM instances folder
+        """
+        osm_base_folder = vcutil.get_vcenter_folder(session, self.base_folder_name)
+        if not osm_base_folder:
+            raise vimconn.VimConnNotFoundException(
+                f"base folder name {self.base_folder_name} for current tenant not found"
+            )
+
+        # Get instances folder (inside the osm base folder)
+        base_vms_folder = self._get_or_create_child_folder(
+            osm_base_folder, self.instances_folder_name
+        )
+
+        # For each tenant there will be a subfolder
+        instances_folder = self._get_or_create_child_folder(
+            base_vms_folder, self.tenant_name
+        )
+
+        return instances_folder
+
+    def _get_or_create_child_folder(self, vm_base_folder, child_folder_name):
+
+        # Check if the folder already exists
+        child_folder = None
+        for child in vm_base_folder.childEntity:
+            if isinstance(child, vim.Folder) and child.name == child_folder_name:
+                child_folder = child
+                break
+
+        if not child_folder:
+            # Create a new folder
+            child_folder = vm_base_folder.CreateFolder(child_folder_name)
+            self.logger.debug("Folder '%s' created successfully", child_folder_name)
+
+        return child_folder
+
+    def get_datastore(self, session):
+        """
+        Get the datastore indicated in the configuration
+        """
+        datastore = vcutil.get_vcenter_obj(
+            session, [vim.Datastore], self.datastore_name
+        )
+        if not datastore:
+            raise vimconn.VimConnNotFoundException(
+                f"Datastore with name: {self.datastore_name} not found"
+            )
+
+        return datastore
+
+    def get_datacenter_name(self, session):
+        """
+        Obtains the datacenter name, this data is cached
+        """
+        if not self.datacenter_name:
+            self.datacenter_name = self._get_datacenter_from_datastore(session)
+        return self.datacenter_name
+
+    def _get_datacenter_from_datastore(self, session):
+        datacenter_name = None
+
+        # Create a view of all datastores
+        content = session.RetrieveContent()
+        container = content.viewManager.CreateContainerView(
+            content.rootFolder, [vim.Datastore], True
+        )
+        datastores = container.view
+
+        for datastore in datastores:
+            if datastore.name == self.datastore_name:
+                # Traverse up the hierarchy to find the datacenter
+                parent = datastore.parent
+                while parent and not isinstance(parent, vim.Datacenter):
+                    parent = parent.parent
+                if isinstance(parent, vim.Datacenter):
+                    datacenter_name = parent.name
+                    break  # Datacenter name found, exit the loop
+        container.Destroy()
+
+        # Raise exception if no datacenter was found
+        if datacenter_name is None:
+            raise vimconn.VimConnException("Unable to find datacenter")
+        return datacenter_name
+
+    def get_cluster_rp_from_av_zone(
+        self, session, availability_zone_index, availability_zone_list
+    ):
+        """
+        Gets the resource pool and cluster corresponding to the indicated avzone
+        """
+
+        # get the availability zone from configuration
+        avzone_name = self.availability_zones_names[0]
+        return self._get_resource_pool_cluster_from_av_zone(session, avzone_name)
+
+    def _get_resource_pool_cluster_from_av_zone(self, session, avzone_name):
+        self.logger.debug("Search availability_zone name: %s", avzone_name)
+        # We have an availability zone that can correspond to a resource pool or to a cluster
+        # If it is a resource pool we will find the associated cluster
+        # If it is a cluster we will get the first resource pool associated
+
+        # Check if there is a resource pool with this name
+        resource_pool = self._get_resource_pool(session, avzone_name)
+
+        if resource_pool:
+            cluster = self._get_cluster_from_resource_pool(session, resource_pool)
+            if not cluster:
+                raise vimconn.VimConnNotFoundException(
+                    "unable to find cluster for resource pool "
+                    f"name: {resource_pool.name}"
+                )
+        else:
+            # Check if there is a cluster with this name
+            cluster = self._get_vcenter_cluster(session, avzone_name)
+            if not cluster:
+                raise vimconn.VimConnNotFoundException(
+                    f"Unable to find either cluster or resource pool with name {avzone_name}"
+                )
+
+            # Obtain resource pool for cluster
+            resource_pool = cluster.resourcePool
+
+        self.logger.debug(
+            "Recovered cluster name: %s and resource_pool: %s",
+            cluster.name,
+            resource_pool.name,
+        )
+        return cluster, resource_pool
+
+    def _get_cluster_from_resource_pool(self, server_instance, resource_pool):
+        cluster = None
+
+        parent = resource_pool.parent
+        while parent:
+            if isinstance(parent, vim.ClusterComputeResource):
+                cluster = parent
+                self.logger.debug(
+                    "Recovered cluster name: %s for resource pool: %s",
+                    cluster.name,
+                    resource_pool.name,
+                )
+                break
+            elif isinstance(parent, vim.ComputeResource):
+                # A standalone host resource pool hangs from a ComputeResource,
+                # not from a cluster
+                self.logger.warning("Parent is a host not a cluster")
+                cluster = parent
+                break
+            else:
+                parent = parent.parent
+
+        return cluster
+
+    def _get_resource_pool(self, session, resource_pool_name):
+        return vcutil.get_vcenter_obj(session, [vim.ResourcePool], resource_pool_name)
+
+    def _get_vcenter_cluster(self, server_instance, cluster_name):
+        return vcutil.get_vcenter_obj(
+            server_instance, [vim.ClusterComputeResource], cluster_name
+        )
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_ipmanager.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_ipmanager.py
new file mode 100644
index 00000000..b322107e
--- /dev/null
+++ b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_ipmanager.py
@@ -0,0 +1,365 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
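The VCenterConfig class above is instantiated by the connector with the tenant and availability-zone data of the VIM. A minimal usage sketch, assuming an already-established pyVmomi session and hypothetical inventory names:

    from osm_rovim_vcenter.vcenter_config import VCenterConfig

    vc_config = VCenterConfig(
        availability_zones=["Cluster1"],  # resource pool or cluster names (hypothetical)
        tenant_id="default",
        tenant_name="default",
        datastore_name="datastore1",  # hypothetical datastore
    )
    # With a pyVmomi session (see the __main__ blocks in this patch):
    # cluster, resource_pool = vc_config.get_cluster_rp_from_av_zone(session, 0, None)
    # images_folder = vc_config.get_images_folder(session)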
+
+"""
+Utility class to get and set the ip address information
+"""
+import ipaddress
+import logging
+import os
+import re
+import ssl
+
+from osm_ro_plugin import vimconn
+from osm_rovim_vcenter import vcenter_util as vcutil
+from osm_rovim_vcenter.nsx_client import NsxClient
+from osm_rovim_vcenter.vcenter_config import VCenterConfig
+from osm_rovim_vcenter.vcenter_network import VCenterNetworkUtil
+from osm_rovim_vcenter.vcenter_vms import VCenterVmsUtil
+from pyVim.connect import Disconnect, SmartConnect
+from pyVmomi import vim
+
+
+class VCenterIpManager:
+    """
+    Helper class to deal with setting and recovering fixed ip addresses
+    """
+
+    def __init__(
+        self,
+        vc_netutil: VCenterNetworkUtil,
+        nsx_url=None,
+        nsx_user=None,
+        nsx_password=None,
+        nsx_verify_ssl=False,
+        log_level=None,
+        dhcp_configure_always=False,
+    ):
+        self.logger = logging.getLogger("ro.vim.vcenter.network")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+        self.vc_netutil = vc_netutil
+        self.dhcp_configure_always = dhcp_configure_always
+
+        self.nsx_url = nsx_url
+        self.nsx_user = nsx_user
+        self.nsx_password = nsx_password
+        self.nsx_verify_ssl = nsx_verify_ssl
+
+        self.nsx_client = None
+        self.logger.debug(
+            "Nsx url: %s, nsx_user: %s", self.nsx_url, self.nsx_user
+        )
+        if self.nsx_url and self.nsx_user and self.nsx_password:
+            self.logger.debug("Configure nsx client")
+            self.nsx_client = NsxClient(
+                nsx_url,
+                nsx_user,
+                nsx_password,
+                verify_ssl=self.nsx_verify_ssl,
+                log_level=log_level,
+            )
+
+    def get_vm_ips(self, session, vm):
+        """
+        Obtain, using vmware tools, the ips for the provided vm
+        """
+        self.logger.debug("Obtain vm fixed ips configuration for vm name: %s", vm.name)
+
+        mac_ips_dict = {}
+
+        if vm.guest.toolsRunningStatus != "guestToolsRunning":
+            self.logger.warning(
+                "Unable to get vm ips for vm name: '%s' as vm tools is not running",
+                vm.name,
+            )
+        else:
+            if vm.guest.net:
+                for nic in vm.guest.net:
+                    if nic.macAddress and nic.ipAddress:
+                        mac_ips_dict[nic.macAddress] = nic.ipAddress
+        return mac_ips_dict
+
+    def set_vm_ips(self, session, vm_name, vm, net_list):
+        """
+        Set the vm fixed ip address using vmware tools; the subnet information (gateway,
+        network mask, dns, etc.) is obtained querying NSX
+        """
+        self.logger.debug(
+            "Set ip address for vm name: %s, net_list: %s", vm.name, net_list
+        )
+
+        # 1 - Check data, check if some fixed ip address needs to be set
+        # Obtain interfaces with ip_addresses to set
+        nets_fixed_ip = {
+            net["net_id"]: net for net in net_list if net.get("ip_address")
+        }
+        if nets_fixed_ip:
+            # Must set some fixed ip, check nsx configuration is provided
+
+            # Check nsx client is configured, only nsx networks are supported:
+            # it is needed to obtain subnet parameters and
+            # only obtaining them by nsx is supported
+            if not self.nsx_client:
+                raise vimconn.VimConnException(
+                    "Manual ip assignment cannot be done as nsx configuration is not provided"
+                )
+        else:
+            # There are no fixed ips to set; if configured to always set the dhcp
+            # configuration do it, otherwise return
+            if not self.dhcp_configure_always:
+                self.logger.debug(
+                    "There are no fixed ip addresses to configure and "
+                    "dhcp_configure_always: %s",
+                    self.dhcp_configure_always,
+                )
+                return
+
+        # 2 - Check vmware tools are installed
+        if vm.guest.toolsStatus in ["toolsNotInstalled", None]:
+            raise vimconn.VimConnException(
+                "VMware Tools is not installed or not detected. To assign fixed ip it is required."
+            )
+
+        # 3 - Iterate network interfaces and configure ip assignment for each interface
+        custom_spec = vim.vm.customization.Specification()
+        custom_spec.nicSettingMap = []
+
+        subnet_params_dict = {}
+        dns_servers = None
+
+        for device in vm.config.hardware.device:
+            if isinstance(device, vim.vm.device.VirtualEthernetCard):
+                net = self._get_net_with_mac(net_list, device.macAddress)
+
+                if net.get("ip_address"):
+                    subnets = self._get_subnets_for_net_id(
+                        session, subnet_params_dict, net.get("net_id")
+                    )
+                    self.logger.debug("Subnets info obtained: %s", subnets)
+
+                    # Update ip addresses
+                    fixed_ip_dict = self._prepare_fixed_ip_dict(
+                        net.get("ip_address"), subnets
+                    )
+                    if not dns_servers:
+                        dns_servers = fixed_ip_dict.get("dns_servers")
+                    self.logger.debug("Fixed ip dict: %s", fixed_ip_dict)
+
+                    self._update_nic_fixedip_address_spec(
+                        custom_spec, net.get("mac_address"), fixed_ip_dict
+                    )
+
+                else:
+                    self._update_nic_dhcp_spec(custom_spec, device.macAddress)
+
+        # Update vm configuration
+        self._customize_ip_address(vm_name, vm, custom_spec, dns_servers)
+
+    @staticmethod
+    def _get_net_with_mac(net_list, mac_address):
+        for net in net_list:
+            if net.get("mac_address") == mac_address:
+                return net
+        raise vimconn.VimConnException(
+            f"Unable to find net with previously assigned mac address: {mac_address}"
+        )
+
+    def _get_subnets_for_net_id(self, session, subnets_params_dic, net_id):
+        """
+        Obtains subnet network parameters
+        """
+        subnets = subnets_params_dic.get(net_id)
+
+        if not subnets:
+            # Obtain network using network id
+            self.logger.debug("Obtain network with net_id: %s", net_id)
+            network = self.vc_netutil.get_network_by_id(session, net_id)
+            self.logger.debug("Network recovered: %s", network)
+
+            # Network recovered, do not have to check types because only distributed port groups
+            # are supported so far
+            if network.config.backingType == "nsx":
+                # Obtain subnet parameters for network
+                segment_path = network.config.segmentId
+                self.logger.debug(
+                    "Obtain subnet parameters for nsx segment path: %s", segment_path
+                )
+                subnets = self.nsx_client.get_nsx_segment_dhcp_config(segment_path)
+                subnets_params_dic[net_id] = subnets
+            else:
+                raise vimconn.VimConnException(
+                    f"Network with id: {net_id} is not an nsx backed "
+                    "network and assigning fixed ip address is not supported"
+                )
+
+        return subnets
+
+    def _prepare_fixed_ip_dict(self, ip_address, subnets):
+        # Improvement - check whether something else should be done if more than one
+        # subnet is supported for one segment
+        fixed_ip_dict = {"ip_address": ip_address}
+        subnet = subnets[0]
+        gateway = str(ipaddress.IPv4Interface(subnet.get("gateway_address")).ip)
+        subnet_mask = str(
+            ipaddress.IPv4Network(subnet.get("network"), strict=False).netmask
+        )
+        fixed_ip_dict["gateway"] = gateway
+        fixed_ip_dict["subnet_mask"] = subnet_mask
+
+        dns_servers = subnet.get("dhcp_config", {}).get("dns_servers", [])
+        fixed_ip_dict["dns_servers"] = dns_servers
+        return fixed_ip_dict
+
+    def _update_nic_fixedip_address_spec(self, custom_spec, mac_address, fixed_ip_dict):
+
+        # Create a Fixed IP object
+        fixed_ip = vim.vm.customization.FixedIp(
+            ipAddress=fixed_ip_dict.get("ip_address")
+        )
+
+        adapter_mapping = vim.vm.customization.AdapterMapping()
+        adapter_mapping.adapter = vim.vm.customization.IPSettings(
+            ip=fixed_ip,
+            subnetMask=fixed_ip_dict.get("subnet_mask"),
+            gateway=fixed_ip_dict.get("gateway"),
+        )
+        adapter_mapping.macAddress = mac_address
+        custom_spec.nicSettingMap.append(adapter_mapping)
+
+    def _update_nic_dhcp_spec(self, custom_spec, mac_address):
+        adapter_mapping = vim.vm.customization.AdapterMapping()
+        adapter_mapping.adapter = vim.vm.customization.IPSettings(
+            ip=vim.vm.customization.DhcpIpGenerator()
+        )
+        adapter_mapping.macAddress = mac_address
+        custom_spec.nicSettingMap.append(adapter_mapping)
+
+    def _customize_ip_address(self, vm_name, vm, custom_spec, dns_servers):
+        # Check the vm name
+        name = self._sanitize_vm_name(vm_name)
+
+        # Optionally configure the hostname
+        identity = vim.vm.customization.LinuxPrep(
+            domain="domain.local", hostName=vim.vm.customization.FixedName(name=name)
+        )
+        custom_spec.identity = identity
+
+        global_ip_settings = vim.vm.customization.GlobalIPSettings()
+        if dns_servers:
+            global_ip_settings = vim.vm.customization.GlobalIPSettings(
+                dnsServerList=dns_servers
+            )
+        custom_spec.globalIPSettings = global_ip_settings
+
+        customize_task = vm.CustomizeVM_Task(spec=custom_spec)
+        vcutil.wait_for_task(customize_task)
+        self.logger.debug("VM spec updated")
+
+    def _sanitize_vm_name(self, vm_name):
+        corrected_vm_name = vm_name.replace("_", "-")[:63]
+        if not re.match(r"^[a-zA-Z0-9-]+$", corrected_vm_name):
+            raise vimconn.VimConnException(f"Invalid hostname: {corrected_vm_name}")
+        return corrected_vm_name
+
+
+if __name__ == "__main__":
+    # Init logger
+    log_format = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(funcName)s(): %(message)s"
+    logging.basicConfig(
+        level=logging.DEBUG,  # Set the logging level
+        format=log_format,  # Set the log message format
+        datefmt="%Y-%m-%dT%H:%M:%S",
+        handlers=[
+            logging.StreamHandler(),  # Log to the console
+        ],
+    )
+    logger = logging.getLogger("ro.vim.vmware.test_nsx")
+    logger.setLevel(level=logging.DEBUG)
+
+    test_nsx_url = os.getenv("NSX_URL")
+    test_nsx_user = os.getenv("NSX_USER")
+    test_nsx_password = os.getenv("NSX_PASSWORD")
+
+    vcnet_util = VCenterNetworkUtil(log_level="DEBUG")
+    vc_ipmanager = VCenterIpManager(
+        vc_netutil=vcnet_util,
+        nsx_url=test_nsx_url,
+        nsx_user=test_nsx_user,
+        nsx_password=test_nsx_password,
+        log_level="DEBUG",
+    )
+
+    vcenter_cluster = os.getenv("TEST_CLUSTER_NAME")
+    VCENTER_TENANT_ID = "default"
+    VCENTER_TENANT_NAME = "default"
+    vc_config = VCenterConfig(
+        availability_zones=vcenter_cluster,
+        tenant_id=VCENTER_TENANT_ID,
+        tenant_name=VCENTER_TENANT_NAME,
+        log_level="DEBUG",
+    )
+
+    vcenter_cert_path = os.getenv("VCENTER_CERT_PATH")
+    vcenter_host = os.getenv("VCENTER_SERVER")
+    vcenter_user = os.getenv("VCENTER_USER")
+    vcenter_password = os.getenv("VCENTER_PASSWORD")
+    ssl_context = ssl.create_default_context(cafile=vcenter_cert_path)
+    test_session = SmartConnect(
+        host=vcenter_host,
+        user=vcenter_user,
+        pwd=vcenter_password,
+        port=443,
+        sslContext=ssl_context,
+    )
+    logger.debug("Connected to vcenter")
+
+    try:
+        # Obtain a vm
+        vc_vmsutil = VCenterVmsUtil(vcenter_config=vc_config, log_level="DEBUG")
+
+        # Test set ips
+        """
+        #vm = vc_vmsutil.get_vm_by_uuid(session, "5035b827-e3c4-1ca4-b689-9fadb1cc78d7")
+        vm = vc_vmsutil.get_vm_by_uuid(session, "5035f893-c302-08e3-8465-345165aaf921")
+        logger.debug("Vm recovered")
+        net_list = [
+            {'name': 'eth0', 'net_id': 'vim.dvs.DistributedVirtualPortgroup:dvportgroup-44614',
+             'type': 'SRIOV', 'use': 'data'},
+            {'name': 'eth1', 'net_id': 'vim.dvs.DistributedVirtualPortgroup:dvportgroup-47674',
+             'type': 'virtual', 'use': 'data', 'ip_address': '192.168.228.23'}
+        ]
+        vc_ipmanager.set_vm_ips(session, vm, net_list)
+        """
+
+        # Test get ips
+        test_vm = vc_vmsutil.get_vm_by_uuid(
+            test_session, "50359c0a-41ee-9afc-d21b-e398b8ac1d64"
+        )
+        mac_ips = vc_ipmanager.get_vm_ips(test_session, test_vm)
+        logger.debug("Ip address for vm mac address: %s", mac_ips)
+    finally:
+        Disconnect(test_session)
+        logger.debug("Disconnected from vcenter")
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_network.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_network.py
new file mode 100644
index 00000000..7a256bd9
--- /dev/null
+++ b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_network.py
@@ -0,0 +1,281 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
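Setting fixed ips in VCenterIpManager above reduces to building a pyVmomi guest-customization specification. A minimal sketch of the objects it assembles, with illustrative addressing values:

    from pyVmomi import vim

    adapter = vim.vm.customization.AdapterMapping()
    adapter.adapter = vim.vm.customization.IPSettings(
        ip=vim.vm.customization.FixedIp(ipAddress="192.168.228.23"),
        subnetMask="255.255.255.0",
        gateway="192.168.228.1",
    )
    adapter.macAddress = "00:50:56:aa:bb:cc"  # hypothetical mac address

    spec = vim.vm.customization.Specification(nicSettingMap=[adapter])
    # vm.CustomizeVM_Task(spec=spec) then applies it, as _customize_ip_address does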
+""" +Utility class to get networks information in vcenter +""" +import logging + +from osm_ro_plugin import vimconn +from osm_rovim_vcenter import vcenter_util as vcutil +from pyVmomi import vim + +DISTRIBUTED_PORTGROUP_KEY_PREFIX = "vim.dvs.DistributedVirtualPortgroup:" + + +class VCenterNetworkUtil: + """ + Helper class to deal with vcenter networks + """ + + def __init__(self, log_level=None): + self.logger = logging.getLogger("ro.vim.vcenter.network") + if log_level: + self.logger.setLevel(getattr(logging, log_level)) + + def get_dvs_list(self, session, dvs_names): + """ + Obtains distributed switches with the provided distributed switches names + """ + self.logger.debug("Get dvs for dvs_names: %s", dvs_names) + dvs = [] + content = vcutil.get_vcenter_content(session) + container = content.viewManager.CreateContainerView( + content.rootFolder, [vim.DistributedVirtualSwitch], True + ) + for dswitch in container.view: + if dswitch.name in dvs_names: + dvs.append(dswitch) + return dvs + + def get_port_groups_by_dvs_name(self, session, dvs_names): + """ + Obtains distributed port groups for the indicated distributed switches + """ + self.logger.debug("Get port groups for dvs_names: %s", dvs_names) + dport_groups = [] + content = vcutil.get_vcenter_content(session) + container = content.viewManager.CreateContainerView( + content.rootFolder, [vim.DistributedVirtualSwitch], True + ) + for dswitch in container.view: + if dswitch.name in dvs_names: + for portgroup in dswitch.portgroup: + dport_groups.append(portgroup) + return dport_groups + + def find_port_group_by_name_dvs(self, session, dvs, port_group_name): + """ + Obtains the distributed port group with the provided name searching in the distributed + virtual switch dvs + """ + port_group = None + + for pg in dvs.portgroup: + if pg.name == port_group_name: + port_group = pg + + if not port_group: + raise vimconn.VimConnNotFoundException( + f"Distributed port group with name: {port_group_name} not found" + ) + + return port_group + + def get_network_by_id(self, session, net_id): + """ + Obtains a pyvmomi network instance object by id + Currently only obtains distributed port group + """ + if net_id.startswith(DISTRIBUTED_PORTGROUP_KEY_PREFIX): + pg_key = net_id.removeprefix(DISTRIBUTED_PORTGROUP_KEY_PREFIX) + pg = self._get_portgroup_by_key(session, pg_key) + return pg + else: + self.logger.error( + "Network: %s is not a distributed port group, currently not supported", + net_id, + ) + raise vimconn.VimConnNotFoundException( + f"Network: {net_id} is not a distributed port group, currently not supported" + ) + + def get_vim_network_by_id(self, session, net_id): + """ + Obtains a vim network from vim_id + """ + if net_id.startswith(DISTRIBUTED_PORTGROUP_KEY_PREFIX): + pg_key = net_id.removeprefix(DISTRIBUTED_PORTGROUP_KEY_PREFIX) + pg = self._get_portgroup_by_key(session, pg_key) + return self.get_vim_network_from_pg(pg) + else: + self.logger.error( + "Network: %s is not a distributed port group, currently not supported", + net_id, + ) + raise vimconn.VimConnNotFoundException( + f"Network: {net_id} is not a distributed port group, currently not supported" + ) + + def _get_portgroup_by_key(self, session, key): + """ + Obtains a distributed port group with the indicated key + """ + port_group = None + + content = vcutil.get_vcenter_content(session) + container = content.viewManager.CreateContainerView( + content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True + ) + for pg in container.view: + if pg.key == key: + port_group = pg + if 
not port_group: + raise vimconn.VimConnNotFoundException( + f"Portgroup with key: {key} not found" + ) + else: + return port_group + + def get_vim_network_from_pg(self, portgroup): + """ + Obtains a vim network object from a distributed port group + """ + port_number = portgroup.config.numPorts + binding_type = portgroup.config.type + backing_type = portgroup.config.backingType + + # Get VLAN Information + vlan_spec = portgroup.config.defaultPortConfig.vlan + vlan_id = None + if isinstance(vlan_spec, vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec): + vlan_id = vlan_spec.vlanId + elif isinstance( + vlan_spec, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec + ): + vlan_id = [(vlan.start, vlan.end) for vlan in vlan_spec.vlanId] + + vim_network = { + "id": DISTRIBUTED_PORTGROUP_KEY_PREFIX + portgroup.key, + "name": portgroup.name, + # There is no functionaly in vcenter to check if a network is active + "port_number": port_number, + "binding_type": binding_type, + "vlan_id": vlan_id, + "net_backing_type": backing_type, + } + return vim_network + + def get_dvs(self, session, dvs_name): + """ + Obtains a distributed virtual switch using its name + """ + dvs = vcutil.get_vcenter_obj(session, [vim.DistributedVirtualSwitch], dvs_name) + if not dvs: + raise vimconn.VimConnNotFoundException( + f"Distributed virtual switch with name: {dvs_name} not found" + ) + return dvs + + def create_distributed_port_group( + self, session, port_group_name, dvs_name, vlan=None + ): + """ + Creates a distributed port group with name port_group_name in the + distributed_virtual_switch named dvs_name + """ + try: + # Obtain dvs with name dvs_name + dvs = self.get_dvs(session, dvs_name) + + # Create portgroup + port_group_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec() + port_group_spec.name = port_group_name + port_group_spec.type = ( + vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding + ) + + if vlan: + vlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec() + vlan_spec.vlanId = vlan + vlan_spec.inherited = False # Ensure it's explicitly set + port_group_spec.defaultPortConfig = ( + vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy() + ) + port_group_spec.defaultPortConfig.vlan = vlan_spec + + task = dvs.AddDVPortgroup_Task([port_group_spec]) + vcutil.wait_for_task(task) + self.logger.debug( + "Distributed port group with name: %s created", port_group_name + ) + + # Obtain portgroup created and return it + port_group = self.find_port_group_by_name_dvs(session, dvs, port_group_name) + net_key = DISTRIBUTED_PORTGROUP_KEY_PREFIX + port_group.key + + return net_key, port_group + except vim.fault.DuplicateName as e: + self.logger.error( + f"Distributed port group with name: {port_group_name} already exists", + exc_info=True, + ) + raise vimconn.VimConnConflictException( + f"Distributed port group with name: {port_group_name} already exists" + ) from e + + def delete_distributed_port_group(self, port_group): + """ + Deletes the indicated distributed port group + """ + self.logger.debug("Delete distributed port group key: %s", port_group.key) + task = port_group.Destroy_Task() + vcutil.wait_for_task(task) + self.logger.debug("Distributed port group deleted") + + def is_distributed_port_group(self, net_id): + """ + Checks if the net with net_id is a distributed port group + """ + if net_id.startswith(DISTRIBUTED_PORTGROUP_KEY_PREFIX): + return True + else: + return False + + def get_distributed_port_connected_vms(self, port_group): + """ + Obtains the vms connected to the 
provided distributed port group
+        """
+        return list(port_group.vm)
+
+    def is_nsx_port_group(self, port_group):
+        """
+        Check if the distributed port group backing type is nsx
+        """
+        return port_group.config.backingType == "nsx"
+
+    def _get_distributed_port_group(self, session, portgroup_key):
+        portgroup = None
+        content = vcutil.get_vcenter_content(session)
+        container = content.viewManager.CreateContainerView(
+            content.rootFolder, [vim.DistributedVirtualSwitch], True
+        )
+        for dswitch in container.view:
+            for pg in dswitch.portgroup:
+                if pg.key == portgroup_key:
+                    portgroup = pg
+                    break
+            if portgroup:
+                break
+
+        if not portgroup:
+            raise vimconn.VimConnNotFoundException(
+                f"unable to find portgroup key: {portgroup_key}"
+            )
+        return portgroup
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_util.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_util.py
new file mode 100644
index 00000000..6a997f41
--- /dev/null
+++ b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_util.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Utility class with helper methods to deal with vcenter
+"""
+import logging
+import time
+
+from osm_ro_plugin import vimconn
+from pyVmomi import vim
+import requests
+
+
+def get_vcenter_content(session):
+    """
+    Obtains the vcenter content object
+    """
+    return session.RetrieveContent()
+
+
+def get_vcenter_obj(session, vim_type, name, folder=None):
+    """
+    Get the vSphere object associated with a given text name
+    """
+    obj = None
+
+    content = get_vcenter_content(session)
+    if not folder:
+        folder = content.rootFolder
+
+    container = content.viewManager.CreateContainerView(folder, vim_type, True)
+    for c in container.view:
+        if c.name == name:
+            obj = c
+            break
+    container.Destroy()
+    return obj
+
+
+def get_vcenter_folder(server_instance, folder_name, base_folder=None):
+    """
+    Obtains the vcenter folder object with the provided folder_name
+    """
+    return get_vcenter_obj(server_instance, [vim.Folder], folder_name, base_folder)
+
+
+def wait_for_task(task):
+    """Wait for a task to complete and handle any errors."""
+    if task:
+        while task.info.state not in [
+            vim.TaskInfo.State.success,
+            vim.TaskInfo.State.error,
+        ]:
+            time.sleep(1)
+        if task.info.state == vim.TaskInfo.State.success:
+            return task.info.result
+        else:
+            raise task.info.error  # Raise the specific exception
+
+
+def wait_for_tasks(tasks):
+    """Wait until all tasks in the list are finished.
+    If any task fails, raise an error."""
+    while any(task.info.state not in ["success", "error"] for task in tasks):
+        time.sleep(2)
+
+    for task in tasks:
+        if task.info.state == "error":
+            raise task.info.error
+
+
+class VCenterFileUploader:
+    """
+    Helper class to upload files to vcenter
+    """
+
+    def __init__(
+        self,
+        host,
+        port,
+        user,
+        password,
+        ca_cert_path,
+        log_level=None,
+        default_timeout=None,
+    ):
+        self.logger = logging.getLogger("ro.vim.vcenter.util")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+        self.host = host
+        self.port = port
+        self.user = user
+        self.password = password
+        self.ssl_verify = False
+        if ca_cert_path:
+            self.ssl_verify = ca_cert_path
+
+        self.default_timeout = default_timeout or 30
+
+    def upload_file(
+        self,
+        local_file_path,
+        datacenter_name,
+        datastore_name,
+        folder_name,
+        file_name,
+        timeout=None,
+    ):
+        """
+        Upload local file to a vmware datastore into the indicated folder
+        and with the indicated name
+        """
+        timeout = timeout or self.default_timeout
+        self.logger.debug(
+            "Upload file %s to datastore %s, folder %s, timeout %s",
+            local_file_path,
+            datastore_name,
+            folder_name,
+            timeout,
+        )
+
+        upload_path = f"/folder/{folder_name}/{file_name}"
+        url = f"https://{self.host}:{self.port}{upload_path}?dcPath={datacenter_name}&dsName={datastore_name}"
+        self.logger.debug("Upload file to url: %s", url)
+
+        with open(local_file_path, "rb") as file:
+            headers = {"Content-Type": "application/octet-stream"}
+            response = requests.put(
+                url,
+                headers=headers,
+                auth=(self.user, self.password),  # Basic authentication
+                data=file,
+                verify=self.ssl_verify,
+                timeout=timeout,
+            )
+
+        self.logger.debug(
+            "Response code: %s, text: %s", response.status_code, response.text
+        )
+        if response.status_code not in (200, 201):
+            self.logger.error(
+                "Error uploading file error_code: %s, text: %s",
+                response.status_code,
+                response.text,
+            )
+            raise vimconn.VimConnException(
+                f"Error uploading file error_code: {response.status_code}, text: {response.text}"
+            )
+        else:
+            self.logger.debug("File uploaded successfully")
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_vms.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_vms.py
new file mode 100644
index 00000000..9ee99c0e
--- /dev/null
+++ b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_vms.py
@@ -0,0 +1,1147 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
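The VCenterFileUploader above pushes files (e.g. cloud-init ISOs) to a datastore over the vSphere HTTP file API. A usage sketch with hypothetical connection and inventory values:

    from osm_rovim_vcenter.vcenter_util import VCenterFileUploader

    uploader = VCenterFileUploader(
        host="vcenter.example.com",
        port=443,
        user="administrator@vsphere.local",
        password="secret",
        ca_cert_path=None,  # falls back to ssl_verify=False
    )
    uploader.upload_file(
        local_file_path="/tmp/cloud-init.iso",
        datacenter_name="Datacenter1",
        datastore_name="datastore1",
        folder_name="OSM-Instances",
        file_name="cloud-init.iso",
    )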
+""" +Utility class to deal with vms in vcenter +""" +import logging +from urllib.parse import quote, unquote + +from osm_ro_plugin import vimconn +from osm_rovim_vcenter import vcenter_network as vcnetwork +from osm_rovim_vcenter import vcenter_util as vcutil +from osm_rovim_vcenter.vcenter_config import VCenterConfig +from osm_rovim_vcenter.vcenter_network import VCenterNetworkUtil +from pyVmomi import vim + +vmPowerState2osm = { + "poweredOff": "INACTIVE", + "poweredOn": "ACTIVE", + "suspended": "PAUSED", + "other": "OTHER", +} + +# keys for flavor dict +FLAVOR_RAM_KEY = "ram" +FLAVOR_VCPUS_KEY = "vcpus" +FLAVOR_DISK_KEY = "disk" + +# Maximum number of devices of an scsi controller +SCSI_CONTROLLER_MAX_DEVICES = 16 + + +class VCenterVmsUtil: + """ + Utility class to get information about vms + """ + + def __init__(self, vcenter_config: VCenterConfig, log_level=None): + self.vcenter_config = vcenter_config + + self.logger = logging.getLogger("ro.vim.vcenter.vms") + if log_level: + self.logger.setLevel(getattr(logging, log_level)) + + def list_images(self, session, filter_dict=None): + """ + Obtain images from tenant images folder + """ + filter_dict = filter_dict or {} + + # Obtain images folder + images_folder = self.vcenter_config.get_images_folder(session) + + # List images in folder + image_list = [] + vm_images = self._list_vms(session, images_folder, filter_dict) + for image in vm_images: + image_list.append( + { + "id": image.config.instanceUuid, + "name": image.name, + "moref": image._moId, + } + ) + + return image_list + + def _list_vms(self, session, folder=None, filter_dict=None): + """ + Lists vms in a folder, supported filter id (vcenter instanceUuid) and name + """ + self.logger.debug("List vms for the folder: %s", folder) + vms = [] + filter_dict = filter_dict or {} + + content = vcutil.get_vcenter_content(session) + if not folder: + self.logger.debug("Folder is not provided, search from root folder") + folder = content.rootFolder + + container = content.viewManager.CreateContainerView( + folder, [vim.VirtualMachine], True + ) + for vm in container.view: + if filter_dict: + if ( + filter_dict.get("id") + and str(vm.config.instanceUuid) != filter_dict["id"] + ): + continue + + if filter_dict.get("name") and str(vm.name) != filter_dict["name"]: + continue + + vms.append(vm) + + return vms + + def get_vm_by_uuid(self, session, vm_id): + """ + Obtains vm by its uuid + """ + search_index = session.content.searchIndex + vm = search_index.FindByUuid(None, vm_id, True, True) + if vm: + return vm + else: + raise vimconn.VimConnNotFoundException(f"Vm with id: {vm_id} not found") + + def get_image_by_uuid(self, session, image_id): + """ + Obtains an image from its uuid, today just gets a vm, will leave it this way to be + able to change it in the future if needed + """ + return self.get_vm_by_uuid(session, image_id) + + @staticmethod + def get_vim_vm_basic(vm): + """ + Creates an object with the vm basic info in the vim format from the vcenter vm data + """ + vim_vm = { + "id": vm.config.instanceUuid, + "name": vm.name, + "moref": vm._moId, + "status": vmPowerState2osm.get(vm.runtime.powerState, "other"), + } + return vim_vm + + def get_vm_nics_list(self, vm): + """ + Gets the list of nics for the provided vm and its associated info (dict) + """ + interfaces_info = [] + for device in vm.config.hardware.device: + if isinstance(device, vim.vm.device.VirtualEthernetCard): + interface = {} + interface["vim_interface_id"] = device.key + interface["mac_address"] = device.macAddress + + # 
Obtain net_id + if isinstance( + device.backing, + vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo, + ): + + interface["port_id"] = device.backing.port.portKey + interface["vim_net_id"] = ( + vcnetwork.DISTRIBUTED_PORTGROUP_KEY_PREFIX + + device.backing.port.portgroupKey + ) + interface["switch_uuid"] = device.backing.port.switchUuid + else: + self.logger.warning( + "nic device type not supported yet %s", {type(device).__name__} + ) + + # Complete values for vim_info, info from the data + vim_info = {} + vim_info["key"] = device.key + vim_info["controllerKey"] = device.controllerKey + vim_info["wakeOnLanEnabled"] = device.wakeOnLanEnabled + if device.deviceInfo: + vim_info["label"] = device.deviceInfo.label + vim_info["summary"] = device.deviceInfo.summary + + interfaces_info.append(interface) + + return interfaces_info + + def delete_vm(self, session, vm_id): + """ + Deletes the vm with the indicated instanceUuid, to delete must obtain a refreshed vm + """ + vm = self.get_vm_by_uuid(session, vm_id) + + if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: + powerof_task = vm.PowerOffVM_Task() + vcutil.wait_for_task(powerof_task) + + destroy_task = vm.Destroy_Task() + vcutil.wait_for_task(destroy_task) + self.logger.debug("vm id: %s deleted", vm_id) + + def get_vm_cluster(self, session, vm): + """ + Obtains the cluster associated to a vm + """ + host = vm.runtime.host + cluster = host.parent + return cluster + + def start_vm(self, vm): + """ + Starts the provided vm + """ + if vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOn: + task = vm.PowerOn() + return task + else: + self.logger.warning("WARN : Instance is already started") + return None + + def stop_vm(self, vm): + """ + Stops the provided vm + """ + if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn: + task = vm.PowerOff() + return task + else: + self.logger.warning("WARN : Instance is not in Active state") + return None + + def unattach_volumes(self, session, vm, volumes): + """ + Unattach the indicated volumes, volumes includes the volume_path quoted + """ + self.logger.debug("Volumes to unattach: %s", volumes) + + volumes_to_unattach = self._get_devices_from_volume_list(vm, volumes) + + # Unattach devices + self._unattach_devices(session, vm, volumes_to_unattach) + + def _get_devices_from_volume_list(self, vm, volumes): + + # The list of volumes is identified by the file path encoded, unencode the list + volume_file_paths = [unquote(volume_id) for volume_id in volumes] + self.logger.debug("Volume file paths: %s", volume_file_paths) + + # Obtain the devices to unattach + volumes_to_unattach = [] + for volume_path in volume_file_paths: + # Flag to check if volume is found + found = False + + # Iterate over devices in the VM + for device in vm.config.hardware.device: + # Check if the device is a VirtualDisk and its backing file matches the volume path + if ( + isinstance(device, vim.vm.device.VirtualDisk) + and hasattr(device.backing, "fileName") + and device.backing.fileName == volume_path + ): + volumes_to_unattach.append(device) + found = True + break # Exit the inner loop as the volume is found + + # Log a warning if volume is not found + if not found: + self.logger.warning( + "Volume path '%s' not found in VM device list.", volume_path + ) + + return volumes_to_unattach + + def _unattach_devices(self, session, vm, device_list): + """ + Unattach the indicated list of devices + """ + if device_list: + change_spec = vim.vm.ConfigSpec() + change_spec.deviceChange = [] 
+ + for device in device_list: + device_change = vim.vm.device.VirtualDeviceSpec() + device_change.operation = ( + vim.vm.device.VirtualDeviceSpec.Operation.remove + ) + device_change.device = device + change_spec.deviceChange.append(device_change) + + # Reconfigure vm + task = vm.ReconfigVM_Task(spec=change_spec) + vcutil.wait_for_task(task) + self.logger.debug("Devices unattached") + + else: + self.logger.warning("No devices to unattach provided, will do nothing") + + def reconfig_vm(self, session, vm, reconfig_spec): + """ + Reconfigure the indicated vm with the provided reconfigure spec + """ + if reconfig_spec: + # Reconfigure vm + task = vm.ReconfigVM_Task(spec=reconfig_spec) + vcutil.wait_for_task(task) + self.logger.debug("Vm reconfigured") + + def prepare_unattach_volumes(self, vm, volumes, unattach_spec): + """ + Prepares an unattach spec to be able to unattach volumes to keep + """ + self.logger.debug("Prepare unattach volumes: %s", volumes) + unattach_device_list = self._get_devices_from_volume_list(vm, volumes) + + # Prepare unattach spec + unattach_spec = self._prepare_unattach_spec(unattach_spec, unattach_device_list) + + return unattach_spec + + def prepare_unattach_cloudinitiso(self, vm, cloudinitiso_list, unattach_spec): + """ + Prepares an unattach spec to be able to unattach iso + """ + self.logger.debug("Prepare unattach cloudinitiso: %s", cloudinitiso_list) + unattach_device_list = self._get_cdromiso_from_list(vm, cloudinitiso_list) + + # Prepare unattach spec + unattach_spec = self._prepare_unattach_spec(unattach_spec, unattach_device_list) + + return unattach_spec + + def _prepare_unattach_spec(self, change_spec, devices_to_unattach): + # Prepare unattach spec + if not change_spec: + change_spec = vim.vm.ConfigSpec() + change_spec.deviceChange = [] + + for device in devices_to_unattach: + device_change = vim.vm.device.VirtualDeviceSpec() + device_change.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove + device_change.device = device + change_spec.deviceChange.append(device_change) + + return change_spec + + def _get_cdromiso_from_list(self, vm, cloudinitiso_list): + + # The list of volumes is identified by the file path encoded, unencode the list + cloudinitiso_paths = [ + unquote(cloudinitiso) for cloudinitiso in cloudinitiso_list + ] + self.logger.debug("Cloud init iso: %s", cloudinitiso_paths) + + # Obtain the iso cdrom to unattach + devices_to_unattach = [] + for cloudinitiso_file in cloudinitiso_paths: + found = False + + # Iterate over devices in the VM + for device in vm.config.hardware.device: + # Check if the device is a VirtualCdRom and its backing file matches the volume path + if ( + isinstance(device, vim.vm.device.VirtualCdrom) + and hasattr(device.backing, "fileName") + and device.backing.fileName == cloudinitiso_file + ): + devices_to_unattach.append(device) + found = True + break # Exit the inner loop as the volume is found + + # Log a warning if volume is not found + if not found: + self.logger.warning( + "Iso path '%s' not found in VM device list.", cloudinitiso_file + ) + + return devices_to_unattach + + def delete_iso_files(self, session, iso_file_list): + """ + Deletes the file indicated in the isp_file_list, + The file path is quoted and must be unquoted before delete + """ + self.logger.debug("Delete files: %s", iso_file_list) + + isofile_paths = [unquote(cloudinitiso) for cloudinitiso in iso_file_list] + for file_path in isofile_paths: + self.delete_datastore_file(session, file_path) + + def delete_datastore_file(self, session, 
file_path): + """ + Deletes the file indicated in the file_path + """ + try: + # Retrieve the file manager + self.logger.debug("Delete the file: %s", file_path) + file_manager = session.content.fileManager + + # Get the first datacenter (assuming a single datacenter scenario) + datacenter = session.content.rootFolder.childEntity[0] + + # Start the delete task + task = file_manager.DeleteDatastoreFile_Task( + name=file_path, datacenter=datacenter + ) + vcutil.wait_for_task(task) + self.logger.debug("File deleted") + + except vim.fault.FileNotFound: + # File does not exist + self.logger.warning("File %s does not exist. No action taken.", file_path) + + def _create_cluster_rule(self, session, cluster, rule_name, rule_type, vms): + """ + Creates a cluster rule with the indicated type + Args: + - session: vcenter session + - cluster: cluster where the rule will be created + - rule_name: name of the rule to be created + - rule_type: type of rule, possible values affinity and anti-affinity + - vms: list of vms to be added to the rule + """ + self.logger.debug("Going to create affinity group: %s", rule_name) + + rule_spec = vim.cluster.RuleSpec() + + rule_info = None + if rule_type == "affinity": + rule_info = vim.cluster.AffinityRuleSpec() + elif rule_type == "anti-affinity": + rule_info = vim.cluster.AntiAffinityRuleSpec() + else: + raise vimconn.VimConnException(f"Invalid affinity type: {rule_type}") + + rule_info.enabled = False + rule_info.mandatory = False # get from configuration + rule_info.name = rule_name + rule_info.vm = vms + + rule_spec.info = rule_info + rule_spec.operation = "add" + + rule_config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec]) + + task = cluster.ReconfigureEx(rule_config_spec, modify=True) + vcutil.wait_for_task(task) + self.logger.debug("Affinity group name: %s created", rule_name) + + def _get_cluster_rule_by_name(self, session, cluster, name): + """ + Find a rule by its name. + + Args: + session: The session object (context or connection object). + cluster: The cluster object containing rules. + name (str): The name of the rule to find. + + Returns: + The rule object if found, otherwise None. 
+ """ + self.logger.debug("Find cluster rule with name: %s", name) + rules = cluster.configurationEx.rule + if not rules: + return None + + for rule in rules: + if rule.name == name: + return rule + + return None + + def _add_vm_to_affinity_rule(self, session, cluster, cluster_rule, vm): + """ + Adds a vm to an existing cluster rule + """ + self.logger.debug("Add vm to affinity rule name: %s", cluster_rule.name) + + # Add VM to the Rule + cluster_rule.vm.append(vm) + + # Enable the rule as rules with less that 2 vms must be disabled + if len(cluster_rule.vm) > 1: + cluster_rule.enabled = True + + # Reconfigure the Cluster with the Updated Rule + spec = vim.cluster.ConfigSpecEx() + spec.rulesSpec = [vim.cluster.RuleSpec(operation="edit", info=cluster_rule)] + task = cluster.ReconfigureComputeResource_Task(spec=spec, modify=True) + vcutil.wait_for_task(task) + self.logger.debug("Affinity rule edited successfully.") + + def _delete_cluster_rule(self, session, cluster, affinity_rule): + """ + Delete a cluster rule from a cluster + """ + # Delete the Rule + spec = vim.cluster.ConfigSpecEx() + rule_spec = vim.cluster.RuleSpec( + operation="remove", removeKey=affinity_rule.key + ) + spec.rulesSpec = [rule_spec] + + # Step 4: Reconfigure the Cluster + task = cluster.ReconfigureComputeResource_Task(spec=spec, modify=True) + vcutil.wait_for_task(task) + self.logger.debug("Affinity rule %s deleted.", affinity_rule.name) + + def add_vm_or_create_affinity_group( + self, session, cluster, affinity_group_name, affinity_group_type, vm + ): + """ + Method that manages adding a vm to a cluster rule. If the cluster_rule does + not exist it creates it, otherwise adds the machine to the cluster rule + + Args: + - session + - cluster + - affinity_group_name: Name of the cluster rule to be used + - affinity_group_type + - vm + """ + self.logger.debug( + "Add vm name: %s to affinity_group_name: %s", vm.name, affinity_group_name + ) + + # Find if affinity group exists + affinity_group = self._get_cluster_rule_by_name( + session, cluster, affinity_group_name + ) + + if not affinity_group: + + # If affinity group does not exist create + self._create_cluster_rule( + session, cluster, affinity_group_name, affinity_group_type, [vm] + ) + else: + # Add vm to affinity group + self._add_vm_to_affinity_rule(session, cluster, affinity_group, vm) + + def delete_vm_affinity_rule(self, session, cluster, affinity_rule_name, vm_name): + """ + Removest the machine with the provided name from the cluster affinity rule + with name affinity_rule_name + """ + self.logger.debug( + "Remove vm: %s from affinity rule name: %s", vm_name, affinity_rule_name + ) + + # Find affinity rule + affinity_rule = self._get_cluster_rule_by_name( + session, cluster, affinity_rule_name + ) + if not affinity_rule: + # warning, affinity rule not found, unable to delete, do nothing + self.logger.warning( + "Affinity rule with name: %s not found, unable to delete", + affinity_rule_name, + ) + + else: + found = False + for vm in affinity_rule.vm: + if vm.name == vm_name: + affinity_rule.vm.remove(vm) + found = True + + if found and len(affinity_rule.vm) > 0: + # Reconfigure affinity rule + spec = vim.cluster.ConfigSpecEx() + spec.rulesSpec = [ + vim.cluster.RuleSpec(operation="edit", info=affinity_rule) + ] + task = cluster.ReconfigureComputeResource_Task(spec=spec, modify=True) + vcutil.wait_for_task(task) + self.logger.debug( + "Affinity rule %s edited successfully.", affinity_rule_name + ) + + elif len(affinity_rule.vm) == 0: + # No vms left delete 
+
+    def disconnect_vms_from_dpg(self, session, net_id, vms):
+        """
+        Disconnects the indicated list of vms from the network with id: net_id
+        """
+        self.logger.debug("Disconnect vms from net id: %s", net_id)
+
+        # Stop vms that are started
+        stopped_vms = self.stop_vm_list(session, vms)
+
+        # Disconnect vms
+        port_group_id = net_id.removeprefix(vcnetwork.DISTRIBUTED_PORTGROUP_KEY_PREFIX)
+        self._disconnect_vms(session, port_group_id, vms)
+
+        # Restart vms
+        self.start_vm_list(session, stopped_vms)
+
+    def _disconnect_vms(self, session, port_group_id, vms):
+        """
+        Disconnects a list of vms from a net; the vms should already be stopped
+        before calling this method
+        """
+        task_list = []
+        for vm in vms:
+            task = self._disconnect_vm(session, port_group_id, vm)
+            if task:
+                task_list.append(task)
+
+        if task_list:
+            # wait until all tasks are completed
+            vcutil.wait_for_tasks(task_list)
+
+    def _disconnect_vm(self, session, port_group_id, vm):
+        """
+        Disconnect vm from port_group
+        """
+        self.logger.debug(
+            "Disconnect vm name: %s from port_group_id: %s", vm.name, port_group_id
+        )
+        task = None
+
+        # Disconnect port group
+        spec = vim.vm.ConfigSpec()
+        device_changes = []
+
+        for device in vm.config.hardware.device:
+            if isinstance(device, vim.vm.device.VirtualEthernetCard):
+                if isinstance(
+                    device.backing,
+                    vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo,
+                ):
+                    if device.backing.port.portgroupKey == port_group_id:
+                        nic_spec = vim.vm.device.VirtualDeviceSpec()
+                        nic_spec.operation = (
+                            vim.vm.device.VirtualDeviceSpec.Operation.remove
+                        )  # Remove the NIC
+                        nic_spec.device = device
+                        device_changes.append(nic_spec)
+
+        if device_changes:
+            spec.deviceChange = device_changes
+            task = vm.ReconfigVM_Task(spec=spec)
+
+        return task
+
+    def stop_vm_list(self, session, vms):
+        """
+        Stop the vms in the provided list if they are started
+        """
+        stopped_vms = []
+        task_stop_list = []
+
+        for vm in vms:
+            if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+                task = vm.PowerOff()
+                task_stop_list.append(task)
+                stopped_vms.append(vm)
+
+        if task_stop_list:
+            # wait until all tasks are completed
+            vcutil.wait_for_tasks(task_stop_list)
+
+        return stopped_vms
+
+    def start_vm_list(self, session, vms):
+        """
+        Start the vms in the provided list
+        """
+        started_vms = []
+        task_start_list = []
+
+        for vm in vms:
+            if vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
+                task = vm.PowerOn()
+                task_start_list.append(task)
+                started_vms.append(vm)
+
+        if task_start_list:
+            # wait until all tasks are completed
+            vcutil.wait_for_tasks(task_start_list)
+
+        return started_vms
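stop_vm_list and start_vm_list batch their power tasks and wait once for the whole set; only the VMs that were actually running are returned, so only those are restarted later. A usage sketch (not part of the patch; vms_util, session and vms are assumed):

    # Hypothetical: power-cycle only the VMs that were running.
    previously_running = vms_util.stop_vm_list(session, vms)  # waits for all PowerOff tasks
    try:
        pass  # ... reconfigure NICs here while the VMs are off ...
    finally:
        vms_util.start_vm_list(session, previously_running)  # restart only those stopped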
+
+
+class VCenterVmsOps:
+    """
+    Helper class to create properly configured vms or to deal with vms configuration
+    """
+
+    def __init__(
+        self,
+        vc_config: VCenterConfig,
+        vc_vmsutil: VCenterVmsUtil,
+        vc_netutil: VCenterNetworkUtil,
+        session,
+    ):
+        self.vc_config = vc_config
+        self.vc_vmsutil = vc_vmsutil
+        self.vcnet_util = vc_netutil
+
+        # The session is provided to this object as it is only used to operate on vms
+        self.session = session
+
+        self.logger = self.vc_vmsutil.logger
+
+    def prepare_vm_base_config(self, vm_name, flavor, image):
+        """
+        Prepares the base config spec in pyvmomi for the new vm
+        """
+        self.logger.debug("Prepare vmconfig spec")
+
+        vm_config_spec = vim.vm.ConfigSpec()
+        vm_config_spec.name = vm_name
+        vm_config_spec.memoryMB = flavor.get(FLAVOR_RAM_KEY)
+        vm_config_spec.numCPUs = flavor.get(FLAVOR_VCPUS_KEY)
+        vm_config_spec.guestId = image.config.guestId
+
+        # Get image metadata
+        metadata = self._get_vm_metadata(vm_name, flavor, image)
+        vm_config_spec.annotation = metadata
+
+        device_changes = []
+        vm_config_spec.deviceChange = device_changes
+        return vm_config_spec
+
+    def _get_vm_metadata(self, vm_name, flavor, image):
+        metadata = []
+        metadata.append(("name", vm_name))
+        metadata.append(("imageid", image.config.instanceUuid))
+        for prop_name, value in flavor.items():
+            metadata.append((f"flavor:{prop_name}", value))
+        return "".join(["%s:%s\n" % (k, v) for k, v in metadata])
+
+    def prepare_vm_main_disk(self, flavor, image_vm, vm_config_spec, new_datastore):
+        """
+        Obtain the main disk from the image and modify its size before cloning
+        """
+        # Note: this code assumes the image has a single main disk;
+        # it is an open question whether that can ever not be the case
+        self.logger.debug("Prepare main disk size: %s", flavor.get(FLAVOR_DISK_KEY))
+        new_disk_size_gb = flavor.get(FLAVOR_DISK_KEY)
+
+        # Update spec
+        device_changes = vm_config_spec.deviceChange
+        for device in image_vm.config.hardware.device:
+            if isinstance(device, vim.vm.device.VirtualDisk):
+                disk_spec = vim.vm.device.VirtualDeviceSpec()
+                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+                disk_spec.device = device
+
+                # Check the new capacity is not smaller than the current one
+                curr_disk_capacity_gb = disk_spec.device.capacityInKB / (1024 * 1024)
+                self.logger.debug("Obtained main disk, size: %s", curr_disk_capacity_gb)
+                if curr_disk_capacity_gb > new_disk_size_gb:
+                    raise vimconn.VimConnException(
+                        f"New disk size: {new_disk_size_gb} cannot be lower than image size: "
+                        f"{curr_disk_capacity_gb}"
+                    )
+
+                # Set new capacity
+                disk_spec.device.capacityInKB = (
+                    new_disk_size_gb * 1024 * 1024
+                )  # Convert GB to KB
+
+                # If needed at some point, a target datastore can also be specified here
+
+                device_changes.append(disk_spec)
+
+    def prepare_vm_networks(self, net_list, template_vm, vm_config_spec):
+        """
+        Prepare configuration to add network interfaces to the new vm
+        """
+
+        # Obtain device_changes to update configuration
+        device_changes = vm_config_spec.deviceChange
+
+        # Remove existing network interfaces in case they exist
+        self._prepare_remove_existing_nics(template_vm, device_changes)
+
+        # Add a nic for each net
+        for net in net_list:
+            # Skip non-connected iface
+            if not net.get("net_id"):
+                self.logger.debug(f"Skipping unconnected interface: {net}")
+                continue
+
+            self.logger.debug(f"Prepare nic for net: {net}")
+            nic_spec = self._prepare_vm_nic(net, vm_config_spec)
+            device_changes.append(nic_spec)
+
+    def _prepare_remove_existing_nics(self, template_vm, device_changes):
+        for device in template_vm.config.hardware.device:
+            if isinstance(device, vim.vm.device.VirtualEthernetCard):
+                self.logger.debug(
+                    "Remove existing nic from template, label: %s",
+                    device.deviceInfo.label,
+                )
+                nic_spec = vim.vm.device.VirtualDeviceSpec()
+                nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
+                nic_spec.device = device
+                device_changes.append(nic_spec)
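_get_vm_metadata above flattens the name, image id and flavor into the VM annotation field, one key:value per line. A small illustration (not part of the patch; values made up):

    # Hypothetical input/output for the metadata flattening shown above.
    flavor = {"ram": 2048, "vcpus": 2, "disk": 10}
    lines = [("name", "vm-01"), ("imageid", "5029c440-0000-0000-0000-000000000001")]
    lines += [(f"flavor:{k}", v) for k, v in flavor.items()]
    annotation = "".join("%s:%s\n" % (k, v) for k, v in lines)
    # -> "name:vm-01\nimageid:5029c440-...\nflavor:ram:2048\nflavor:vcpus:2\nflavor:disk:10\n"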
+
+    def _prepare_vm_nic(self, net, vm_config_spec):
+        mac_address = net.get("mac_address", None)
+
+        # Get network from network id
+        self.logger.debug("Prepare nic configuration net_id: %s", net.get("net_id"))
+        network = self.vcnet_util.get_network_by_id(self.session, net.get("net_id"))
+        self.logger.debug(f"Recovered network: {network}")
+        self.logger.debug(f"Recovered network key: {network.key}")
+        self.logger.debug(
+            f"Recovered network dvs uuid: {network.config.distributedVirtualSwitch.uuid}"
+        )
+
+        # Obtain an available key
+        key = self.get_unused_device_key(vm_config_spec.deviceChange)
+
+        # Prepare nic specification
+        nic_spec = vim.vm.device.VirtualDeviceSpec()
+        nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+
+        # Create the right adapter for the type of network
+        nic = None
+        nic_type = net.get("type")
+        if nic_type == "virtual":
+            nic = vim.vm.device.VirtualVmxnet3()
+        elif nic_type == "SR-IOV":
+            nic = vim.vm.device.VirtualSriovEthernetCard()
+
+            # SR-IOV interfaces require reserving all the vm memory
+            vm_config_spec.memoryReservationLockedToMax = True
+        else:
+            self.logger.debug("Nic type: %s not supported", nic_type)
+            raise vimconn.VimConnException(f"Nic type: {nic_type} not supported")
+
+        nic.backing = (
+            vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
+        )
+        nic.backing.port = vim.dvs.PortConnection()
+        nic.backing.port.portgroupKey = network.key
+        nic.backing.port.switchUuid = network.config.distributedVirtualSwitch.uuid
+
+        nic.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+        nic.connectable.startConnected = True
+        nic.connectable.allowGuestControl = True
+        nic.wakeOnLanEnabled = True
+
+        # Assign mac address if provided
+        if mac_address:
+            nic.addressType = "manual"
+            nic.macAddress = mac_address
+
+        # Assign key
+        nic.key = key
+        nic_spec.device = nic
+        return nic_spec
+
+    def prepare_vm_quotas(self, extended_flavor_quotas, vm_config_spec):
+        """
+        Prepares the vm quotas configuration
+        """
+        self.logger.debug("Prepare quotas configuration: %s", extended_flavor_quotas)
+
+        if extended_flavor_quotas.get("cpu-quota"):
+            vm_config_spec.cpuAllocation = self._prepare_resource_allocation_config(
+                extended_flavor_quotas.get("cpu-quota")
+            )
+
+        if extended_flavor_quotas.get("mem-quota"):
+            vm_config_spec.memoryAllocation = self._prepare_resource_allocation_config(
+                extended_flavor_quotas.get("mem-quota")
+            )
+
+    def _prepare_resource_allocation_config(self, quota_config):
+        self.logger.debug("Prepare resource allocation config: %s", quota_config)
+        resource_allocation = vim.ResourceAllocationInfo()
+        if quota_config.get("reserve"):
+            resource_allocation.reservation = quota_config.get("reserve")
+        if quota_config.get("limit"):
+            resource_allocation.limit = quota_config.get("limit")
+        if quota_config.get("shares"):
+            resource_allocation.shares = vim.SharesInfo(
+                level="custom", shares=quota_config.get("shares")
+            )
+
+        self.logger.debug("Resource allocation config done")
+        return resource_allocation
+
+    def attach_cdrom(self, vm, iso_filename):
+        """
+        Attaches the indicated iso file to the provided vm; the iso file must
+        already be uploaded to vmware vcenter
+        """
+        self.logger.debug(
+            "Attach iso to vm: '%s', iso file: '%s'", vm.name, iso_filename
+        )
+
+        # 1 - Find free IDE controller
+        controller_key = self._find_free_ide_controller(vm)
+
+        # 2 - Build iso attach specification
+        device_spec = self._prepare_cdrom_spec(controller_key, iso_filename)
+        config_spec = vim.vm.ConfigSpec(deviceChange=[device_spec])
+
+        # 3 - Set the boot order to start from the cd
+        config_spec.bootOptions = vim.vm.BootOptions(
+            bootOrder=[vim.vm.BootOptions.BootableCdromDevice()]
+        )
+
+        # 4 - Reconfigure the vm to attach the cd-rom
+        self.reconfigure_vm(vm, config_spec)
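prepare_vm_quotas above maps OSM quota dictionaries onto vSphere resource allocations. An illustrative input (not from the patch; values made up):

    # Hypothetical extended-flavor quotas as consumed by prepare_vm_quotas.
    extended_flavor_quotas = {
        "cpu-quota": {"reserve": 1000, "limit": 2000, "shares": 4000},
        "mem-quota": {"limit": 2048},
    }
    # Each sub-dict becomes a vim.ResourceAllocationInfo: "reserve" -> reservation,
    # "limit" -> limit, "shares" -> vim.SharesInfo(level="custom", shares=...).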
+
+    def _find_free_ide_controller(self, vm):
+        """
+        Finds a free ide controller in the provided vm
+        """
+        for dev in vm.config.hardware.device:
+            if isinstance(dev, vim.vm.device.VirtualIDEController):
+                # If there are fewer than 2 devices attached, we can use it.
+                if len(dev.device) < 2:
+                    return dev.key
+        return None
+
+    def _prepare_cdrom_spec(self, controller_key, iso_filename):
+        device_spec = vim.vm.device.VirtualDeviceSpec()
+        device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+
+        cdrom = vim.vm.device.VirtualCdrom()
+        cdrom.controllerKey = controller_key
+        cdrom.key = -1
+
+        backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
+        backing.fileName = iso_filename
+        # backing.datastore = datastore
+        cdrom.backing = backing
+
+        connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+        connectable.allowGuestControl = True
+        connectable.startConnected = True
+        cdrom.connectable = connectable
+
+        device_spec.device = cdrom
+        return device_spec
+
+    def reconfigure_vm(self, vm, new_config_spec):
+        """
+        Reconfigure vm with the changes indicated in new_config_spec
+        """
+        self.logger.debug("Reconfigure vm name: '%s'", vm.name)
+        task = vm.Reconfigure(new_config_spec)
+        vcutil.wait_for_task(task)
+        self.logger.debug("Vm name: '%s' reconfigured", vm.name)
+
+    def prepare_ephemeral_disk(
+        self, original_vm, vm_config_spec, datastore, disk_size_gb, created_items
+    ):
+        """
+        Prepares the specification for an ephemeral disk
+        """
+        self.logger.debug("Prepare ephemeral disk size: %s", disk_size_gb)
+
+        disk_folder = vm_config_spec.name
+        disk_name = f"{vm_config_spec.name}-ephemeral"
+        device_spec = self._prepare_disk_spec(
+            original_vm=original_vm,
+            vm_config_spec=vm_config_spec,
+            datastore=datastore,
+            disk_folder=disk_folder,
+            disk_name=disk_name,
+            disk_size_gb=disk_size_gb,
+        )
+        if not vm_config_spec.deviceChange:
+            vm_config_spec.deviceChange = []
+        vm_config_spec.deviceChange.append(device_spec)
+
+    def prepare_permanent_disk(
+        self, original_vm, vm_config_spec, datastore, disk, disk_index, created_items
+    ):
+        """
+        Creates a permanent disk; if the disk must be kept after the vm is deleted,
+        the disk is created in another folder
+        """
+        self.logger.debug(
+            "Prepare persistent volume, disk index: %s, size: %s, name: %s",
+            disk_index,
+            disk.get("size"),
+            disk.get("name"),
+        )
+
+        disk_folder = vm_config_spec.name
+        disk_name = f'{vm_config_spec.name}-{disk.get("name")}-{disk_index}'
+
+        device_spec = self._prepare_disk_spec(
+            original_vm=original_vm,
+            vm_config_spec=vm_config_spec,
+            datastore=datastore,
+            disk_folder=disk_folder,
+            disk_name=disk_name,
+            disk_size_gb=disk.get("size"),
+        )
+
+        # Use the disk path as id, as an unattached disk has no other id in vcenter
+        disk_id = device_spec.device.backing.fileName
+        self.logger.debug("Created disk id: %s", disk_id)
+
+        # Append to device_change so that the data will be stored
+        if not vm_config_spec.deviceChange:
+            vm_config_spec.deviceChange = []
+        vm_config_spec.deviceChange.append(device_spec)
+
+        # Return in created_items; the id is url encoded to avoid problems with spaces
+        volume_txt = "volume:" + quote(disk_id)
+        if disk.get("keep"):
+            volume_txt += ":keep"
+        created_items[volume_txt] = True
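The persistent-disk bookkeeping above stores the datastore path, url-encoded, in a "volume:<id>[:keep]" key. A round-trip sketch (not part of the patch; the path is made up):

    from urllib.parse import quote, unquote

    disk_id = "[datastore1] vm-01/vm-01-data-1.vmdk"  # hypothetical backing file name
    key = "volume:" + quote(disk_id) + ":keep"        # as stored in created_items
    volume_id = key.split(":")[1]                     # as recovered on delete
    assert unquote(volume_id) == disk_id              # encoding keeps ":" out of the id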
+
+    def _prepare_disk_spec(
+        self,
+        original_vm,
+        vm_config_spec,
+        datastore,
+        disk_size_gb,
+        disk_folder=None,
+        disk_name=None,
+    ):
+        # Validate disk size gb is an int > 0
+
+        # Get the full list of devices and, on the full list, obtain a free scsi
+        # controller and unit number
+        devices = self._get_complete_device_list(original_vm, vm_config_spec)
+        controller_key, unit_number = self._get_scsi_controller_key_unit_number(devices)
+        datastore_name = datastore.info.name
+
+        # Create a new device spec
+        device_spec = vim.vm.device.VirtualDeviceSpec()
+        device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+        device_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
+
+        # Disk backing configuration
+        disk_backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+        disk_backing.diskMode = "persistent"
+        disk_backing.thinProvisioned = True  # Optional: Set True for thin provisioning
+        disk_backing.datastore = datastore  # Datastore where the disk will be created
+        if disk_folder and disk_name:
+            # If this folder and name are not provided vcenter sets a default filename
+            disk_backing.fileName = f"[{datastore_name}] {disk_folder}/{disk_name}.vmdk"
+
+        # Disk size in KB (1 GB = 1024 * 1024 KB)
+        disk_size_kb = int(disk_size_gb) * 1024 * 1024
+
+        disk = vim.vm.device.VirtualDisk()
+        disk.capacityInKB = disk_size_kb
+        disk.backing = disk_backing
+        disk.controllerKey = controller_key
+        disk.unitNumber = unit_number
+        disk.key = self.get_unused_device_key(vm_config_spec.deviceChange)
+
+        device_spec.device = disk
+        return device_spec
+
+    def _get_complete_device_list(self, original_vm, vm_config_spec):
+        devices = []
+        # Add the original vm device list
+        devices.extend(original_vm.config.hardware.device)
+        # Also add the devices in the new config spec: devices with an add operation
+        # only appear in this new list. Edited devices may be counted twice, and unit
+        # numbers of removed devices are not reused yet.
+        changed_devices = [
+            device_spec.device for device_spec in vm_config_spec.deviceChange
+        ]
+        devices.extend(changed_devices)
+        return devices
+
+    def _get_scsi_controller_key_unit_number(self, devices):
+        """
+        Obtains an available scsi controller key and unit number
+        """
+        scsi_keys = [dev.key for dev in devices if self._is_scsi_controller(dev)]
+        allocated_slots = self._find_allocated_slots(devices, scsi_keys)
+        self.logger.debug("scsi controller keys: %s", scsi_keys)
+        self.logger.debug("allocated slots: %s", allocated_slots)
+        result = self._find_controller_slot(
+            scsi_keys, allocated_slots, SCSI_CONTROLLER_MAX_DEVICES
+        )
+        if not result:
+            raise vimconn.VimConnException(
+                "Unable to find valid controller key to add a valid disk"
+            )
+        else:
+            self.logger.debug("Obtained controller key and unit number: %s", result)
+            return result
+
+    @staticmethod
+    def _is_scsi_controller(device):
+        scsi_controller_types = (
+            vim.vm.device.VirtualLsiLogicController,
+            vim.vm.device.VirtualLsiLogicSASController,
+            vim.vm.device.VirtualBusLogicController,
+            vim.vm.device.ParaVirtualSCSIController,
+        )
+        return isinstance(device, scsi_controller_types)
+
+    def _find_allocated_slots(self, devices, controller_keys):
+        allocated = {}
+        for device in devices:
+            self.logger.debug("Find allocated slots, device: %s", device)
+            if (
+                (device.controllerKey is not None)
+                and (device.controllerKey in controller_keys)
+                and (device.unitNumber is not None)
+            ):
+                unit_numbers = allocated.setdefault(device.controllerKey, [])
+                unit_numbers.append(device.unitNumber)
+        return allocated
+
+    @staticmethod
+    def _find_controller_slot(controller_keys, taken, max_unit_number):
+        for controller_key in controller_keys:
+            for unit_number in range(max_unit_number):
+                if unit_number not in taken.get(controller_key, []):
+                    return controller_key, unit_number
+        return None
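The slot search above is plain bookkeeping, so it can be exercised without a vCenter. A toy illustration of the _find_controller_slot semantics (not part of the patch; SCSI_CONTROLLER_MAX_DEVICES is assumed to be 16):

    # Toy data: one SCSI controller (key 1000) with units 0 and 1 taken.
    def find_controller_slot(controller_keys, taken, max_unit_number):
        for controller_key in controller_keys:
            for unit_number in range(max_unit_number):
                if unit_number not in taken.get(controller_key, []):
                    return controller_key, unit_number

    print(find_controller_slot([1000], {1000: [0, 1]}, 16))
    # -> (1000, 2): first free unit on the first controller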
+
+    @staticmethod
+    def get_unused_device_key(device_specs):
+        """
+        Finds the next unused negative key for a list of device specs.
+        Negative keys are temporary placeholders; vcenter assigns the definitive
+        device keys when the devices are actually created.
+
+        Args:
+            device_specs (list): List of vim.vm.device.VirtualDeviceSpec objects.
+
+        Returns:
+            int: The next unused negative key.
+        """
+        # Collect all used negative keys
+        device_keys = set()
+        for device_spec in device_specs:
+            if device_spec.operation == vim.vm.device.VirtualDeviceSpec.Operation.add:
+                device_keys.add(device_spec.device.key)
+
+        # Find the smallest unused negative key
+        next_negative_key = -1
+        while next_negative_key in device_keys:
+            next_negative_key -= 1
+
+        return next_negative_key
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vim_helper.py b/RO-VIM-vcenter/osm_rovim_vcenter/vim_helper.py
new file mode 100644
index 00000000..877c045b
--- /dev/null
+++ b/RO-VIM-vcenter/osm_rovim_vcenter/vim_helper.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Helper utilities for functionality not directly tied to the vCenter API,
+e.g. cloud-init config-drive ISO generation
+"""
+import json
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+import uuid
+
+from osm_ro_plugin import vimconn
+
+
+class CloudInitHelper:
+    """
+    Class that will help to generate iso files needed for cloud-init functionality
+    """
+
+    def __init__(self, log_level=None):
+        self.logger = logging.getLogger("ro.vim.vcenter.vim_helper")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+    def generate_cloud_init_iso(self, user_data):
+        """
+        Generates a cloud init iso with the provided user_data
+        """
+        self.logger.debug("Generate cloud init iso")
+        tmpdir = tempfile.mkdtemp()
+        iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
+        latest_dir = os.path.join(tmpdir, "openstack", "latest")
+        os.makedirs(latest_dir)
+        with open(
+            os.path.join(latest_dir, "meta_data.json"), "w"
+        ) as meta_file_obj, open(
+            os.path.join(latest_dir, "user_data"), "w"
+        ) as userdata_file_obj:
+            userdata_file_obj.write(user_data)
+            meta_file_obj.write(
+                json.dumps(
+                    {
+                        "availability_zone": "nova",
+                        "launch_index": 0,
+                        "name": "ConfigDrive",
+                        "uuid": str(uuid.uuid4()),
+                    }
+                )
+            )
+        genisoimage_cmd = (
+            "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
+                iso_path=iso_path, source_dir_path=tmpdir
+            )
+        )
+        self.logger.info(
+            'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
+                genisoimage_cmd
+            )
+        )
+
+        try:
+            subprocess.check_call(
+                genisoimage_cmd, shell=True, stdout=subprocess.DEVNULL
+            )
+        except subprocess.CalledProcessError as e:
+            shutil.rmtree(tmpdir, ignore_errors=True)
+            error_msg = "create_config_drive_iso(): Exception executing genisoimage : {}".format(
+                e
+            )
+            self.logger.error(error_msg)
+            raise vimconn.VimConnException(error_msg)
+
+        return iso_path, tmpdir
+
+    def delete_tmp_dir(self, tmpdirname):
+        """
+        Delete the tmp dir with the indicated name
+        """
+        self.logger.debug("Delete tmp dir: %s", tmpdirname)
+        shutil.rmtree(tmpdirname)
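CloudInitHelper builds an OpenStack-style config drive (the config-2 volume label is what cloud-init's ConfigDrive datasource looks for) and returns both the iso path and the temporary directory so the caller can clean up. A usage sketch (not part of the patch; requires genisoimage installed, user-data made up):

    # Hypothetical usage of the helper above.
    helper = CloudInitHelper(log_level="DEBUG")
    user_data = "#cloud-config\nhostname: vm-01\n"
    iso_path, tmpdir = helper.generate_cloud_init_iso(user_data)
    # ... upload iso_path to the datastore and attach it to the VM ...
    helper.delete_tmp_dir(tmpdir)  # cleanup once the iso has been uploaded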
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vimconn_vcenter.py b/RO-VIM-vcenter/osm_rovim_vcenter/vimconn_vcenter.py
new file mode 100644
index 00000000..f5efc6ad
--- /dev/null
+++ b/RO-VIM-vcenter/osm_rovim_vcenter/vimconn_vcenter.py
@@ -0,0 +1,1442 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+VIM connector plugin for VMware vCenter
+"""
+import logging
+import ssl
+from urllib.parse import quote, urlparse
+import uuid
+
+from osm_ro_plugin import vimconn
+from osm_rovim_vcenter import vcenter_util as vcutil
+from osm_rovim_vcenter import vcenter_vms as vcvmutil
+from osm_rovim_vcenter.vcenter_config import VCenterConfig
+from osm_rovim_vcenter.vcenter_ipmanager import VCenterIpManager
+from osm_rovim_vcenter.vcenter_network import VCenterNetworkUtil
+from osm_rovim_vcenter.vcenter_util import VCenterFileUploader
+from osm_rovim_vcenter.vcenter_vms import VCenterVmsOps
+from osm_rovim_vcenter.vcenter_vms import VCenterVmsUtil
+from osm_rovim_vcenter.vim_helper import CloudInitHelper
+from pyVim.connect import Disconnect, SmartConnect
+from pyVmomi import vim
+import yaml
+
+
+def handle_connector_exceptions(func):
+    """
+    Decorator function that handles and reraises exceptions
+    """
+
+    def format_exception(*args, **kwargs):
+        try:
+            return func(*args, **kwargs)
+        except Exception as e:
+            vimconnector._format_raise_exception(e)
+
+    return format_exception
+
+
+DEFAULT_OSM_TENANT_NAME = "default"
+
+
+class vimconnector(vimconn.VimConnector):
+    """
+    RO vcenter plugin main class
+    """
+
+    # Dict to store flavors in memory, indexed by flavor id
+    _flavorlist = {}
+
+    # Affinity groups, indexed by name. Duplicates are not allowed: with
+    # duplicates it would be impossible to know whether a new affinity group
+    # must be created or not
+    _affinity_groups = {}
+
+    def __init__(
+        self,
+        uuid=None,
+        name=None,
+        tenant_id=None,
+        tenant_name=None,
+        url=None,
+        url_admin=None,
+        user=None,
+        passwd=None,
+        log_level=None,
+        config={},
+        persistent_info={},
+    ):
+        """
+        TODO - documentation
+        :param uuid:
+        :param name:
+        :param tenant_id:
+        :param tenant_name:
+        :param url:
+        :param url_admin:
+        :param user:
+        :param passwd:
+        :param log_level:
+        :param config:
+        :param persistent_info:
+        """
+        vimconn.VimConnector.__init__(
+            self,
+            uuid,
+            name,
+            tenant_id,
+            tenant_name,
+            url,
+            url_admin,
+            user,
+            passwd,
+            log_level,
+            config,
+        )
+
+        self.logger = logging.getLogger("ro.vim.vcenter")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+        self.log_level = log_level
+
+        self.persistent_info = persistent_info
+
+        self.logger.info(
+            "Initializing vcenter plugin, name:%s, uuid: %s, tenant_name: %s",
+            name,
+            uuid,
+            tenant_name,
+        )
+        self.logger.info("Connection info, url: %s, user: %s", url, user)
+        self.logger.info("Config information: %s ", config)
+        self.logger.info("Persistent info: %s", persistent_info)
+
+        # Parse the URL to extract the hostname
+        parsed_url = urlparse(url)
+        self.vcenter_hostname = parsed_url.hostname
+
+        # Default port: 443 for https, 80 otherwise
+        self.vcenter_port = (
+            parsed_url.port
+            if parsed_url.port
+            else (443 if parsed_url.scheme == "https" else 80)
+        )
+        self.logger.debug(
+            "vcenter_hostname: %s, vcenter_port: %s",
+            self.vcenter_hostname,
+            self.vcenter_port,
+        )
+
+        # Prepare ssl context
+        if self.config.get("insecure") and self.config.get("ca_cert"):
+            raise vimconn.VimConnException(
+                "options insecure and ca_cert are mutually exclusive"
+            )
+        elif self.config.get("insecure") is None and self.config.get("ca_cert") is None:
+            raise vimconn.VimConnException(
+                "either providing certificates or selecting insecure connection is required"
+            )
+
+        if self.config.get("insecure"):
+            self.logger.warning("Using insecure ssl context")
+            self.ssl_context = ssl._create_unverified_context()
+
+        if self.config.get("ca_cert"):
+            self.logger.debug("ca_cert path: %s", self.config.get("ca_cert"))
+            self.ssl_context = ssl.create_default_context(
+                cafile=self.config.get("ca_cert")
+            )
+
+        # Assign default tenant name if not provided.
+        # Check against "null" because there seems to be an error on an upper
+        # layer that sets "null" when the tenant name is not provided
+        if not tenant_name or tenant_name == "null":
+            self.tenant_name = DEFAULT_OSM_TENANT_NAME
+
+        # Availability zone: for the moment only one is supported, but it is required.
+        # The availability zone must correspond to a cluster or resource pool name
+        self.availability_zone = self.config.get("availability_zone")
+        if not self.availability_zone:
+            raise vimconn.VimConnException(
+                "Config parameter availability_zone is required"
+            )
+
+        # Allows indicating the distributed virtual switch (could more than one be supported?)
+        self.dvs_names = self.config.get("availability_network_zone")
+        if not self.dvs_names:
+            raise vimconn.VimConnException(
+                "Config parameter availability_network_zone is required"
+            )
+
+        # Datastore configuration
+        self.datastore = self.config.get("datastore")
+        if not self.datastore:
+            raise vimconn.VimConnException("Config parameter datastore is required")
+
+        # NSX configuration
+        self.nsx_url = self.config.get("nsx_url")
+        self.nsx_user = self.config.get("nsx_user")
+        self.nsx_password = self.config.get("nsx_password")
+        self.nsx_verify_ssl = False
+        if self.config.get("nsx_ca_cert"):
+            self.nsx_verify_ssl = self.config.get("nsx_ca_cert")
+
+        self.dhcp_configure_always = self.config.get("dhcp_configure_always", False)
+
+        # Initialize vcenter helper objects
+        self.vcenter_fileuploader = VCenterFileUploader(
+            self.vcenter_hostname,
+            self.vcenter_port,
+            self.user,
+            self.passwd,
+            self.config.get("ca_cert", None),
+            log_level=log_level,
+        )
+        self.vcenter_config = VCenterConfig(
+            self.availability_zone,
+            tenant_id,
+            self.tenant_name,
+            datastore_name=self.datastore,
+            distributed_switches_names=self.dvs_names,
+            log_level=log_level,
+        )
+        self.vcnet_util = VCenterNetworkUtil(log_level=log_level)
+        self.vcvms_util = VCenterVmsUtil(self.vcenter_config, log_level=log_level)
+        self.cloudinit_helper = CloudInitHelper(log_level=log_level)
+        self.vcenter_ipmanager = VCenterIpManager(
+            vc_netutil=self.vcnet_util,
+            nsx_url=self.nsx_url,
+            nsx_user=self.nsx_user,
+            nsx_password=self.nsx_password,
+            nsx_verify_ssl=self.nsx_verify_ssl,
+            dhcp_configure_always=self.dhcp_configure_always,
+        )
+
+    def check_vim_connectivity(self):
+        self.logger.debug("Check vim connectivity")
+        # Load vcenter content to test the connection
+        session = self._get_vcenter_instance()
+        try:
+            vcutil.get_vcenter_content(session)
+        finally:
+            self._disconnect_si(session)
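For reference, a config dictionary that would satisfy the checks above (a sketch with made-up values; the keys are the ones read in __init__):

    # Hypothetical VIM "config" for this connector; values are illustrative.
    config = {
        "availability_zone": "Cluster-1",          # required: cluster or resource pool name
        "availability_network_zone": "DSwitch-1",  # required: distributed virtual switch
        "datastore": "datastore1",                 # required
        "ca_cert": "/path/to/vcenter-ca.pem",      # or "insecure": True (mutually exclusive)
        "nsx_url": "https://nsx.example.com",      # optional NSX manager settings
        "nsx_user": "admin",
        "nsx_password": "secret",
        "dhcp_configure_always": False,
    }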
+
+    def get_tenant_list(self, filter_dict={}):
+        """Obtain tenants of VIM
+        filter_dict dictionary that can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+        Returns the tenant list of dictionaries, or an empty list if no tenant
+        matches all the filters:
+            [{'name':<name>, 'id':<id>, ...}, ...]
+        """
+        self.logger.warning("Get tenant list is not supported in vcenter")
+        raise vimconn.VimConnNotImplemented(
+            "Get tenant list is not supported in vcenter"
+        )
+
+    def new_tenant(self, tenant_name, tenant_description):
+        """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
+        "tenant_name": string max length 64
+        "tenant_description": string max length 256
+        returns the tenant identifier or raises an exception
+        """
+        self.logger.warning("new_tenant is not supported in vcenter")
+        raise vimconn.VimConnNotImplemented("new_tenant is not supported in vcenter")
+
+    def delete_tenant(self, tenant_id):
+        """Delete a tenant from VIM
+        tenant_id: returned VIM tenant_id on "new_tenant"
+        Returns None on success. Raises an exception on failure. If the tenant is
+        not found raises VimConnNotFoundException
+        """
+        self.logger.warning("delete_tenant is not supported in vcenter")
+        raise vimconn.VimConnNotImplemented("delete_tenant is not supported in vcenter")
+
+    def get_flavor(self, flavor_id):
+        """Obtain flavor details from the VIM
+        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+        Raises an exception upon error or if not found
+        """
+        self.logger.debug("Get flavor with id: %s", flavor_id)
+
+        if flavor_id not in self._flavorlist:
+            raise vimconn.VimConnNotFoundException("Flavor not found.")
+
+        return self._flavorlist[flavor_id]
+
+    def get_flavor_id_from_data(self, flavor_dict):
+        """Obtain flavor id that matches the flavor description
+        Params:
+            'flavor_dict': dictionary that contains:
+                'disk': main hard disk in GB
+                'ram': memory in MB
+                'vcpus': number of virtual cpus
+                #TODO: complete parameters for EPA
+        Returns the flavor_id or raises a VimConnNotFoundException
+        """
+        self.logger.debug("Get flavor from data: %s", flavor_dict)
+        # As flavors are only stored in memory in this connector, always raise
+        # VimConnNotFoundException
+        raise vimconn.VimConnNotFoundException(
+            "get_flavor_id_from_data not used in this plugin"
+        )
+
+    def new_flavor(self, flavor_data):
+        """Adds a tenant flavor to VIM
+        flavor_data contains a dictionary with information, keys:
+            name: flavor name
+            ram: memory (cloud type) in MBytes
+            vcpus: cpus (cloud type)
+            extended: EPA parameters
+                - numas: #items requested in same NUMA
+                    memory: number of 1G huge pages memory
+                    paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual
+                        threads
+                    interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                    - name: interface name
+                      dedicated: yes|no|yes:sriov; for PT, SRIOV or only one SRIOV for the physical NIC
+                      bandwidth: X Gbps; requested guarantee bandwidth
+                      vpci: requested virtual PCI address
+            disk: disk size
+            is_public:
+            #TODO to concrete
+        Returns the flavor identifier
+        """
+        self.logger.debug("New flavor data: %s", flavor_data)
+
+        new_flavor = flavor_data
+        ram = flavor_data.get(vcvmutil.FLAVOR_RAM_KEY, 1024)
+        cpu = flavor_data.get(vcvmutil.FLAVOR_VCPUS_KEY, 1)
+        disk = flavor_data.get(vcvmutil.FLAVOR_DISK_KEY, 0)
+
+        self._validate_int(ram, "ram")
+        self._validate_int(cpu, "cpu")
+        self._validate_int(disk, "disk")
+
+        # Generate a new uuid, store the flavor in the internal dict and return it
+        flavor_id = uuid.uuid4()
+        self._flavorlist[str(flavor_id)] = new_flavor
+        self.logger.debug("Created flavor - %s : %s", flavor_id, new_flavor)
+
+        return str(flavor_id)
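An example flavor_data accepted by new_flavor (illustrative values, not from the patch; only ram, vcpus and disk are validated here, the extended quotas are consumed later by _process_flavor_extended_config):

    # Hypothetical flavor as stored in the in-memory _flavorlist;
    # "connector" is an assumed vimconnector instance.
    flavor_data = {
        "name": "small",
        "ram": 2048,     # MB
        "vcpus": 2,
        "disk": 10,      # GB, size of the main disk
        "ephemeral": 0,  # GB, optional extra disk
        "extended": {
            "cpu-quota": {"limit": 2000},
            "mem-quota": {"reserve": 1024},
        },
    }
    flavor_id = connector.new_flavor(flavor_data)  # returns a generated uuid string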
+
+    def delete_flavor(self, flavor_id):
+        """Deletes a tenant flavor from VIM, identified by its id
+        Returns the used id or raises an exception
+        """
+        self.logger.debug("Delete flavor id: %s", flavor_id)
+        if flavor_id in self._flavorlist:
+            self._flavorlist.pop(flavor_id)
+            return flavor_id
+        else:
+            self.logger.info("Flavor with id: %s not found ", flavor_id)
+
+    def get_affinity_group(self, affinity_group_id):
+        """Obtain affinity or anti affinity group details from the VIM
+        Returns the group dict details {'id':<>, 'name':<>, other vim specific }
+        Raises an exception upon error or if not found
+        """
+        self.logger.debug("Get affinity group with id: %s", affinity_group_id)
+        if affinity_group_id not in self._affinity_groups:
+            raise vimconn.VimConnNotFoundException(
+                f"Affinity group with id: {affinity_group_id} not found"
+            )
+
+        return self._affinity_groups[affinity_group_id]
+
+    def new_affinity_group(self, affinity_group_data):
+        """Adds an affinity or anti affinity group to VIM
+        affinity_group_data contains a dictionary with information, keys:
+            name: name in VIM for the affinity or anti-affinity group
+            type: affinity or anti-affinity
+            scope: Only nfvi-node allowed
+        Returns the affinity or anti affinity group identifier
+        """
+        self.logger.debug("New affinity group, data: %s", affinity_group_data)
+
+        affinity_group_name = affinity_group_data.get("name")
+        affinity_group_type = affinity_group_data.get("type")
+
+        if affinity_group_name in self._affinity_groups:
+            affinity_group = self._affinity_groups.get(affinity_group_name)
+            if affinity_group_type != affinity_group.get("type"):
+                self.logger.warning(
+                    "There is already an affinity group with name %s "
+                    "and different type: %s",
+                    affinity_group_name,
+                    affinity_group_type,
+                )
+                raise vimconn.VimConnNotFoundException(
+                    f"there is already an affinity group with name: {affinity_group_name} and "
+                    "different type"
+                )
+        else:
+            affinity_group = affinity_group_data
+            self._affinity_groups[affinity_group_name] = affinity_group_data
+
+        self.logger.debug("Affinity groups: %s", self._affinity_groups)
+        return affinity_group.get("name")
+
+    def delete_affinity_group(self, affinity_group_id):
+        """
+        Deletes an affinity or anti affinity group from the VIM identified by its id
+        Returns the used id or raises an exception
+        """
+        self.logger.debug("Delete affinity group with id: %s", affinity_group_id)
+
+        if affinity_group_id in self._affinity_groups:
+            self.logger.info(
+                "Deleting affinity group %s",
+                self._affinity_groups.get(affinity_group_id),
+            )
+            del self._affinity_groups[affinity_group_id]
+        else:
+            self.logger.info("Affinity group with id %s not found", affinity_group_id)
+
+        self.logger.debug("Affinity groups: %s", self._affinity_groups)
+        return affinity_group_id
+
+    def new_image(self, image_dict):
+        """Adds a tenant image to VIM
+        Returns the image id or raises an exception if failed
+        """
+        self.logger.debug("Create new image: %s", image_dict)
+        raise vimconn.VimConnNotImplemented("new image is not supported in vcenter")
+
+    def delete_image(self, image_id):
+        """Deletes a tenant image from VIM
+        Returns the image_id if the image is deleted or raises an exception on error
+        """
+        self.logger.debug("Delete image: %s", image_id)
+        raise vimconn.VimConnNotImplemented("delete image is not supported in vcenter")
+
+    def get_image_id_from_path(self, path):
+        """Get the image id from image path in the VIM database.
+        Returns the image_id or raises a VimConnNotFoundException
+        """
+        self.logger.debug("Get image from path: %s", path)
+        raise vimconn.VimConnNotImplemented(
+            "get image from path is not supported in vcenter"
+        )
+
+    @handle_connector_exceptions
+    def get_image_list(self, filter_dict=None):
+        """Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum
+            location: image path
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        """
+        filter_dict = filter_dict or {}
+        self.logger.debug("Get image list, filter_dict: %s", filter_dict)
+
+        session = self._get_vcenter_instance()
+        try:
+            # Get images
+            image_list = self.vcvms_util.list_images(session, filter_dict=filter_dict)
+
+            self.logger.debug("Image list: %s", image_list)
+            return image_list
+        finally:
+            self._disconnect_si(session)
+
+    def new_vminstance(
+        self,
+        name: str,
+        description: str,
+        start: bool,
+        image_id: str,
+        flavor_id: str,
+        affinity_group_list: list,
+        net_list: list,
+        cloud_config=None,
+        disk_list=None,
+        availability_zone_index=None,
+        availability_zone_list=None,
+        security_group_name=None,
+    ) -> tuple:
+        """Adds a VM instance to VIM.
+
+        Args:
+            name (str): name of VM
+            description (str): description
+            start (bool): indicates if VM must start or boot in pause mode. Ignored
+            image_id (str): image uuid
+            flavor_id (str): flavor uuid
+            affinity_group_list (list): list of affinity groups, each one is a dictionary. Ignored if empty.
+            net_list (list): list of interfaces, each one is a dictionary with:
+                name: name of network
+                net_id: network uuid to connect
+                vpci: virtual pci address to assign, currently ignored #TODO
+                model: interface model, ignored #TODO
+                mac_address: used for SR-IOV ifaces #TODO for other types
+                use: 'data', 'bridge', 'mgmt'
+                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
+                vim_id: filled/added by this function
+                floating_ip: True/False (or it can be None)
+                port_security: True/False
+            cloud_config (dict): (optional) dictionary with:
+                key-pairs: (optional) list of strings with the public key to be inserted to the default user
+                users: (optional) list of users to be inserted, each item is a dict with:
+                    name: (mandatory) user name,
+                    key-pairs: (optional) list of strings with the public key to be inserted to the user
+                user-data: (optional) string is a text script to be passed directly to cloud-init
+                config-files: (optional). List of files to be transferred. Each item is a dict with:
+                    dest: (mandatory) string with the destination absolute path
+                    encoding: (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    content : (mandatory) string with the content of the file
+                    permissions: (optional) string with file permissions, typically octal notation '0644'
+                    owner: (optional) file owner, string with the format 'owner:group'
+                boot-data-drive: boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            disk_list: (optional) list with additional disks to the VM. Each item is a dict with:
+                image_id: (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                size: (mandatory) string with the size of the disk in GB
+                vim_id: (optional) should use this existing volume id
+            availability_zone_index: Index of availability_zone_list to use for this VM. None if not AV required
+            availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignored if
+                availability_zone_index is None
+            #TODO ip, security groups
+
+        Returns:
+            A tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+
+        """
+        self.logger.info(
+            "new vm_instance name: %s, image_id: %s, flavor_id: %s",
+            name,
+            image_id,
+            flavor_id,
+        )
+        self.logger.debug(
+            "new_vminstance data, net_list: %s, disk_list: %s"
+            " affinity_group_list: %s, cloud_config: %s,",
+            net_list,
+            disk_list,
+            affinity_group_list,
+            cloud_config,
+        )
+        net_list = net_list or []
+        disk_list = disk_list or []
+        affinity_group_list = affinity_group_list or []
+
+        session = self._get_vcenter_instance()
+        new_vm = None
+        created_items = {}
+        try:
+            vc_vmops = VCenterVmsOps(
+                self.vcenter_config, self.vcvms_util, self.vcnet_util, session
+            )
+
+            # Recover flavor, image template, resource pool, cluster and datastore.
+            # If the datastore is not in the configuration, use the same one as the template
+            flavor = self.get_flavor(flavor_id)
+            self.logger.debug("Flavor recovered: %s", flavor)
+
+            # Obtain image to clone
+            image_vm = self.vcvms_util.get_image_by_uuid(session, image_id)
+            self.logger.debug("Image recovered: %s", image_vm)
+
+            # Obtain needed configuration
+            datastore = self.vcenter_config.get_datastore(session)
+            self.logger.debug("Datastore: %s", datastore)
+            cluster, resource_pool = self.vcenter_config.get_cluster_rp_from_av_zone(
+                session, availability_zone_index, availability_zone_list
+            )
+            vms_folder = self.vcenter_config.get_instances_folder(session)
+            self.logger.debug("Cluster: %s, resource_pool: %s", cluster, resource_pool)
+
+            # Start to prepare config data
+
+            # Prepare affinity groups (check that they can be found)
+            affinity_groups_full = self._prepare_affinity_groups(affinity_group_list)
+
+            # Generate vm unique name
+            vm_name = self._generate_vm_name(name)
+
+            # Prepare vmconfig based on image and flavor data
+            vm_config_spec = vc_vmops.prepare_vm_base_config(vm_name, flavor, image_vm)
+
+            # Process flavor extended config
+            self._process_flavor_extended_config(vc_vmops, vm_config_spec, flavor)
+
+            # Prepare main disk
+            vc_vmops.prepare_vm_main_disk(flavor, image_vm, vm_config_spec, datastore)
+
+            # Add network interfaces configuration
+            vc_vmops.prepare_vm_networks(net_list, image_vm, vm_config_spec)
+
+            # Prepare disks configuration
+            self._prepare_vm_disks(
+                flavor=flavor,
+                disk_list=disk_list,
+                created_items=created_items,
+                vm_config_spec=vm_config_spec,
+                image_vm=image_vm,
+                vc_vmops=vc_vmops,
+            )
+
+            # Generate cloud init iso
+            iso_path, tmp_dir = self._generate_cloud_init_iso(cloud_config)
+
+            # Clone machine
+            self.logger.debug("Cloning image to create vm name %s", vm_config_spec.name)
+            # self.logger.debug("Cloning image config spec %s", vm_config_spec)
+            clone_spec = vim.vm.CloneSpec(
+                location=vim.vm.RelocateSpec(pool=resource_pool, datastore=datastore),
+                powerOn=False,  # do not power on yet, the vm is started after configuration
+                template=False,
+                config=vm_config_spec,
+            )
+            clone_task = image_vm.Clone(
+                folder=vms_folder, name=vm_config_spec.name, spec=clone_spec
+            )
+            self.logger.debug("Clone task launched, wait for it to complete")
+
+            # Wait until clone task is completed
+            new_vm = vcutil.wait_for_task(clone_task)
+
+            # Attach cloud init to vm
+            self._attach_cloud_init_iso(
+                vc_vmops, new_vm, iso_path, tmp_dir, created_items
+            )
+
+            # Add the machine to affinity groups
+            self._add_vm_affinity_groups(
+                session, cluster, new_vm, affinity_groups_full, created_items
+            )
+
+            # Assign vim_id to net
+            self._assign_vim_id_to_net(new_vm, net_list)
+
+            # Assign fixed ip addresses if there are any
+            self.vcenter_ipmanager.set_vm_ips(session, name, new_vm, net_list)
+
+            # Start vm
+            self.vcvms_util.start_vm(new_vm)
+
+            self.logger.info(
+                "Created vm, server_id: %s, vm_name: %s, created_items: %s, "
+                " net_list: %s",
+                new_vm.config.instanceUuid,
+                vm_name,
+                created_items,
+                net_list,
+            )
+            return new_vm.config.instanceUuid, created_items
+
+        except Exception as e:
+            if new_vm:
+                try:
+                    server_uuid = new_vm.config.instanceUuid
+
+                    created_items = self.remove_keep_tag_from_persistent_volumes(
+                        created_items
+                    )
+
+                    self.delete_vminstance(server_uuid, created_items)
+
+                except Exception as e2:
+                    self.logger.error(f"new_vminstance rollback fail {e2}")
+
+            # Logs and reraises exception
+            self._format_raise_exception(e)
+        finally:
+            self._disconnect_si(session)
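created_items is the connector's bookkeeping between new_vminstance and delete_vminstance. An illustration (the key formats follow the code above and below; values are made up):

    # Hypothetical created_items produced by new_vminstance.
    created_items = {
        "volume:%5Bdatastore1%5D%20vm-01/vm-01-data-1.vmdk:keep": True,
        "cloud-init-iso:vm-01-ConfigDrive.iso": True,
        "affinity-group:my-anti-aff-rule": True,
    }
    # On rollback the ":keep" protection is dropped so the volumes get deleted too:
    rolled_back = {k.replace(":keep", ""): v for k, v in created_items.items()}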
+
+    @staticmethod
+    def remove_keep_tag_from_persistent_volumes(created_items: dict) -> dict:
+        """Removes the keep flag from persistent volumes so that those volumes can be removed.
+
+        Args:
+            created_items (dict): All created items belonging to the VM
+
+        Returns:
+            updated_created_items (dict): Dict which does not include the keep flag for volumes.
+        """
+        return {
+            key.replace(":keep", ""): value for (key, value) in created_items.items()
+        }
+
+    def _assign_vim_id_to_net(self, vm, net_list):
+        """
+        Obtains the vim_id and assigns it to the net; also assigns the mac_address
+        if it is available
+        """
+        nics_info = self.vcvms_util.get_vm_nics_list(vm)
+        for net in net_list:
+            net_id = net.get("net_id")
+            # Obtain the first interface with the same net_id
+            for index, nic in enumerate(nics_info):
+                if nic.get("vim_net_id") == net_id:
+                    net["vim_id"] = nic.get("vim_interface_id")
+                    if nic.get("mac_address"):
+                        net["mac_address"] = nic.get("mac_address")
+                    del nics_info[index]
+                    break
+        if nics_info:
+            self.logger.warning("Unassigned elements in network: %s", nics_info)
+
+    def _prepare_vm_disks(
+        self, flavor, disk_list, created_items, vm_config_spec, image_vm, vc_vmops
+    ):
+        """
+        Prepare all volumes for the vm instance
+        """
+        disk_list = disk_list or []
+        # Could be configured to store permanent disks in another datastore
+        datastore = image_vm.datastore[0]
+
+        # Check if an ephemeral disk needs to be created
+        ephemeral_disk_size_gb = flavor.get("ephemeral", 0)
+        if int(ephemeral_disk_size_gb) > 0:
+            # Create ephemeral disk
+            vc_vmops.prepare_ephemeral_disk(
+                image_vm,
+                vm_config_spec,
+                datastore,
+                ephemeral_disk_size_gb,
+                created_items,
+            )
+
+        self.logger.debug("Process disk list: %s", disk_list)
+        for disk_index, disk in enumerate(disk_list, start=1):
+            self.logger.debug("disk_index: %s, disk: %s", disk_index, disk)
+            if "image_id" in disk:
+                self.logger.warning("Volume disks with image id not supported yet")
+            elif disk.get("multiattach"):
+                self.logger.warning("Multiattach volumes not supported yet")
+            elif disk.get("volume_id"):
+                self.logger.warning("Already existing volumes not supported yet")
+            else:
+                # Create permanent disk
+                vc_vmops.prepare_permanent_disk(
+                    image_vm, vm_config_spec, datastore, disk, disk_index, created_items
+                )
+
+    def _prepare_affinity_groups(self, affinity_group_id_list):
+        """
+        Check that the affinity group ids in the list can be found and recover
+        the affinity groups from the ids
+        """
+        affinity_groups = None
+        if affinity_group_id_list:
+            affinity_groups = []
+            for item in affinity_group_id_list:
+                affinity_group_id = item["affinity_group_id"]
+                # Obtain the affinity group from the environment
+                affinity_group = self._affinity_groups.get(affinity_group_id)
+                if not affinity_group:
+                    raise vimconn.VimConnNotFoundException(
+                        f"Affinity group: {affinity_group_id} not found"
+                    )
+                else:
+                    affinity_groups.append(affinity_group)
+        return affinity_groups
+
+    def _add_vm_affinity_groups(
+        self, session, cluster, new_vm, affinity_group_list, created_items
+    ):
+        if affinity_group_list:
+            self.logger.debug("Add vm to affinity group list: %s", affinity_group_list)
+            for affinity_group in affinity_group_list:
+                self.vcvms_util.add_vm_or_create_affinity_group(
+                    session,
+                    cluster,
+                    affinity_group.get("name"),
+                    affinity_group.get("type"),
+                    new_vm,
+                )
+                affinity_group_txt = "affinity-group:" + affinity_group.get("name")
+                created_items[affinity_group_txt] = True
+
+    def _process_flavor_extended_config(self, vc_vmops, vm_config_spec, flavor):
+        """
+        Process the flavor extended configuration
+        :param flavor: dict with the flavor data; the extended configuration is in key "extended"
+        :param vm_config_spec: dictionary with the new vm config, to be completed with
+            the extended flavor config
+        """
+        quotas_keys = {"cpu-quota", "mem-quota"}
+        # quotas = {"cpu-quota", "mem-quota", "vif-quota", "disk-io-quota"}
+
+        extended = flavor.get("extended")
+        if extended:
+            self.logger.debug("Process flavor extended data: %s", extended)
+
+            # Process quotas
+            extended_quotas = {
+                key: extended[key] for key in quotas_keys & extended.keys()
+            }
+            if extended_quotas:
+                vc_vmops.prepare_vm_quotas(extended_quotas, vm_config_spec)
+
+    def get_vminstance(self, vm_id):
+        """Returns the VM instance information from VIM"""
+        self.logger.debug("Get vm_instance id: %s", vm_id)
+
+        session = self._get_vcenter_instance()
+        try:
+            vm = self.vcvms_util.get_vm_by_uuid(session, vm_id)
+            return vm
+        finally:
+            self._disconnect_si(session)
+
+    @handle_connector_exceptions
+    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):
+        """
+        Removes a VM instance from VIM and its associated elements
+        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+        :param created_items: dictionary with extra items to be deleted, provided by method new_vminstance
+            and/or method action_vminstance
+        :return: None or the same vm_id. Raises an exception on failure
+        """
+        self.logger.debug(
+            "Delete vm_instance: vm_id: %s, "
+            " created_items: %s,"
+            " volumes_to_hold: %s",
+            vm_id,
+            created_items,
+            volumes_to_hold,
+        )
+
+        created_items = created_items or {}
+        volumes_to_hold = volumes_to_hold or {}
+
+        session = self._get_vcenter_instance()
+        try:
+            # Obtain volumes to keep
+            volumes_to_keep = self._extract_volumes_to_keep(created_items)
+            self.logger.debug("volumes_to_keep: %s", volumes_to_keep)
+
+            # Obtain cloud init iso files to delete
+            cloud_init_iso = self._extract_cloudinit_iso(created_items)
+            self.logger.debug("cloud init iso: %s", cloud_init_iso)
+
+            # Obtain vm
+            vm = self.vcvms_util.get_vm_by_uuid(session, vm_id)
+
+            # Shut down the vm and wait, to avoid problems when volumes are detached
+            stop_task = self.vcvms_util.stop_vm(vm)
+            vcutil.wait_for_task(stop_task)
+
+            # Prepare spec to unattach volumes
+            unattach_spec = None
+            if volumes_to_keep:
+                unattach_spec = self.vcvms_util.prepare_unattach_volumes(
+                    vm, volumes_to_keep, unattach_spec
+                )
+
+            # Prepare spec to unattach iso
+            if cloud_init_iso:
+                unattach_spec = self.vcvms_util.prepare_unattach_cloudinitiso(
+                    vm, cloud_init_iso, unattach_spec
+                )
+
+            # Unattach volumes to keep and iso
+            self.vcvms_util.reconfig_vm(session, vm, unattach_spec)
+
+            # Delete iso files
+            self.vcvms_util.delete_iso_files(session, cloud_init_iso)
+
+            # Delete vm from affinity groups
+            self._delete_vm_affinity_groups(session, vm, created_items)
+
+            # Delete vm
+            self.vcvms_util.delete_vm(session, vm_id)
+
+        finally:
+            self._disconnect_si(session)
+
+    def _delete_vm_affinity_groups(self, session, vm, created_items):
+        self.logger.debug("Delete vm affinity groups: %s", created_items)
+        vm_name = vm.name
+        cluster = self.vcvms_util.get_vm_cluster(session, vm)
+
+        for key, value in created_items.items():
+            if value is True and key.startswith("affinity-group:"):
+                self.logger.debug("Delete vm affinity groups key: %s", key)
+                # Remove the vm from the affinity group; if it was the last vm,
+                # the affinity group itself is deleted
+                affinity_rule_name = key.split(":")[1]
+                self.vcvms_util.delete_vm_affinity_rule(
+                    session, cluster, affinity_rule_name, vm_name
+                )
+                created_items[key] = False
+
+    @staticmethod
+    def _extract_volumes_to_keep(created_items: dict) -> list:
+        volumes_to_keep = []
+        for key, value in created_items.items():
+            if value is True and key.startswith("volume:") and ":keep" in key:
+                # Extract the volume ID (the part between "volume:" and ":keep")
+                volume_id = key.split(":")[1]
+                volumes_to_keep.append(volume_id)
+        return volumes_to_keep
+
+    @staticmethod
+    def _extract_cloudinit_iso(created_items: dict) -> list:
+        cloud_init_iso_list = []
+        for key, value in created_items.items():
+            if value is True and key.startswith("cloud-init-iso:"):
+                cloud_init_id = key.split(":")[1]
+                cloud_init_iso_list.append(cloud_init_id)
+        return cloud_init_iso_list
+
+    def refresh_vms_status(self, vm_list):
+        """Get the status of the virtual machines and their interfaces/ports
+        Params: the list of VM identifiers
+        Returns a dictionary with:
+            vm_id:          #VIM id of this Virtual Machine
+                status:     #Mandatory. Text with one of:
+                            # DELETED (not found at vim)
+                            # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                            # OTHER (Vim reported other status not understood)
+                            # ERROR (VIM indicates an ERROR status)
+                            # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                            # CREATING (on building process), ERROR
+                            # ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                interfaces:
+                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                    mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                    vim_net_id:       #network id where this interface is connected
+                    vim_interface_id: #interface/port VIM id
+                    ip_address:       #null, or text with IPv4, IPv6 address
+                    compute_node:     #identification of compute node where PF,VF interface is allocated
+                    pci:              #PCI address of the NIC that hosts the PF,VF
+                    vlan:             #physical VLAN used for VF
+        """
+        self.logger.debug("Refresh vm_status vm_list: %s", vm_list)
+        vm_list = vm_list or []
+        out_vms = {}
+
+        session = self._get_vcenter_instance()
+        try:
+            for vm_id in vm_list:
+                self.logger.debug("Refresh vm id: %s", vm_id)
+                out_vm = {}
+                try:
+                    vm = self.vcvms_util.get_vm_by_uuid(session, vm_id)
+
+                    vim_vm = self.vcvms_util.get_vim_vm_basic(vm)
+                    out_vm["vim_info"] = self.serialize(vim_vm)
+                    out_vm["status"] = vim_vm.get("status", "other")
+
+                    out_vm["interfaces"] = self.vcvms_util.get_vm_nics_list(vm)
+
+                    mac_ips_dict = self.vcenter_ipmanager.get_vm_ips(session, vm)
+                    self.logger.debug(
+                        "Obtained list of macs and ip addresses: %s", mac_ips_dict
+                    )
+
+                    for interface in out_vm["interfaces"]:
+                        mac_address = interface.get("mac_address")
+                        if mac_ips_dict.get(mac_address):
+                            interface["ip_address"] = ";".join(
+                                mac_ips_dict.get(mac_address)
+                            )
+
+                except vimconn.VimConnNotFoundException as e:
+                    self.logger.error(
+                        "Not found error recovering vm id: %s, message: %s",
+                        vm_id,
+                        str(e),
+                    )
+                    out_vm["status"] = "DELETED"
+                    out_vm["error_msg"] = str(e)
+                except Exception as e:
+                    self.logger.error(
+                        "Error recovering vm id: %s, message: %s", vm_id, str(e)
+                    )
+                    out_vm["status"] = "VIM_ERROR"
+                    out_vm["error_msg"] = str(e)
+
+                out_vms[vm_id] = out_vm
+        finally:
+            self._disconnect_si(session)
+
+        self.logger.debug("Refresh vm status, result: %s", out_vms)
+        return out_vms
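An example of the structure refresh_vms_status returns, one entry per requested id (illustrative values, keys per the docstring above):

    # Hypothetical refresh_vms_status output for a single VM.
    {
        "5029c440-0000-0000-0000-000000000001": {
            "status": "ACTIVE",
            "vim_info": "...",  # yaml dump of the vim data
            "interfaces": [
                {
                    "vim_interface_id": "4000",
                    "vim_net_id": "dvportgroup-123",
                    "mac_address": "00:50:56:aa:bb:cc",
                    "ip_address": "10.0.0.12",
                }
            ],
        }
    }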
+
+    @handle_connector_exceptions
+    def action_vminstance(self, vm_id, action_dict, created_items=None):
+        """
+        Send an action over a VM instance. Returns created_items if the action was
+        successfully sent to the VIM. created_items is a dictionary with the items
+        created by new_vminstance and previous actions
+        :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+        :param action_dict: dictionary with the action to perform
+        :param created_items: provided by method new_vminstance is a dictionary with key-values that will be passed to
+            the method delete_vminstance. Can be used to store created ports, volumes, etc. Format is VimConnector
+            dependent, but do not use nested dictionaries and a value of None should be the same as not present. This
+            method can modify this value
+        :return: None, or a console dict
+        """
+        self.logger.debug(
+            "Action vm_instance, id: %s, action_dict: %s", vm_id, str(action_dict)
+        )
+        created_items = created_items or {}
+
+        session = self._get_vcenter_instance()
+        try:
+            # Get vm
+            vm = self.vcvms_util.get_vm_by_uuid(session, vm_id)
+            self.logger.debug("vm state: %s", vm.runtime.powerState)
+
+            if "start" in action_dict:
+                self.vcvms_util.start_vm(vm)
+            elif "shutoff" in action_dict or "shutdown" in action_dict:
+                self.vcvms_util.stop_vm(vm)
+            elif "pause" in action_dict:
+                # todo - pause
+                self.logger.warning("pause not implemented yet")
+
+            elif "resume" in action_dict:
+                self.logger.warning("resume not implemented yet")
+
+            elif "forceOff" in action_dict:
+                self.logger.warning("forceOff not implemented yet")
+
+            elif "reboot" in action_dict:
+                self.logger.warning("reboot action not implemented yet")
+
+            elif "terminate" in action_dict:
+                self.logger.warning("terminate action not implemented yet")
+
+            elif "rebuild" in action_dict:
+                self.logger.warning("rebuild action not implemented yet")
+
+            else:
+                raise vimconn.VimConnException(
+                    f"action_vminstance: Invalid action {action_dict} or action is None."
+                )
+
+        finally:
+            self._disconnect_si(session)
+
+    def get_vminstance_console(self, vm_id, console_type="vnc"):
+        """
+        Get a console for the virtual machine
+        Params:
+            vm_id: uuid of the VM
+            console_type, can be:
+                "novnc" (by default), "xvpvnc" for VNC types,
+                "rdp-html5" for RDP types, "spice-html5" for SPICE types
+        Returns dict with the console parameters:
+            protocol: ssh, ftp, http, https, ...
+            server:   usually ip address
+            port:     the http, ssh, ... port
+            suffix:   extra text, e.g. the http path and query string
+        """
+        self.logger.debug(
+            "Get vm instance console, vm_id: %s, console_type: %s", vm_id, console_type
+        )
+        raise vimconn.VimConnNotImplemented(
+            "get instance console is not supported in vcenter"
+        )
+
+    def get_vminstance_console(self, vm_id, console_type="vnc"):
+        """
+        Get a console for the virtual machine
+        Params:
+            vm_id: uuid of the VM
+            console_type, can be:
+                "novnc" (by default), "xvpvnc" for VNC types,
+                "rdp-html5" for RDP types, "spice-html5" for SPICE types
+        Returns dict with the console parameters:
+            protocol: ssh, ftp, http, https, ...
+            server: usually ip address
+            port: the http, ssh, ... port
+            suffix: extra text, e.g. the http path and query string
+        """
+        self.logger.debug(
+            "Get vm instance console, vm_id: %s, console_type: %s", vm_id, console_type
+        )
+        raise vimconn.VimConnNotImplemented(
+            "get instance console is not supported in vcenter"
+        )
+
+    @handle_connector_exceptions
+    def new_network(
+        self,
+        net_name,
+        net_type,
+        ip_profile=None,
+        shared=False,
+        provider_network_profile=None,
+    ):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data': underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp': underlay E-LINE network for Passthrough and SRIOV interfaces
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X.X.X.X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant
+            'shared': if this network can be seen/used by other tenants/organizations
+            'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is VimConnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+        self.logger.debug(
+            "new network, net_name: %s, net_type: %s, ip_profile: %s,"
+            " shared: %s, provider_network_profile: %s",
+            net_name,
+            net_type,
+            ip_profile,
+            shared,
+            provider_network_profile,
+        )
+        created_items = {}
+
+        # Generate network name with suffix
+        net_unique_name = self._generate_network_name(net_name)
+
+        # Create distributed port group
+        net_id = self._create_distributed_port_group(
+            net_unique_name, net_type, ip_profile, provider_network_profile
+        )
+
+        self.logger.debug("Created network id: %s, name: %s", net_id, net_unique_name)
+        return net_id, created_items
+
+    def _create_distributed_port_group(
+        self, net_name, net_type, ip_profile, provider_network_profile
+    ):
+        self.logger.debug("Create distributed port group with name: %s", net_name)
+
+        session = self._get_vcenter_instance()
+        try:
+            # Obtain dvs_names
+            dvs_names = self.vcenter_config.get_dvs_names(session)
+            if len(dvs_names) != 1:
+                raise vimconn.VimConnException(
+                    "Creating networks is only supported when exactly one distributed switch is configured"
+                )
+
+            dvs_name = dvs_names[0]
+
+            # Create distributed port group, using the vlan from the provider network profile if available
+            vlan = None
+            if provider_network_profile:
+                vlan = provider_network_profile.get("segmentation-id")
+                self.logger.debug("vlan value for network: %s", vlan)
+
+            net_id, _ = self.vcnet_util.create_distributed_port_group(
+                session, net_name, dvs_name, vlan=vlan
+            )
+
+            return net_id
+        finally:
+            self._disconnect_si(session)
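+
+    # create_distributed_port_group lives in vcenter_network.py (not shown in this
+    # hunk). A minimal pyVmomi sketch of the underlying vCenter call, assuming
+    # "dvs" is an already-resolved vim.DistributedVirtualSwitch object, would look
+    # roughly like this (illustrative, not the helper's literal code):
+    #
+    #   from pyVmomi import vim
+    #
+    #   spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+    #   spec.name = net_name
+    #   spec.type = "earlyBinding"
+    #   spec.numPorts = 8  # arbitrary example size
+    #   if vlan:
+    #       port_config = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+    #       port_config.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec(
+    #           vlanId=vlan, inherited=False
+    #       )
+    #       spec.defaultPortConfig = port_config
+    #   task = dvs.AddDVPortgroup_Task([spec])  # then wait for the task to complete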
+
+    def get_network_list(self, filter_dict=None):
+        """Obtain tenant networks of VIM
+        Params:
+            'filter_dict' (optional) contains entries to return only networks that match ALL entries:
+                name: string => returns only networks with this name
+                id: string => returns networks with this VIM id, this implies returning one network at most
+                shared: boolean => returns only networks that are (or are not) shared
+                tenant_id: string => returns only networks that belong to this tenant/project
+                #(not used yet) admin_state_up: boolean => returns only networks that are (or are not)
+                # in admin state active
+                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+        Returns the network list of dictionaries. Each dictionary contains:
+            'id': (mandatory) VIM network id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR',
+                'VIM_ERROR', 'OTHER'
+            'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
+            'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains
+                the segmentation id
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of
+                filter_dict param
+        List can be empty if no network matches the filter_dict. Raise an exception only upon VIM
+        connectivity, authorization, or some other unspecific error
+        """
+        self.logger.debug("get network list, filter_dict: %s", filter_dict)
+        filter_dict = filter_dict or {}
+
+        # Get network list: step 1: get the list of distributed port groups
+        session = self._get_vcenter_instance()
+        try:
+            # Get the list of available distributed switches
+            dvs_names = self.vcenter_config.get_dvs_names(session)
+
+            # Get the list of distributed port groups for the distributed switches
+            dport_groups = self.vcnet_util.get_port_groups_by_dvs_name(
+                session, dvs_names
+            )
+            # self.logger.debug("Distributed port groups: %s", dport_groups)
+
+            network_list = []  # network list object to be returned
+            for port_group in dport_groups:
+                if filter_dict:
+                    if (
+                        filter_dict.get("id")
+                        and str(port_group.key) != filter_dict["id"]
+                    ):
+                        continue
+
+                    if (
+                        filter_dict.get("name")
+                        and str(port_group.name) != filter_dict["name"]
+                    ):
+                        continue
+
+                # Obtain vim network data
+                network_list.append(self.vcnet_util.get_vim_network_from_pg(port_group))
+
+            self.logger.debug("Network list obtained: %s", network_list)
+            return network_list
+        finally:
+            self._disconnect_si(session)
+
+    @handle_connector_exceptions
+    def get_network(self, net_id):
+        """Obtain network details from the 'net_id' VIM network
+        Return a dict that contains:
+            'id': (mandatory) VIM network id, that is, net_id
+            'name': (mandatory) VIM network name
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR',
+                'VIM_ERROR', 'OTHER'
+            'error_msg': (optional) text that explains the ERROR status
+            other VIM specific fields: (optional) whenever possible using the same naming of
+                filter_dict param
+        Raises an exception upon error or when network is not found
+        """
+        self.logger.debug("get network id: %s", net_id)
+
+        session = self._get_vcenter_instance()
+        try:
+            vim_net = self.vcnet_util.get_vim_network_by_id(session, net_id)
+            return vim_net
+        finally:
+            self._disconnect_si(session)
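+
+    # Illustrative usage sketch (hypothetical names and ids): both lookups resolve
+    # against the distributed port groups of the configured switches, e.g.
+    #
+    #   nets = conn.get_network_list({"name": "mgmt-net-1a2b3c4d"})
+    #   net = conn.get_network("dvportgroup-1001")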
+
+    @handle_connector_exceptions
+    def delete_network(self, net_id, created_items=None):
+        """
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted, provided by method new_network
+        Returns the network identifier or raises an exception upon error or when network is not found
+        """
+        self.logger.debug(
+            "delete network id: %s, created_items: %s", net_id, created_items
+        )
+
+        session = self._get_vcenter_instance()
+        try:
+            # Check the network is a distributed port group
+            if not self.vcnet_util.is_distributed_port_group(net_id):
+                raise vimconn.VimConnNotSupportedException(
+                    f"Network with id: {net_id} is not a distributed port group, deleting is not supported"
+                )
+
+            # Obtain the network
+            net = self.vcnet_util.get_network_by_id(session, net_id)
+            if self.vcnet_util.is_nsx_port_group(net):
+                raise vimconn.VimConnNotSupportedException(
+                    f"Network with id: {net_id} is an NSX backed network, deleting is not supported"
+                )
+
+            # Obtain connected vms
+            connected_vms = self.vcnet_util.get_distributed_port_connected_vms(net)
+
+            # Disconnect vms
+            self.vcvms_util.disconnect_vms_from_dpg(session, net_id, connected_vms)
+
+            # Delete the network and return its id, as the docstring promises
+            self.vcnet_util.delete_distributed_port_group(net)
+
+            return net_id
+        finally:
+            self._disconnect_si(session)
+
+    def refresh_nets_status(self, net_list):
+        """Get the status of the networks
+        Params:
+            'net_list': a list with the VIM network ids whose status must be retrieved
+        Returns a dictionary with:
+            'net_id':        # VIM id of this network
+                status:      # Mandatory. Text with one of:
+                             #   DELETED (not found at vim)
+                             #   VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+                             #   OTHER (VIM reported other status not understood)
+                             #   ERROR (VIM indicates an ERROR status)
+                             #   ACTIVE, INACTIVE, DOWN (admin down),
+                             #   BUILD (on building process)
+                error_msg:   # Text with VIM error message, if any. Or the VIM connection ERROR
+                vim_info:    # Text with plain information obtained from vim (yaml.safe_dump)
+            'net_id2': ...
+        """
+        self.logger.debug("Refresh network list %s", net_list)
+        net_list = net_list or []
+        net_dict = {}
+
+        session = self._get_vcenter_instance()
+        try:
+            for net_id in net_list:
+                net = {}
+
+                try:
+                    vim_net = self.vcnet_util.get_vim_network_by_id(session, net_id)
+
+                    net["vim_info"] = self.serialize(vim_net)
+                    # vCenter does not have a status flag, default to ACTIVE
+                    net["status"] = vim_net.get("status", "ACTIVE")
+
+                except vimconn.VimConnNotFoundException as e:
+                    self.logger.error("Exception getting net status: %s", str(e))
+                    net["status"] = "DELETED"
+                    net["error_msg"] = str(e)
+                except vimconn.VimConnException as e:
+                    self.logger.error("Exception getting net status: %s", str(e))
+                    net["status"] = "VIM_ERROR"
+                    net["error_msg"] = str(e)
+                net_dict[net_id] = net
+
+        finally:
+            self._disconnect_si(session)
+
+        self.logger.debug("Refresh net status, result: %s", net_dict)
+        return net_dict
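+
+    # Illustrative sketch (hypothetical ids): a refresh_nets_status result that
+    # mixes a live network and an already-deleted one looks roughly like:
+    #
+    #   {
+    #       "dvportgroup-1001": {"status": "ACTIVE", "vim_info": "..."},
+    #       "dvportgroup-9999": {"status": "DELETED", "error_msg": "..."},
+    #   }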
+
+    def serialize(self, value):
+        """Serialization of python basic types.
+
+        If the value is not serializable, a message is logged and a simple
+        representation of the data (that cannot be converted back to python)
+        is returned.
+        """
+        if isinstance(value, str):
+            return value
+
+        try:
+            return yaml.dump(value, default_flow_style=True, width=256)
+        except yaml.representer.RepresenterError:
+            self.logger.debug(
+                "The following entity cannot be serialized in YAML:\n\n%s\n\n",
+                str(value),
+                exc_info=True,
+            )
+
+            return str(value)
+
+    def _generate_cloud_init_iso(self, cloud_config):
+        iso_path = None
+        tmp_dir = None
+
+        if cloud_config:
+            self.logger.debug("Cloud config provided, generate ISO file")
+            _, userdata = self._create_user_data(cloud_config)
+            iso_path, tmp_dir = self.cloudinit_helper.generate_cloud_init_iso(userdata)
+
+        return iso_path, tmp_dir
+
+    def _attach_cloud_init_iso(
+        self, vc_vmops, new_vm, iso_path, tmp_dir, created_items
+    ):
+        """
+        Attaches a previously generated cloud-init ISO file to a VM
+        """
+
+        if iso_path:
+            # Obtain vm folder name and datastore name
+            folder_name = new_vm.name
+            datastore_name = new_vm.datastore[0].info.name
+            file_name = new_vm.name + "-cloud-init.iso"
+
+            # Obtain datacenter name for the datastore
+            datacenter_name = self.vcenter_config.get_datacenter_name(vc_vmops.session)
+
+            # Upload iso file
+            self.vcenter_fileuploader.upload_file(
+                iso_path, datacenter_name, datastore_name, folder_name, file_name
+            )
+            iso_filename = f"[{datastore_name}] {folder_name}/{file_name}"
+
+            iso_filename_txt = "cloud-init-iso:" + quote(iso_filename)
+            created_items[iso_filename_txt] = True
+
+            # Attach iso to vm
+            vc_vmops.attach_cdrom(new_vm, iso_filename)
+
+            # Delete tmp_dir
+            self.cloudinit_helper.delete_tmp_dir(tmp_dir)
+
+    @staticmethod
+    def _generate_short_suffix():
+        # Generate a UUID and take the first 8 characters
+        return str(uuid.uuid4())[:8]
+
+    def _generate_vm_name(self, vm_name):
+        return vm_name + "-" + self._generate_short_suffix()
+
+    def _generate_network_name(self, network_name):
+        return network_name + "-" + self._generate_short_suffix()
+
+    @staticmethod
+    def _format_raise_exception(exception):
+        """Transform a PyVmomi exception into a VimConn exception by analyzing the cause."""
+        logger = logging.getLogger("ro.vim.vcenter")
+        message_error = str(exception)
+
+        # Log the error before reraising
+        logger.error("Exception occurred, message: %s", message_error, exc_info=True)
+
+        # Reraise VimConnException directly
+        if isinstance(exception, vimconn.VimConnException):
+            raise exception
+        else:
+            # General errors
+            raise vimconn.VimConnException(
+                f"Exception: {type(exception).__name__}: {message_error}"
+            )
+
+    def _get_vcenter_instance(self):
+        self.logger.debug(
+            "Connect to vcenter, hostname: %s, port: %s, user: %s",
+            self.vcenter_hostname,
+            self.vcenter_port,
+            self.user,
+        )
+        si = SmartConnect(
+            host=self.vcenter_hostname,
+            user=self.user,
+            pwd=self.passwd,
+            port=self.vcenter_port,
+            sslContext=self.ssl_context,
+        )
+        return si
+
+    def _disconnect_si(self, server_instance):
+        Disconnect(server_instance)
+
+    def _get_vcenter_content(self, server_instance):
+        return server_instance.RetrieveContent()
+
+    def _validate_int(self, value, var_name):
+        if not isinstance(value, int):
+            raise vimconn.VimConnException(
+                f"Variable '{var_name}' must be an int. Got value: {value} ({type(value).__name__})"
+            )
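+
+# Note: the ssl_context handed to SmartConnect in _get_vcenter_instance is built
+# during connector initialization, outside this hunk. A minimal sketch, assuming a
+# hypothetical "insecure" config flag that relaxes certificate checking:
+#
+#   import ssl
+#
+#   ssl_context = ssl.create_default_context()
+#   if insecure:
+#       ssl_context.check_hostname = False
+#       ssl_context.verify_mode = ssl.CERT_NONE
+#   else:
+#       ssl_context.load_verify_locations(cafile=ca_cert_path)  # hypothetical path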
diff --git a/RO-VIM-vcenter/requirements.in b/RO-VIM-vcenter/requirements.in
new file mode 100644
index 00000000..8f9bcbd4
--- /dev/null
+++ b/RO-VIM-vcenter/requirements.in
@@ -0,0 +1,21 @@
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+PyYAML
+requests
+netaddr
+pyvmomi
+
diff --git a/RO-VIM-vcenter/setup.py b/RO-VIM-vcenter/setup.py
new file mode 100644
index 00000000..6e93d886
--- /dev/null
+++ b/RO-VIM-vcenter/setup.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rovim_vcenter"
+_version_command = ("git describe --match v* --tags --long --dirty", "pep440-git-full")
+_description = "OSM RO VIM plugin for VMware vCenter"
+_author = "OSM Support"
+_author_email = "osmsupport@etsi.org"
+_maintainer = "OSM Support"
+_maintainer_email = "osmsupport@etsi.org"
+_license = "Apache 2.0"
+_url = "https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary"
+
+_readme = """
+=================
+osm_rovim_vcenter
+=================
+
+osm-ro plugin for the VMware vCenter VIM
+"""
+
+setup(
+    name=_name,
+    description=_description,
+    long_description=_readme,
+    version_command=_version_command,
+    author=_author,
+    author_email=_author_email,
+    maintainer=_maintainer,
+    maintainer_email=_maintainer_email,
+    url=_url,
+    license=_license,
+    packages=[_name],
+    include_package_data=True,
+    setup_requires=["setuptools-version-command"],
+    entry_points={
+        "osm_rovim.plugins": [
+            "rovim_vcenter = osm_rovim_vcenter.vimconn_vcenter:vimconnector"
+        ],
+    },
+)
diff --git a/RO-VIM-vcenter/stdeb.cfg b/RO-VIM-vcenter/stdeb.cfg
new file mode 100644
index 00000000..4765eba8
--- /dev/null
+++ b/RO-VIM-vcenter/stdeb.cfg
@@ -0,0 +1,19 @@
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+## + +[DEFAULT] +X-Python3-Version : >= 3.5 +Depends3: genisoimage diff --git a/devops-stages/stage-build.sh b/devops-stages/stage-build.sh index 3d570c78..55473613 100755 --- a/devops-stages/stage-build.sh +++ b/devops-stages/stage-build.sh @@ -35,6 +35,7 @@ dist_ro_vim_azure dist_ro_vim_openstack dist_ro_vim_openvim dist_ro_vim_vmware +dist_ro_vim_vcenter dist_ro_vim_gcp" TOX_ENV_LIST="$(echo $PACKAGES | sed "s/ /,/g")" diff --git a/tox.ini b/tox.ini index a3af3d5c..b75bc7f6 100644 --- a/tox.ini +++ b/tox.ini @@ -63,6 +63,7 @@ commands = black --check --diff RO-VIM-openstack black --check --diff RO-VIM-openvim black --check --diff RO-VIM-vmware + black --check --diff RO-VIM-vcenter black --check --diff RO-VIM-gcp black --check --diff integration-tests @@ -162,6 +163,7 @@ commands = flake8 RO-VIM-azure/osm_rovim_azure/ RO-VIM-azure/setup.py flake8 RO-VIM-openstack/osm_rovim_openstack/ RO-VIM-openstack/setup.py flake8 RO-VIM-openvim/osm_rovim_openvim/ RO-VIM-openvim/setup.py + flake8 RO-VIM-vcenter/osm_rovim_vcenter/ RO-VIM-vcenter/setup.py flake8 RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware.py RO-VIM-vmware/setup.py flake8 RO-VIM-gcp/osm_rovim_gcp/ RO-VIM-gcp/setup.py flake8 integration-tests/ @@ -188,6 +190,7 @@ commands = pylint -E RO-SDN-onos_vpls/osm_rosdn_onos_vpls --disable=E1101 pylint -E RO-SDN-tapi/osm_rosdn_tapi pylint -E RO-VIM-aws/osm_rovim_aws + pylint -E RO-VIM-vcenter/osm_rovim_vcenter - pylint -E RO-VIM-azure/osm_rovim_azure --disable=all pylint -E RO-VIM-openstack/osm_rovim_openstack --disable=E1101 - pylint -E RO-VIM-openvim/osm_rovim_openvim --disable=all @@ -445,6 +448,18 @@ commands = python3 setup.py --command-packages=stdeb.command sdist_dsc sh -c 'cd deb_dist/osm-rovim-vmware*/ && dpkg-buildpackage -rfakeroot -uc -us' +####################################################################################### +[testenv:dist_ro_vim_vcenter] +deps = {[testenv]deps} + -r{toxinidir}/requirements-dist.txt +skip_install = true +allowlist_externals = sh +changedir = {toxinidir}/RO-VIM-vcenter +commands = + sh -c 'rm -rf deb_dist dist osm_rovim_vcenter.egg-info osm_rovim_vcenter*.tar.gz' + python3 setup.py --command-packages=stdeb.command sdist_dsc + sh -c 'cd deb_dist/osm-rovim-vcenter*/ && dpkg-buildpackage -rfakeroot -uc -us' + ####################################################################################### [testenv:dist_ro_vim_gcp] -- 2.25.1