Feature 11051: VIM plugin for VMware vCenter 05/15005/7
author lloretgalleg <illoret@indra.es>
Tue, 4 Mar 2025 11:37:14 +0000 (11:37 +0000)
committer elumalai <deepika.e@tataelxsi.co.in>
Wed, 18 Jun 2025 13:12:54 +0000 (15:12 +0200)
Change-Id: If63f0625725da9141585bef675224258bbd047cc
Signed-off-by: lloretgalleg <illoret@indra.es>
15 files changed:
Dockerfile.local
RO-VIM-vcenter/osm_rovim_vcenter/__init__.py [new file with mode: 0644]
RO-VIM-vcenter/osm_rovim_vcenter/nsx_client.py [new file with mode: 0644]
RO-VIM-vcenter/osm_rovim_vcenter/vcenter_config.py [new file with mode: 0644]
RO-VIM-vcenter/osm_rovim_vcenter/vcenter_ipmanager.py [new file with mode: 0644]
RO-VIM-vcenter/osm_rovim_vcenter/vcenter_network.py [new file with mode: 0644]
RO-VIM-vcenter/osm_rovim_vcenter/vcenter_util.py [new file with mode: 0644]
RO-VIM-vcenter/osm_rovim_vcenter/vcenter_vms.py [new file with mode: 0644]
RO-VIM-vcenter/osm_rovim_vcenter/vim_helper.py [new file with mode: 0644]
RO-VIM-vcenter/osm_rovim_vcenter/vimconn_vcenter.py [new file with mode: 0644]
RO-VIM-vcenter/requirements.in [new file with mode: 0644]
RO-VIM-vcenter/setup.py [new file with mode: 0644]
RO-VIM-vcenter/stdeb.cfg [new file with mode: 0644]
devops-stages/stage-build.sh
tox.ini

index 1aea781..b3ebca3 100644 (file)
@@ -21,8 +21,11 @@ WORKDIR /build
 RUN DEBIAN_FRONTEND=noninteractive apt-get --yes update && \
     DEBIAN_FRONTEND=noninteractive apt-get --yes install \
     gcc python3 python3-dev python3-venv python3-pip \
-    python3-setuptools curl git genisoimage netbase && \
-    python3 -m pip install -U pip build
+    python3-setuptools curl git genisoimage netbase libmagic1 file && \
+    apt-get clean && rm -rf /var/lib/apt/lists/*
+
+# Upgrade pip and install build tools
+RUN python3 -m pip install -U pip build
 
 COPY . /build
 
@@ -46,6 +49,9 @@ RUN python3 -m build /build/RO-VIM-vmware && \
 RUN python3 -m build /build/RO-VIM-openstack && \
     python3 -m pip install /build/RO-VIM-openstack/dist/*.whl
 
+RUN python3 -m build /build/RO-VIM-vcenter && \
+     python3 -m pip install /build/RO-VIM-vcenter/dist/*.whl
+
 RUN python3 -m build /build/RO-VIM-openvim && \
     python3 -m pip install /build/RO-VIM-openvim/dist/*.whl
 
@@ -95,6 +101,11 @@ COPY --from=INSTALL /usr/local/lib/python3.10/dist-packages  /usr/local/lib/pyth
 COPY --from=INSTALL /usr/bin/genisoimage /usr/bin/genisoimage
 COPY --from=INSTALL /etc/protocols /etc/protocols
 
+# Copy libmagic.so.1 and dependencies
+COPY --from=INSTALL /usr/lib/x86_64-linux-gnu/libmagic.so.1 /usr/lib/x86_64-linux-gnu/
+COPY --from=INSTALL /usr/lib/x86_64-linux-gnu/libmagic.so.1.* /usr/lib/x86_64-linux-gnu/
+COPY --from=INSTALL /usr/share/misc/magic.mgc /usr/share/misc/magic.mgc
+
 VOLUME /var/log/osm
 
 EXPOSE 9090
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/__init__.py b/RO-VIM-vcenter/osm_rovim_vcenter/__init__.py
new file mode 100644 (file)
index 0000000..676bd0e
--- /dev/null
@@ -0,0 +1,15 @@
+# -*- coding: utf-8 -*-\r
+# Copyright 2025 Indra\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#    http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r
+# implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/nsx_client.py b/RO-VIM-vcenter/osm_rovim_vcenter/nsx_client.py
new file mode 100644 (file)
index 0000000..364435e
--- /dev/null
@@ -0,0 +1,129 @@
+# -*- coding: utf-8 -*-\r
+# Copyright 2025 Indra\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#    http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r
+# implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+"""\r
+Utility class to deal with NSX in vcenter\r
+"""\r
+import logging\r
+import os\r
+\r
+from osm_ro_plugin import vimconn\r
+import requests\r
+from requests.auth import HTTPBasicAuth\r
+\r
+\r
+class NsxClient:\r
+    """\r
+    Class that handles interactions with vcenter NSX\r
+    """\r
+\r
+    NSX_POLICY_V1_API_PREFIX = "/policy/api/v1"\r
+\r
+    def __init__(\r
+        self, nsx_manager_url, user, password, verify_ssl=False, log_level=None\r
+    ):\r
+        self.nsx_manager_url = nsx_manager_url\r
+        self.user = user\r
+        self.password = password\r
+        self.verify_ssl = verify_ssl\r
+\r
+        self.logger = logging.getLogger("ro.vim.vcenter.nsx")\r
+        if log_level:\r
+            self.logger.setLevel(getattr(logging, log_level))\r
+\r
+        self.logger.info("verify_ssl: %s", self.verify_ssl)\r
+        if not self.verify_ssl:\r
+            self.logger.info("Insecure access to nsx is configured")\r
+\r
+    def get_nsx_segment_dhcp_config(self, segment_path):\r
+        """\r
+        Obtain nsx subnet config from segment path\r
+        """\r
+        self.logger.debug("Obtain nsx segment dhcp configuration: %s", segment_path)\r
+        url = f"{self.nsx_manager_url}{self.NSX_POLICY_V1_API_PREFIX}{segment_path}"\r
+        response_json = self._process_http_get_request(url)\r
+        subnets = response_json.get("subnets")\r
+        self.logger.debug("Subnets recovered: %s", subnets)\r
+        return subnets\r
+\r
+    def _process_http_get_request(self, get_request_url):\r
+        headers = {\r
+            "Accept": "application/json",\r
+            "Content-Type": "application/json",\r
+        }\r
+\r
+        auth = self._get_auth()\r
+        if isinstance(auth, dict):  # Token-based or API-key authentication\r
+            headers.update(auth)\r
+\r
+        try:\r
+            response = requests.get(\r
+                get_request_url,\r
+                headers=headers,\r
+                auth=auth if not isinstance(auth, dict) else None,\r
+                verify=self.verify_ssl,\r
+            )\r
+            if not response.ok:\r
+                raise vimconn.VimConnException(\r
+                    f"Error nsx get request, text: {response.text}",\r
+                    http_code=response.status_code,\r
+                )\r
+            return response.json()\r
+        except requests.RequestException as e:\r
+            self.logger.error(\r
+                "Error nsx get request, url: %s, error: %s", get_request_url, e\r
+            )\r
+            raise vimconn.VimConnException(\r
+                f"Error nsx get request, url: {get_request_url}, error: {str(e)}"\r
+            ) from e\r
+\r
+    def _get_auth(self):\r
+        # Obtain authentication; for the moment basic authentication is used,\r
+        # but it could be modified to support other authentication methods\r
+        return HTTPBasicAuth(self.user, self.password)\r
+\r
+\r
+if __name__ == "__main__":\r
+    # Init logger\r
+    log_format = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(funcName)s(): %(message)s"\r
+    logging.basicConfig(\r
+        level=logging.DEBUG,  # Set the logging level\r
+        format=log_format,  # Use the log format defined above\r
+        datefmt="%Y-%m-%dT%H:%M:%S",\r
+        handlers=[\r
+            logging.StreamHandler(),  # Log to the console\r
+        ],\r
+    )\r
+    logger = logging.getLogger("ro.vim.vmware.test_nsx")\r
+    logger.setLevel(level=logging.DEBUG)\r
+\r
+    test_nsx_url = os.getenv("NSX_URL")\r
+    test_nsx_user = os.getenv("NSX_USER")\r
+    test_nsx_password = os.getenv("NSX_PASSWORD")\r
+    if os.getenv("NSX_CACERT"):\r
+        test_verify_ssl = os.getenv("NSX_CACERT")\r
+    else:\r
+        test_verify_ssl = False\r
+\r
+    logger.debug("Create nsx client")\r
+    nsx_client = NsxClient(\r
+        test_nsx_url,\r
+        test_nsx_user,\r
+        test_nsx_password,\r
+        verify_ssl=test_verify_ssl,\r
+        log_level="DEBUG",\r
+    )\r
+    test_segment_path = "/infra/segments/b5a27856-e7ef-49ab-a09e-e4d3416db3d2"\r
+    nsx_client.get_nsx_segment_dhcp_config(test_segment_path)\r
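
For reference, a hedged sketch of the subnets payload that get_nsx_segment_dhcp_config() returns: the field names below are the ones vcenter_ipmanager.py reads in _prepare_fixed_ip_dics(), and the concrete addresses are hypothetical:

    # Hypothetical NSX segment subnet entry; only gateway_address, network and
    # dhcp_config.dns_servers are consumed by the plugin.
    subnets = [
        {
            "gateway_address": "192.168.228.1/24",  # gateway ip in CIDR interface form
            "network": "192.168.228.0/24",          # used to derive the subnet mask
            "dhcp_config": {"dns_servers": ["10.0.0.53"]},
        }
    ]
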
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_config.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_config.py
new file mode 100644 (file)
index 0000000..76bab58
--- /dev/null
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Utility class to get configuration information in vcenter
+It should be used to get information about datacenters, datastore,
+clusters and other configuration objects in vcenter
+"""
+import logging
+
+from osm_ro_plugin import vimconn
+from osm_rovim_vcenter import vcenter_util as vcutil
+from pyVmomi import vim
+
+
+DEFAULT_BASE_FOLDER_NAME = "OSM"
+DEFAULT_IMAGES_FOLDER_NAME = "OSM-Images"
+DEFAULT_INSTANCES_FOLDER_NAME = "OSM-Instances"
+
+
+class VCenterConfig:
+    """
+    Class used to handle vcenter configuration, used to recover
+    basic objects information: cluster, datastore, etc
+    """
+
+    def __init__(
+        self,
+        availability_zones,
+        tenant_id,
+        tenant_name,
+        distributed_switches_names=None,
+        datastore_name=None,
+        log_level=None,
+    ):
+
+        if isinstance(availability_zones, str):
+            self.availability_zones_names = [availability_zones]
+        else:
+            self.availability_zones_names = availability_zones
+
+        self.distributed_switches_names = None
+        if isinstance(distributed_switches_names, str):
+            self.distributed_switches_names = [distributed_switches_names]
+        else:
+            self.distributed_switches_names = distributed_switches_names
+
+        self.datastore_name = datastore_name
+        self.tenant_id = tenant_id
+        self.tenant_name = tenant_name
+        self.datacenter_name = None
+
+        # Initialize vim availability zones to None, it will be set the first time it
+        # is recovered
+        self.vim_availability_zones = None
+
+        # Configuration of folders
+        self.base_folder_name = DEFAULT_BASE_FOLDER_NAME
+        self.images_folder_name = DEFAULT_IMAGES_FOLDER_NAME
+        self.instances_folder_name = DEFAULT_INSTANCES_FOLDER_NAME
+
+        self.logger = logging.getLogger("ro.vim.vcenter.config")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+    def get_dvs_names(self, session):
+        """
+        Obtains the distributed switch names. If they are configured, just returns
+        the list; otherwise recovers the names from the distributed switches
+        available to the cluster
+        """
+        dvs_names = self.distributed_switches_names
+        if not dvs_names:
+            self.logger.debug(
+                "Recover distributed switches names from cluster configuration"
+            )
+            self.logger.warning(
+                "Method to get distributed switches names from cluster not "
+                "implemented"
+            )
+            dvs_names = []
+        return dvs_names
+
+    def get_images_folder(self, session):
+        """
+        Obtain OSM images folder
+        """
+        # Improvement: take the tenant_id into account
+        base_folder = vcutil.get_vcenter_folder(session, self.base_folder_name)
+        if not base_folder:
+            raise vimconn.VimConnNotFoundException(
+                "base folder for current tenant not found"
+            )
+
+        # Get images folder (inside the osm base folder)
+        images_folder = vcutil.get_vcenter_folder(
+            session, self.images_folder_name, base_folder
+        )
+        if not images_folder:
+            raise vimconn.VimConnNotFoundException(
+                "images folder for current tenant not found"
+            )
+
+        return images_folder
+
+    def get_instances_folder(self, session):
+        """
+        Obtain OSM instances folder
+        """
+        osm_base_folder = vcutil.get_vcenter_folder(session, self.base_folder_name)
+        if not osm_base_folder:
+            raise vimconn.VimConnNotFoundException(
+                f"base folder name {self.base_folder_name} for current tenant not found"
+            )
+
+        # Get instances folder (inside the osm base folder)
+        base_vms_folder = self._get_or_create_child_folder(
+            osm_base_folder, self.instances_folder_name
+        )
+
+        # For each tenant there will be a subfolder
+        instances_folder = self._get_or_create_child_folder(
+            base_vms_folder, self.tenant_name
+        )
+
+        return instances_folder
+
+    def _get_or_create_child_folder(self, vm_base_folder, child_folder_name):
+
+        # Check if the folder already exists
+        child_folder = None
+        for child in vm_base_folder.childEntity:
+            if isinstance(child, vim.Folder) and child.name == child_folder_name:
+                child_folder = child
+                break
+
+        if not child_folder:
+            # Create a new folder
+            child_folder = vm_base_folder.CreateFolder(child_folder_name)
+            self.logger.debug("Folder '%s' created successfully", child_folder_name)
+
+        return child_folder
+
+    def get_datastore(self, session):
+        """
+        Get the datastore with the name set in the configuration; raises
+        VimConnNotFoundException if it is not found
+        """
+        datastore = vcutil.get_vcenter_obj(
+            session, [vim.Datastore], self.datastore_name
+        )
+        if not datastore:
+            raise vimconn.VimConnNotFoundException(
+                f"Datastore with name: {self.datastore_name} not found"
+            )
+
+        return datastore
+
+    def get_datacenter_name(self, session):
+        """
+        Obtains the datacenter name; this value is cached
+        """
+        if not self.datacenter_name:
+            self.datacenter_name = self._get_datacenter_from_datastore(session)
+        return self.datacenter_name
+
+    def _get_datacenter_from_datastore(self, session):
+        datacenter_name = None
+
+        # Create a view of all datastores
+        content = session.RetrieveContent()
+        container = content.viewManager.CreateContainerView(
+            content.rootFolder, [vim.Datastore], True
+        )
+        datastores = container.view
+
+        for datastore in datastores:
+            if datastore.name == self.datastore_name:
+                # Traverse up the hierarchy to find the datacenter
+                parent = datastore.parent
+                while parent and not isinstance(parent, vim.Datacenter):
+                    parent = parent.parent
+                if isinstance(parent, vim.Datacenter):
+                    datacenter_name = parent.name
+                    break  # Return the datacenter name and exit the loop
+        container.Destroy()
+
+        # Raise exception if no datacenter was found
+        if datacenter_name is None:
+            raise vimconn.VimConnException("Unable to find datacenter")
+        return datacenter_name
+
+    def get_cluster_rp_from_av_zone(
+        self, session, availability_zone_index, availability_zone_list
+    ):
+        """
+        Gets the resource pool and cluster corresponding to the indicated avzone
+        """
+
+        # Get the availability zone from the configuration; for now only the
+        # first configured availability zone is used
+        avzone_name = self.availability_zones_names[0]
+        return self._get_resource_pool_cluster_from_av_zone(session, avzone_name)
+
+    def _get_resource_pool_cluster_from_av_zone(self, session, avzone_name):
+        self.logger.debug("Search availability_zone name: %s", avzone_name)
+        # We have an availability zone that can correspond to a resource pool or to a cluster
+        # If it is a resource pool will find a cluster associated
+        # If it is a cluster will get the first resource pool associated
+
+        # Check if there is a resource group with this name
+        resource_pool = self._get_resource_pool(session, avzone_name)
+
+        if resource_pool:
+            cluster = self._get_cluster_from_resource_pool(session, resource_pool)
+            if not cluster:
+                raise vimconn.VimConnNotFoundException(
+                    "unable to find cluster for resource pool"
+                    f"name : {resource_pool.name}"
+                )
+        else:
+            # Check if there is a cluster with this name
+            cluster = self._get_vcenter_cluster(session, avzone_name)
+            if not cluster:
+                raise vimconn.VimConnNotFoundException(
+                    f"Unable to find either cluster or resource pool with name {avzone_name}"
+                )
+
+            # Obtain resource pool for cluster
+            resource_pool = cluster.resourcePool
+
+        self.logger.debug(
+            "Recovered cluster name: %s and resource_pool: %s",
+            cluster.name,
+            resource_pool.name,
+        )
+        return cluster, resource_pool
+
+    def _get_cluster_from_resource_pool(self, server_instance, resource_pool):
+        cluster = None
+
+        parent = resource_pool.parent
+        while parent:
+            if isinstance(parent, vim.ClusterComputeResource):
+                cluster = parent
+                self.logger.debug(
+                    "Recovered cluster name: %s for resource pool: %s",
+                    cluster.name,
+                    resource_pool.name,
+                )
+                break
+            elif isinstance(parent, vim.ComputeResource):
+                # Standalone host: its compute resource takes the place of a cluster
+                self.logger.warning("Parent is a host not a cluster")
+                cluster = parent
+                break
+            else:
+                parent = parent.parent
+
+        return cluster
+
+    def _get_resource_pool(self, session, resource_pool_name):
+        return vcutil.get_vcenter_obj(session, [vim.ResourcePool], resource_pool_name)
+
+    def _get_vcenter_cluster(self, server_instance, cluster_name):
+        return vcutil.get_vcenter_obj(
+            server_instance, [vim.ClusterComputeResource], cluster_name
+        )
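
A minimal usage sketch for VCenterConfig, assuming the same environment variables as the __main__ test harnesses in this change; tenant and datastore values are placeholders:

    import os
    import ssl

    from osm_rovim_vcenter.vcenter_config import VCenterConfig
    from pyVim.connect import Disconnect, SmartConnect

    # Assumed environment variables, mirroring the module test harnesses
    ssl_context = ssl.create_default_context(cafile=os.getenv("VCENTER_CERT_PATH"))
    session = SmartConnect(
        host=os.getenv("VCENTER_SERVER"),
        user=os.getenv("VCENTER_USER"),
        pwd=os.getenv("VCENTER_PASSWORD"),
        port=443,
        sslContext=ssl_context,
    )
    try:
        config = VCenterConfig(
            availability_zones=os.getenv("TEST_CLUSTER_NAME"),  # cluster or resource pool
            tenant_id="default",
            tenant_name="default",
            datastore_name="datastore1",  # placeholder datastore name
        )
        # Resolve the cluster and resource pool behind the first availability zone
        cluster, resource_pool = config.get_cluster_rp_from_av_zone(session, 0, None)
        print(cluster.name, resource_pool.name)
    finally:
        Disconnect(session)
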
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_ipmanager.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_ipmanager.py
new file mode 100644 (file)
index 0000000..b322107
--- /dev/null
@@ -0,0 +1,359 @@
+# -*- coding: utf-8 -*-\r
+# Copyright 2025 Indra\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#    http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r
+# implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+"""\r
+Utility class to get and set ip address information\r
+"""\r
+import ipaddress\r
+import logging\r
+import os\r
+import re\r
+import ssl\r
+\r
+from osm_ro_plugin import vimconn\r
+from osm_rovim_vcenter import vcenter_util as vcutil\r
+from osm_rovim_vcenter.nsx_client import NsxClient\r
+from osm_rovim_vcenter.vcenter_config import VCenterConfig\r
+from osm_rovim_vcenter.vcenter_network import VCenterNetworkUtil\r
+from osm_rovim_vcenter.vcenter_vms import VCenterVmsUtil\r
+from pyVim.connect import Disconnect, SmartConnect\r
+from pyVmomi import vim\r
+\r
+\r
+class VCenterIpManager:\r
+    """\r
+    Helper class to deal with setting and recovering fixed ip addresses\r
+    """\r
+\r
+    def __init__(\r
+        self,\r
+        vc_netutil: VCenterNetworkUtil,\r
+        nsx_url=None,\r
+        nsx_user=None,\r
+        nsx_password=None,\r
+        nsx_verify_ssl=False,\r
+        log_level=None,\r
+        dhcp_configure_always=False,\r
+    ):\r
+        self.logger = logging.getLogger("ro.vim.vcenter.network")\r
+        if log_level:\r
+            self.logger.setLevel(getattr(logging, log_level))\r
+\r
+        self.vc_netutil = vc_netutil\r
+        self.dhcp_configure_always = dhcp_configure_always\r
+\r
+        self.nsx_url = nsx_url\r
+        self.nsx_user = nsx_user\r
+        self.nsx_password = nsx_password\r
+        self.nsx_verify_ssl = nsx_verify_ssl\r
+\r
+        self.nsx_client = None\r
+        self.logger.debug(\r
+            "Nsx url: %s, nsx_user: %s",\r
+            self.nsx_url,\r
+            self.nsx_user,\r
+        )\r
+        if self.nsx_url and self.nsx_user and self.nsx_password:\r
+            self.logger.debug("Configure nsx client")\r
+            self.nsx_client = NsxClient(\r
+                nsx_url,\r
+                nsx_user,\r
+                nsx_password,\r
+                verify_ssl=self.nsx_verify_ssl,\r
+                log_level=log_level,\r
+            )\r
+\r
+    def get_vm_ips(self, session, vm):\r
+        """\r
+        Obtain the ips for the provided vm using vmware tools\r
+        """\r
+        self.logger.debug("Obtain vm fixed ips configuration for vm name: %s", vm.name)\r
+\r
+        mac_ips_dict = {}\r
+\r
+        if vm.guest.toolsRunningStatus != "guestToolsRunning":\r
+            self.logger.warning(\r
+                "Unable to get vm ips for vm name: '%s' as vm tools is not running",\r
+                vm.name,\r
+            )\r
+        else:\r
+            if vm.guest.net:\r
+                for nic in vm.guest.net:\r
+                    if nic.macAddress and nic.ipAddress:\r
+                        mac_ips_dict[nic.macAddress] = nic.ipAddress\r
+        return mac_ips_dict\r
+\r
+    def set_vm_ips(self, session, vm_name, vm, net_list):\r
+        """\r
+        Set the vm fixed ip addresses using vmware tools; the subnet information\r
+        (gateway, network mask, dns, etc.) is obtained by querying the NSX\r
+        """\r
+        self.logger.debug(\r
+            "Set ip address for vm name: %s, net_list: %s", vm.name, net_list\r
+        )\r
+\r
+        # 1 - Check data, check if some fixed ip address needs to be set\r
+        # Obtain interfaces with ip_addresses to set\r
+        nets_fixed_ip = {\r
+            net["net_id"]: net for net in net_list if net.get("ip_address")\r
+        }\r
+        if nets_fixed_ip:\r
+            # Must set some fixed ip, check nsx configuration is provided\r
+\r
+            # Check nsx client is configured, only nsx networks are supported:\r
+            # it is needed to obtain subnet parameters and\r
+            # only obtaining them by nsx is supported\r
+            if not self.nsx_client:\r
+                raise vimconn.VimConnException(\r
+                    "Manual ip assignment cannot be done as nsx configuration is not provided"\r
+                )\r
+        else:\r
+            # There are no fixed ips to set; if configured to always apply the\r
+            # dhcp configuration, do it, otherwise return\r
+            if not self.dhcp_configure_always:\r
+                self.logger.debug(\r
+                    "There are no fixed ip addresses to configure and "\r
+                    "dhcp_configure_always:%s",\r
+                    self.dhcp_configure_always,\r
+                )\r
+                return\r
+\r
+        # 2 - Check vmware tools are installed\r
+        if vm.guest.toolsStatus in ["toolsNotInstalled", None]:\r
+            raise vimconn.VimConnException(\r
+                "VMware Tools is not installed or not detected; it is required to assign fixed ips."\r
+            )\r
+\r
+        # 3 - Iterate network interfaces and configure ip assignment for each interface\r
+        custom_spec = vim.vm.customization.Specification()\r
+        custom_spec.nicSettingMap = []\r
+\r
+        subnet_params_dict = {}\r
+        dns_servers = None\r
+\r
+        for device in vm.config.hardware.device:\r
+            if isinstance(device, vim.vm.device.VirtualEthernetCard):\r
+                net = self._get_net_with_mac(net_list, device.macAddress)\r
+\r
+                if net.get("ip_address"):\r
+                    subnets = self._get_subnets_for_net_id(\r
+                        session, subnet_params_dict, net.get("net_id")\r
+                    )\r
+                    self.logger.debug("Subnets info obtained for net_id: %s", subnets)\r
+\r
+                    # Update ip addresses\r
+                    fixed_ip_dict = self._prepare_fixed_ip_dics(\r
+                        net.get("ip_address"), subnets\r
+                    )\r
+                    if not dns_servers:\r
+                        dns_servers = fixed_ip_dict.get("dns_servers")\r
+                    self.logger.debug("Fixed ip dict: %s", fixed_ip_dict)\r
+\r
+                    self._update_nic_fixedip_address_spec(\r
+                        custom_spec, net.get("mac_address"), fixed_ip_dict\r
+                    )\r
+\r
+                else:\r
+                    self._update_nic_dhcp_spec(custom_spec, device.macAddress)\r
+\r
+        # Update vm configuration\r
+        self._customize_ip_address(vm_name, vm, custom_spec, dns_servers)\r
+\r
+    @staticmethod\r
+    def _get_net_with_mac(net_list, mac_address):\r
+        for net in net_list:\r
+            if net.get("mac_address") == mac_address:\r
+                return net\r
+        # No interface matched: fail instead of returning the last net\r
+        raise vimconn.VimConnException(\r
+            f"Unable to find net with previously assigned mac address: {mac_address}"\r
+        )\r
+\r
+    def _get_subnets_for_net_id(self, session, subnets_params_dic, net_id):\r
+        """\r
+        Obtains subnet network parameters\r
+        """\r
+        subnets = subnets_params_dic.get(net_id)\r
+\r
+        if not subnets:\r
+            # Obtain network using network id\r
+            self.logger.debug("Obtain network with net_id: %s", net_id)\r
+            network = self.vc_netutil.get_network_by_id(session, net_id)\r
+            self.logger.debug("Network recovered: %s", network)\r
+\r
+            # Network recovered, do not have to check types because only distributed port groups\r
+            # are supported so far\r
+            if network.config.backingType == "nsx":\r
+                # Obtain subnet parameters for network\r
+                segment_path = network.config.segmentId\r
+                self.logger.debug(\r
+                    "Obtain subnet parameters for nsx segment path: %s", segment_path\r
+                )\r
+                subnets = self.nsx_client.get_nsx_segment_dhcp_config(segment_path)\r
+                subnets_params_dic[net_id] = subnets\r
+            else:\r
+                raise vimconn.VimConnException(\r
+                    f"Network with id: {net_id} is not an nsx backed "\r
+                    "network and assigning fixed ip addresses is not supported"\r
+                )\r
+\r
+        return subnets\r
+\r
+    def _prepare_fixed_ip_dics(self, ip_address, subnets):\r
+        # Improvement - check whether anything else should be done when more than\r
+        # one subnet is supported for one segment\r
+        fixed_ip_dict = {"ip_address": ip_address}\r
+        subnet = subnets[0]\r
+        gateway = str(ipaddress.IPv4Interface(subnet.get("gateway_address")).ip)\r
+        subnet_mask = str(\r
+            ipaddress.IPv4Network(subnet.get("network"), strict=False).netmask\r
+        )\r
+        fixed_ip_dict["gateway"] = gateway\r
+        fixed_ip_dict["subnet_mask"] = subnet_mask\r
+\r
+        dns_servers = subnet.get("dhcp_config", {}).get("dns_servers", [])\r
+        fixed_ip_dict["dns_servers"] = dns_servers\r
+        return fixed_ip_dict\r
+\r
+    def _update_nic_fixedip_address_spec(self, custom_spec, mac_address, fixed_ip_dics):\r
+\r
+        # Create a Fixed IP object\r
+        fixed_ip = vim.vm.customization.FixedIp(\r
+            ipAddress=fixed_ip_dics.get("ip_address")\r
+        )\r
+\r
+        adapter_mapping = vim.vm.customization.AdapterMapping()\r
+        adapter_mapping.adapter = vim.vm.customization.IPSettings(\r
+            ip=fixed_ip,\r
+            subnetMask=fixed_ip_dics.get("subnet_mask"),\r
+            gateway=fixed_ip_dics.get("gateway"),\r
+        )\r
+        adapter_mapping.macAddress = mac_address\r
+        custom_spec.nicSettingMap.append(adapter_mapping)\r
+\r
+    def _update_nic_dhcp_spec(self, custom_spec, mac_address):\r
+        adapter_mapping = vim.vm.customization.AdapterMapping()\r
+        adapter_mapping.adapter = vim.vm.customization.IPSettings(\r
+            ip=vim.vm.customization.DhcpIpGenerator()\r
+        )\r
+        adapter_mapping.macAddress = mac_address\r
+        custom_spec.nicSettingMap.append(adapter_mapping)\r
+\r
+    def _customize_ip_address(self, vm_name, vm, custom_spec, dns_servers):\r
+        # Check the vm name\r
+        name = self._sanitize_vm_name(vm_name)\r
+\r
+        # Optionally configure the hostname\r
+        identity = vim.vm.customization.LinuxPrep(\r
+            domain="domain.local", hostName=vim.vm.customization.FixedName(name=name)\r
+        )\r
+        custom_spec.identity = identity\r
+\r
+        global_ip_settings = vim.vm.customization.GlobalIPSettings()\r
+        if dns_servers:\r
+            global_ip_settings = vim.vm.customization.GlobalIPSettings(\r
+                dnsServerList=dns_servers\r
+            )\r
+        custom_spec.globalIPSettings = global_ip_settings\r
+\r
+        customize_task = vm.CustomizeVM_Task(spec=custom_spec)\r
+        vcutil.wait_for_task(customize_task)\r
+        self.logger.debug("VM spec updated")\r
+\r
+    def _sanitize_vm_name(self, vm_name):\r
+        corrected_vm_name = vm_name.replace("_", "-")[:63]\r
+        if not re.match(r"^[a-zA-Z0-9-]+$", corrected_vm_name):\r
+            raise vimconn.VimConnException(f"Invalid hostname: {corrected_vm_name}")\r
+        return corrected_vm_name\r
+\r
+\r
+if __name__ == "__main__":\r
+    # Init logger\r
+    log_format = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(funcName)s(): %(message)s"\r
+    logging.basicConfig(\r
+        level=logging.DEBUG,  # Set the logging level\r
+        format=log_format,  # Use the log format defined above\r
+        datefmt="%Y-%m-%dT%H:%M:%S",\r
+        handlers=[\r
+            logging.StreamHandler(),  # Log to the console\r
+        ],\r
+    )\r
+    logger = logging.getLogger("ro.vim.vmware.test_nsx")\r
+    logger.setLevel(level=logging.DEBUG)\r
+\r
+    test_nsx_url = os.getenv("NSX_URL")\r
+    test_nsx_user = os.getenv("NSX_USER")\r
+    test_nsx_password = os.getenv("NSX_PASSWORD")\r
+\r
+    vcnet_util = VCenterNetworkUtil(log_level="DEBUG")\r
+    vc_ipmanager = VCenterIpManager(\r
+        vc_netutil=vcnet_util,\r
+        nsx_url=test_nsx_url,\r
+        nsx_user=test_nsx_user,\r
+        nsx_password=test_nsx_password,\r
+        log_level="DEBUG",\r
+    )\r
+\r
+    vcenter_cluster = os.getenv("TEST_CLUSTER_NAME")\r
+    VCENTER_TENANT_ID = "default"\r
+    VCENTER_TENANT_NAME = "default"\r
+    vc_config = VCenterConfig(\r
+        availability_zones=vcenter_cluster,\r
+        tenant_id=VCENTER_TENANT_ID,\r
+        tenant_name=VCENTER_TENANT_NAME,\r
+        log_level="DEBUG",\r
+    )\r
+\r
+    vcenter_cert_path = os.getenv("VCENTER_CERT_PATH")\r
+    vcenter_host = os.getenv("VCENTER_SERVER")\r
+    vcenter_user = os.getenv("VCENTER_USER")\r
+    vcenter_password = os.getenv("VCENTER_PASSWORD")\r
+    ssl_context = ssl.create_default_context(cafile=vcenter_cert_path)\r
+    test_session = SmartConnect(\r
+        host=vcenter_host,\r
+        user=vcenter_user,\r
+        pwd=vcenter_password,\r
+        port=443,\r
+        sslContext=ssl_context,\r
+    )\r
+    logger.debug("Connected to vcenter")\r
+\r
+    try:\r
+        # Obtain a vm\r
+        vc_vmsutil = VCenterVmsUtil(vcenter_config=vc_config, log_level="DEBUG")\r
+\r
+        # Test set ips\r
+        """\r
+        #vm = vc_vmsutil.get_vm_by_uuid(session, "5035b827-e3c4-1ca4-b689-9fadb1cc78d7")\r
+        vm = vc_vmsutil.get_vm_by_uuid(session, "5035f893-c302-08e3-8465-345165aaf921")\r
+        logger.debug("Vm recovered")\r
+        net_list = [\r
+            {'name': 'eth0', 'net_id': 'vim.dvs.DistributedVirtualPortgroup:dvportgroup-44614', \r
+                'type': 'SRIOV', 'use': 'data'},\r
+            {'name': 'eth1', 'net_id': 'vim.dvs.DistributedVirtualPortgroup:dvportgroup-47674', \r
+                'type': 'virtual', 'use': 'data', 'ip_address': '192.168.228.23'}\r
+        ]\r
+        vc_ipmanager.set_vm_ips(session, vm.name, vm, net_list)\r
+        """\r
+\r
+        # Test get ips\r
+        test_vm = vc_vmsutil.get_vm_by_uuid(\r
+            test_session, "50359c0a-41ee-9afc-d21b-e398b8ac1d64"\r
+        )\r
+        mac_ips = vc_ipmanager.get_vm_ips(test_session, test_vm)\r
+        logger.debug("Ip address for vm mac address: %s", mac_ips)\r
+    finally:\r
+        Disconnect(test_session)\r
+        logger.debug("Disconnected from vcenter")\r
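
For reference, the per-interface dictionaries that set_vm_ips() reads from net_list carry net_id, mac_address and optionally ip_address; a hypothetical entry:

    # Hypothetical net_list entry for set_vm_ips(); mac_address must match a NIC
    # already attached to the vm, and omitting ip_address selects DHCP.
    net_list = [
        {
            "name": "eth0",
            "net_id": "vim.dvs.DistributedVirtualPortgroup:dvportgroup-47674",
            "mac_address": "00:50:56:aa:bb:cc",
            "ip_address": "192.168.228.23",
        }
    ]
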
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_network.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_network.py
new file mode 100644 (file)
index 0000000..7a256bd
--- /dev/null
@@ -0,0 +1,277 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Utility class to get networks information in vcenter
+"""
+import logging
+
+from osm_ro_plugin import vimconn
+from osm_rovim_vcenter import vcenter_util as vcutil
+from pyVmomi import vim
+
+DISTRIBUTED_PORTGROUP_KEY_PREFIX = "vim.dvs.DistributedVirtualPortgroup:"
+
+
+class VCenterNetworkUtil:
+    """
+    Helper class to deal with vcenter networks
+    """
+
+    def __init__(self, log_level=None):
+        self.logger = logging.getLogger("ro.vim.vcenter.network")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+    def get_dvs_list(self, session, dvs_names):
+        """
+        Obtains distributed switches with the provided distributed switches names
+        """
+        self.logger.debug("Get dvs for dvs_names: %s", dvs_names)
+        dvs = []
+        content = vcutil.get_vcenter_content(session)
+        container = content.viewManager.CreateContainerView(
+            content.rootFolder, [vim.DistributedVirtualSwitch], True
+        )
+        for dswitch in container.view:
+            if dswitch.name in dvs_names:
+                dvs.append(dswitch)
+        return dvs
+
+    def get_port_groups_by_dvs_name(self, session, dvs_names):
+        """
+        Obtains distributed port groups for the indicated distributed switches
+        """
+        self.logger.debug("Get port groups for dvs_names: %s", dvs_names)
+        dport_groups = []
+        content = vcutil.get_vcenter_content(session)
+        container = content.viewManager.CreateContainerView(
+            content.rootFolder, [vim.DistributedVirtualSwitch], True
+        )
+        for dswitch in container.view:
+            if dswitch.name in dvs_names:
+                for portgroup in dswitch.portgroup:
+                    dport_groups.append(portgroup)
+        return dport_groups
+
+    def find_port_group_by_name_dvs(self, session, dvs, port_group_name):
+        """
+        Obtains the distributed port group with the provided name searching in the distributed
+        virtual switch dvs
+        """
+        port_group = None
+
+        for pg in dvs.portgroup:
+            if pg.name == port_group_name:
+                port_group = pg
+
+        if not port_group:
+            raise vimconn.VimConnNotFoundException(
+                f"Distributed port group with name: {port_group_name} not found"
+            )
+
+        return port_group
+
+    def get_network_by_id(self, session, net_id):
+        """
+        Obtains a pyvmomi network instance object by id
+        Currently only obtains distributed port group
+        """
+        if net_id.startswith(DISTRIBUTED_PORTGROUP_KEY_PREFIX):
+            pg_key = net_id.removeprefix(DISTRIBUTED_PORTGROUP_KEY_PREFIX)
+            pg = self._get_portgroup_by_key(session, pg_key)
+            return pg
+        else:
+            self.logger.error(
+                "Network: %s is not a distributed port group, currently not supported",
+                net_id,
+            )
+            raise vimconn.VimConnNotFoundException(
+                f"Network: {net_id} is not a distributed port group, currently not supported"
+            )
+
+    def get_vim_network_by_id(self, session, net_id):
+        """
+        Obtains a vim network from vim_id
+        """
+        if net_id.startswith(DISTRIBUTED_PORTGROUP_KEY_PREFIX):
+            pg_key = net_id.removeprefix(DISTRIBUTED_PORTGROUP_KEY_PREFIX)
+            pg = self._get_portgroup_by_key(session, pg_key)
+            return self.get_vim_network_from_pg(pg)
+        else:
+            self.logger.error(
+                "Network: %s is not a distributed port group, currently not supported",
+                net_id,
+            )
+            raise vimconn.VimConnNotFoundException(
+                f"Network: {net_id} is not a distributed port group, currently not supported"
+            )
+
+    def _get_portgroup_by_key(self, session, key):
+        """
+        Obtains a distributed port group with the indicated key
+        """
+        port_group = None
+
+        content = vcutil.get_vcenter_content(session)
+        container = content.viewManager.CreateContainerView(
+            content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True
+        )
+        for pg in container.view:
+            if pg.key == key:
+                port_group = pg
+        if not port_group:
+            raise vimconn.VimConnNotFoundException(
+                f"Portgroup with key: {key} not found"
+            )
+        else:
+            return port_group
+
+    def get_vim_network_from_pg(self, portgroup):
+        """
+        Obtains a vim network object from a distributed port group
+        """
+        port_number = portgroup.config.numPorts
+        binding_type = portgroup.config.type
+        backing_type = portgroup.config.backingType
+
+        # Get VLAN Information
+        vlan_spec = portgroup.config.defaultPortConfig.vlan
+        vlan_id = None
+        if isinstance(vlan_spec, vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec):
+            vlan_id = vlan_spec.vlanId
+        elif isinstance(
+            vlan_spec, vim.dvs.VmwareDistributedVirtualSwitch.TrunkVlanSpec
+        ):
+            vlan_id = [(vlan.start, vlan.end) for vlan in vlan_spec.vlanId]
+
+        vim_network = {
+            "id": DISTRIBUTED_PORTGROUP_KEY_PREFIX + portgroup.key,
+            "name": portgroup.name,
+            # There is no functionality in vcenter to check if a network is active
+            "port_number": port_number,
+            "binding_type": binding_type,
+            "vlan_id": vlan_id,
+            "net_backing_type": backing_type,
+        }
+        return vim_network
+
+    def get_dvs(self, session, dvs_name):
+        """
+        Obtains a distributed virtual switch using its name
+        """
+        dvs = vcutil.get_vcenter_obj(session, [vim.DistributedVirtualSwitch], dvs_name)
+        if not dvs:
+            raise vimconn.VimConnNotFoundException(
+                f"Distributed virtual switch with name: {dvs_name} not found"
+            )
+        return dvs
+
+    def create_distributed_port_group(
+        self, session, port_group_name, dvs_name, vlan=None
+    ):
+        """
+        Creates a distributed port group with name port_group_name in the
+        distributed_virtual_switch named dvs_name
+        """
+        try:
+            # Obtain dvs with name dvs_name
+            dvs = self.get_dvs(session, dvs_name)
+
+            # Create portgroup
+            port_group_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+            port_group_spec.name = port_group_name
+            port_group_spec.type = (
+                vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
+            )
+
+            if vlan:
+                vlan_spec = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+                vlan_spec.vlanId = vlan
+                vlan_spec.inherited = False  # Ensure it's explicitly set
+                port_group_spec.defaultPortConfig = (
+                    vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                )
+                port_group_spec.defaultPortConfig.vlan = vlan_spec
+
+            task = dvs.AddDVPortgroup_Task([port_group_spec])
+            vcutil.wait_for_task(task)
+            self.logger.debug(
+                "Distributed port group with name: %s created", port_group_name
+            )
+
+            # Obtain portgroup created and return it
+            port_group = self.find_port_group_by_name_dvs(session, dvs, port_group_name)
+            net_key = DISTRIBUTED_PORTGROUP_KEY_PREFIX + port_group.key
+
+            return net_key, port_group
+        except vim.fault.DuplicateName as e:
+            self.logger.error(
+                f"Distributed port group with name: {port_group_name} already exists",
+                exc_info=True,
+            )
+            raise vimconn.VimConnConflictException(
+                f"Distributed port group with name: {port_group_name} already exists"
+            ) from e
+
+    def delete_distributed_port_group(self, port_group):
+        """
+        Deletes the indicated distributed port group
+        """
+        self.logger.debug("Delete distributed port group key: %s", port_group.key)
+        task = port_group.Destroy_Task()
+        vcutil.wait_for_task(task)
+        self.logger.debug("Distributed port group deleted")
+
+    def is_distributed_port_group(self, net_id):
+        """
+        Checks if the net with net_id is a distributed port group
+        """
+        return net_id.startswith(DISTRIBUTED_PORTGROUP_KEY_PREFIX)
+
+    def get_distributed_port_connected_vms(self, port_group):
+        """
+        Obtains the vms connected to the provided distributed port group
+        """
+        vms = []
+        for vm in port_group.vm:
+            vms.append(vm)
+        return vms
+
+    def is_nsx_port_group(self, port_group):
+        """
+        Check if the distributed port group backing type is nsx
+        """
+        return port_group.config.backingType == "nsx"
+
+    def _get_distributed_port_group(self, session, portgroup_key):
+        portgroup = None
+        content = vcutil.get_vcenter_content(session)
+        container = content.viewManager.CreateContainerView(
+            content.rootFolder, [vim.DistributedVirtualSwitch], True
+        )
+        for dswitch in container.view:
+            for pg in dswitch.portgroup:
+                if pg.key == portgroup_key:
+                    portgroup = pg
+            if portgroup:
+                break
+
+        if not portgroup:
+            raise vimconn.VimConnNotFoundException(
+                f"unable to find portgroup key: {portgroup_key}"
+            )
+
+        return portgroup
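
A short sketch of how VCenterNetworkUtil is meant to be driven; the session comes from SmartConnect as in the earlier sketch, and the switch, port group and VLAN values are assumptions:

    from osm_rovim_vcenter.vcenter_network import VCenterNetworkUtil

    # session: pyVmomi service instance obtained with SmartConnect (see above)
    net_util = VCenterNetworkUtil(log_level="DEBUG")

    # Create a VLAN-backed distributed port group on an existing switch
    net_id, port_group = net_util.create_distributed_port_group(
        session, "osm-test-pg", "dvs-example", vlan=101
    )
    print(net_util.get_vim_network_from_pg(port_group))

    # Clean up
    net_util.delete_distributed_port_group(port_group)
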
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_util.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_util.py
new file mode 100644 (file)
index 0000000..6a997f4
--- /dev/null
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Utility class with helper methods to deal with vcenter
+"""
+import logging
+import time
+
+from osm_ro_plugin import vimconn
+from pyVmomi import vim
+import requests
+
+
+def get_vcenter_content(session):
+    """
+    Obtains the vcenter content object
+    """
+    return session.RetrieveContent()
+
+
+def get_vcenter_obj(session, vim_type, name, folder=None):
+    """
+    Get the vSphere object associated with a given text name
+    """
+    obj = None
+
+    content = get_vcenter_content(session)
+    if not folder:
+        folder = content.rootFolder
+
+    container = content.viewManager.CreateContainerView(folder, vim_type, True)
+    for c in container.view:
+        if c.name == name:
+            obj = c
+            break
+    container.Destroy()
+    return obj
+
+
+def get_vcenter_folder(server_instance, folder_name, base_folder=None):
+    """
+    Obtains the vcenter folder object with the provided folder_name
+    """
+    return get_vcenter_obj(server_instance, [vim.Folder], folder_name, base_folder)
+
+
+def wait_for_task(task):
+    """Wait for a task to complete and handle any errors."""
+    if task:
+        while task.info.state not in [
+            vim.TaskInfo.State.success,
+            vim.TaskInfo.State.error,
+        ]:
+            time.sleep(1)
+        if task.info.state == vim.TaskInfo.State.success:
+            return task.info.result
+        else:
+            raise task.info.error  # Raise the specific exception
+
+
+def wait_for_tasks(tasks):
+    """Wait until all tasks in the list are finished. If any task fails, raise an error."""
+    while any(task.info.state not in ["success", "error"] for task in tasks):
+        time.sleep(2)
+
+    for task in tasks:
+        if task.info.state == "error":
+            raise task.info.error
+
+
+class VCenterFileUploader:
+    """
+    Helper class to upload files to vcenter
+    """
+
+    def __init__(
+        self,
+        host,
+        port,
+        user,
+        password,
+        ca_cert_path,
+        log_level=None,
+        default_timeout=None,
+    ):
+        self.logger = logging.getLogger("ro.vim.vcenter.util")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+        self.host = host
+        self.port = port
+        self.user = user
+        self.password = password
+        self.ssl_verify = False
+        if ca_cert_path:
+            self.ssl_verify = ca_cert_path
+
+        self.default_timeout = default_timeout or 30
+
+    def upload_file(
+        self,
+        local_file_path,
+        datacenter_name,
+        datastore_name,
+        folder_name,
+        file_name,
+        timeout=None,
+    ):
+        """
+        Upload local file to a vmware datastore into the indicated folder
+        and with the indicated name
+        """
+        timeout = timeout or self.default_timeout
+        self.logger.debug(
+            "Upload file %s to datastore %s, folder %s, timeout %s",
+            local_file_path,
+            datastore_name,
+            folder_name,
+            timeout,
+        )
+
+        upload_path = f"/folder/{folder_name}/{file_name}"
+        url = f"https://{self.host}:{self.port}{upload_path}?dcPath={datacenter_name}&dsName={datastore_name}"
+        self.logger.debug("Upload file to url: %s", url)
+
+        with open(local_file_path, "rb") as file:
+            headers = {"Content-Type": "application/octet-stream"}
+            response = requests.put(
+                url,
+                headers=headers,
+                auth=(self.user, self.password),  # Basic authentication
+                data=file,
+                verify=self.ssl_verify,
+                timeout=timeout,
+            )
+
+        self.logger.debug(
+            "Response code: %s, text: %s", response.status_code, response.text
+        )
+        if response.status_code not in (200, 201):
+            self.logger.error(
+                "Error uploading file error_code: %s, text: %s",
+                response.status_code,
+                response.text,
+            )
+            raise vimconn.VimConnException(
+                f"Error uploading file error_code: {response.status_code}, text: {response.text}"
+            )
+        else:
+            self.logger.debug("File uploaded successfully")
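
A hedged usage sketch for VCenterFileUploader; host, credentials, datastore and paths below are placeholders:

    from osm_rovim_vcenter.vcenter_util import VCenterFileUploader

    uploader = VCenterFileUploader(
        host="vcenter.example.com",  # placeholder vCenter host
        port=443,
        user="administrator@vsphere.local",
        password="secret",
        ca_cert_path=None,  # None leaves TLS verification disabled
        default_timeout=60,
    )
    # PUT the local file into the datastore folder via the vCenter file API
    uploader.upload_file(
        local_file_path="/tmp/cloud-init.iso",
        datacenter_name="DC1",
        datastore_name="datastore1",
        folder_name="OSM-Images",
        file_name="cloud-init.iso",
    )
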
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_vms.py b/RO-VIM-vcenter/osm_rovim_vcenter/vcenter_vms.py
new file mode 100644 (file)
index 0000000..9ee99c0
--- /dev/null
@@ -0,0 +1,1148 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Utility class to deal with vms in vcenter
+"""
+import logging
+from urllib.parse import quote, unquote
+
+from osm_ro_plugin import vimconn
+from osm_rovim_vcenter import vcenter_network as vcnetwork
+from osm_rovim_vcenter import vcenter_util as vcutil
+from osm_rovim_vcenter.vcenter_config import VCenterConfig
+from osm_rovim_vcenter.vcenter_network import VCenterNetworkUtil
+from pyVmomi import vim
+
+vmPowerState2osm = {
+    "poweredOff": "INACTIVE",
+    "poweredOn": "ACTIVE",
+    "suspended": "PAUSED",
+    "other": "OTHER",
+}
+
+# keys for flavor dict
+FLAVOR_RAM_KEY = "ram"
+FLAVOR_VCPUS_KEY = "vcpus"
+FLAVOR_DISK_KEY = "disk"
+
+# Maximum number of devices of a scsi controller
+SCSI_CONTROLLER_MAX_DEVICES = 16
+
+
+class VCenterVmsUtil:
+    """
+    Utility class to get information about vms
+    """
+
+    def __init__(self, vcenter_config: VCenterConfig, log_level=None):
+        self.vcenter_config = vcenter_config
+
+        self.logger = logging.getLogger("ro.vim.vcenter.vms")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+    def list_images(self, session, filter_dict=None):
+        """
+        Obtain images from tenant images folder
+        """
+        filter_dict = filter_dict or {}
+
+        # Obtain images folder
+        images_folder = self.vcenter_config.get_images_folder(session)
+
+        # List images in folder
+        image_list = []
+        vm_images = self._list_vms(session, images_folder, filter_dict)
+        for image in vm_images:
+            image_list.append(
+                {
+                    "id": image.config.instanceUuid,
+                    "name": image.name,
+                    "moref": image._moId,
+                }
+            )
+
+        return image_list
+
+    def _list_vms(self, session, folder=None, filter_dict=None):
+        """
+        Lists vms in a folder; supported filters: id (vcenter instanceUuid) and name
+        """
+        self.logger.debug("List vms for the folder: %s", folder)
+        vms = []
+        filter_dict = filter_dict or {}
+
+        content = vcutil.get_vcenter_content(session)
+        if not folder:
+            self.logger.debug("Folder is not provided, search from root folder")
+            folder = content.rootFolder
+
+        container = content.viewManager.CreateContainerView(
+            folder, [vim.VirtualMachine], True
+        )
+        for vm in container.view:
+            if filter_dict:
+                if (
+                    filter_dict.get("id")
+                    and str(vm.config.instanceUuid) != filter_dict["id"]
+                ):
+                    continue
+
+                if filter_dict.get("name") and str(vm.name) != filter_dict["name"]:
+                    continue
+
+            vms.append(vm)
+
+        return vms
+
+    def get_vm_by_uuid(self, session, vm_id):
+        """
+        Obtains vm by its uuid
+        """
+        search_index = session.content.searchIndex
+        vm = search_index.FindByUuid(None, vm_id, True, True)
+        if vm:
+            return vm
+        else:
+            raise vimconn.VimConnNotFoundException(f"Vm with id: {vm_id} not found")
+
+    def get_image_by_uuid(self, session, image_id):
+        """
+        Obtains an image from its uuid; today this just gets a vm, kept this way to be
+        able to change it in the future if needed
+        """
+        return self.get_vm_by_uuid(session, image_id)
+
+    @staticmethod
+    def get_vim_vm_basic(vm):
+        """
+        Creates an object with the vm basic info in the vim format from the vcenter vm data
+        """
+        vim_vm = {
+            "id": vm.config.instanceUuid,
+            "name": vm.name,
+            "moref": vm._moId,
+            "status": vmPowerState2osm.get(vm.runtime.powerState, "other"),
+        }
+        return vim_vm
+
+    def get_vm_nics_list(self, vm):
+        """
+        Gets the list of nics for the provided vm and its associated info (dict)
+        """
+        interfaces_info = []
+        for device in vm.config.hardware.device:
+            if isinstance(device, vim.vm.device.VirtualEthernetCard):
+                interface = {}
+                interface["vim_interface_id"] = device.key
+                interface["mac_address"] = device.macAddress
+
+                # Obtain net_id
+                if isinstance(
+                    device.backing,
+                    vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo,
+                ):
+
+                    interface["port_id"] = device.backing.port.portKey
+                    interface["vim_net_id"] = (
+                        vcnetwork.DISTRIBUTED_PORTGROUP_KEY_PREFIX
+                        + device.backing.port.portgroupKey
+                    )
+                    interface["switch_uuid"] = device.backing.port.switchUuid
+                else:
+                    self.logger.warning(
+                        "nic device type not supported yet: %s", type(device).__name__
+                    )
+
+                # Complete values for vim_info, info from the data
+                vim_info = {}
+                vim_info["key"] = device.key
+                vim_info["controllerKey"] = device.controllerKey
+                vim_info["wakeOnLanEnabled"] = device.wakeOnLanEnabled
+                if device.deviceInfo:
+                    vim_info["label"] = device.deviceInfo.label
+                    vim_info["summary"] = device.deviceInfo.summary
+                interface["vim_info"] = vim_info
+
+                interfaces_info.append(interface)
+
+        return interfaces_info
+
+    def delete_vm(self, session, vm_id):
+        """
+        Deletes the vm with the indicated instanceUuid; a refreshed vm object is obtained first
+        """
+        vm = self.get_vm_by_uuid(session, vm_id)
+
+        if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+            poweroff_task = vm.PowerOffVM_Task()
+            vcutil.wait_for_task(poweroff_task)
+
+        destroy_task = vm.Destroy_Task()
+        vcutil.wait_for_task(destroy_task)
+        self.logger.debug("vm id: %s deleted", vm_id)
+
+    def get_vm_cluster(self, session, vm):
+        """
+        Obtains the cluster associated to a vm
+        """
+        host = vm.runtime.host
+        cluster = host.parent
+        return cluster
+
+    def start_vm(self, vm):
+        """
+        Starts the provided vm
+        """
+        if vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
+            task = vm.PowerOn()
+            return task
+        else:
+            self.logger.warning("WARN : Instance is already started")
+            return None
+
+    def stop_vm(self, vm):
+        """
+        Stops the provided vm
+        """
+        if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+            task = vm.PowerOff()
+            return task
+        else:
+            self.logger.warning("WARN : Instance is not in Active state")
+            return None
+
+    def unattach_volumes(self, session, vm, volumes):
+        """
+        Unattaches the indicated volumes; each entry in volumes is the URL-quoted volume file path
+        """
+        self.logger.debug("Volumes to unattach: %s", volumes)
+
+        volumes_to_unattach = self._get_devices_from_volume_list(vm, volumes)
+
+        # Unattach devices
+        self._unattach_devices(session, vm, volumes_to_unattach)
+
+    def _get_devices_from_volume_list(self, vm, volumes):
+
+        # Volumes are identified by their URL-quoted file paths, unquote the list
+        volume_file_paths = [unquote(volume_id) for volume_id in volumes]
+        self.logger.debug("Volume file paths: %s", volume_file_paths)
+
+        # Obtain the devices to unattach
+        volumes_to_unattach = []
+        for volume_path in volume_file_paths:
+            # Flag to check if volume is found
+            found = False
+
+            # Iterate over devices in the VM
+            for device in vm.config.hardware.device:
+                # Check if the device is a VirtualDisk and its backing file matches the volume path
+                if (
+                    isinstance(device, vim.vm.device.VirtualDisk)
+                    and hasattr(device.backing, "fileName")
+                    and device.backing.fileName == volume_path
+                ):
+                    volumes_to_unattach.append(device)
+                    found = True
+                    break  # Exit the inner loop as the volume is found
+
+            # Log a warning if volume is not found
+            if not found:
+                self.logger.warning(
+                    "Volume path '%s' not found in VM device list.", volume_path
+                )
+
+        return volumes_to_unattach
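+
+    # Sketch of the volume-id convention assumed above: a volume id is the
+    # URL-quoted datastore file path of the disk (hypothetical values):
+    #
+    #   from urllib.parse import quote, unquote
+    #   volume_id = quote("[datastore1] vm-01/vm-01-data-0.vmdk")
+    #   # -> "%5Bdatastore1%5D%20vm-01/vm-01-data-0.vmdk"
+    #   unquote(volume_id)
+    #   # -> "[datastore1] vm-01/vm-01-data-0.vmdk"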
+
+    def _unattach_devices(self, session, vm, device_list):
+        """
+        Unattach the indicated list of devices
+        """
+        if device_list:
+            change_spec = vim.vm.ConfigSpec()
+            change_spec.deviceChange = []
+
+            for device in device_list:
+                device_change = vim.vm.device.VirtualDeviceSpec()
+                device_change.operation = (
+                    vim.vm.device.VirtualDeviceSpec.Operation.remove
+                )
+                device_change.device = device
+                change_spec.deviceChange.append(device_change)
+
+            # Reconfigure vm
+            task = vm.ReconfigVM_Task(spec=change_spec)
+            vcutil.wait_for_task(task)
+            self.logger.debug("Devices unattached")
+
+        else:
+            self.logger.warning("No devices to unattach provided, will do nothing")
+
+    def reconfig_vm(self, session, vm, reconfig_spec):
+        """
+        Reconfigure the indicated vm with the provided reconfigure spec
+        """
+        if reconfig_spec:
+            # Reconfigure vm
+            task = vm.ReconfigVM_Task(spec=reconfig_spec)
+            vcutil.wait_for_task(task)
+            self.logger.debug("Vm reconfigured")
+
+    def prepare_unattach_volumes(self, vm, volumes, unattach_spec):
+        """
+        Prepares an unattach spec so that volumes marked to keep can be detached
+        """
+        self.logger.debug("Prepare unattach volumes: %s", volumes)
+        unattach_device_list = self._get_devices_from_volume_list(vm, volumes)
+
+        # Prepare unattach spec
+        unattach_spec = self._prepare_unattach_spec(unattach_spec, unattach_device_list)
+
+        return unattach_spec
+
+    def prepare_unattach_cloudinitiso(self, vm, cloudinitiso_list, unattach_spec):
+        """
+        Prepares an unattach spec to detach the cloud-init iso cd-rom devices
+        """
+        self.logger.debug("Prepare unattach cloudinitiso: %s", cloudinitiso_list)
+        unattach_device_list = self._get_cdromiso_from_list(vm, cloudinitiso_list)
+
+        # Prepare unattach spec
+        unattach_spec = self._prepare_unattach_spec(unattach_spec, unattach_device_list)
+
+        return unattach_spec
+
+    def _prepare_unattach_spec(self, change_spec, devices_to_unattach):
+        # Prepare unattach spec
+        if not change_spec:
+            change_spec = vim.vm.ConfigSpec()
+            change_spec.deviceChange = []
+
+        for device in devices_to_unattach:
+            device_change = vim.vm.device.VirtualDeviceSpec()
+            device_change.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
+            device_change.device = device
+            change_spec.deviceChange.append(device_change)
+
+        return change_spec
+
+    def _get_cdromiso_from_list(self, vm, cloudinitiso_list):
+
+        # Cloud-init isos are identified by their URL-quoted file paths, unquote the list
+        cloudinitiso_paths = [
+            unquote(cloudinitiso) for cloudinitiso in cloudinitiso_list
+        ]
+        self.logger.debug("Cloud init iso: %s", cloudinitiso_paths)
+
+        # Obtain the iso cdrom to unattach
+        devices_to_unattach = []
+        for cloudinitiso_file in cloudinitiso_paths:
+            found = False
+
+            # Iterate over devices in the VM
+            for device in vm.config.hardware.device:
+                # Check if the device is a VirtualCdrom and its backing file matches the iso path
+                if (
+                    isinstance(device, vim.vm.device.VirtualCdrom)
+                    and hasattr(device.backing, "fileName")
+                    and device.backing.fileName == cloudinitiso_file
+                ):
+                    devices_to_unattach.append(device)
+                    found = True
+                    break  # Exit the inner loop as the iso is found
+
+            # Log a warning if the iso is not found
+            if not found:
+                self.logger.warning(
+                    "Iso path '%s' not found in VM device list.", cloudinitiso_file
+                )
+
+        return devices_to_unattach
+
+    def delete_iso_files(self, session, iso_file_list):
+        """
+        Deletes the files indicated in iso_file_list,
+        each file path is URL-quoted and must be unquoted before deletion
+        """
+        self.logger.debug("Delete files: %s", iso_file_list)
+
+        isofile_paths = [unquote(cloudinitiso) for cloudinitiso in iso_file_list]
+        for file_path in isofile_paths:
+            self.delete_datastore_file(session, file_path)
+
+    def delete_datastore_file(self, session, file_path):
+        """
+        Deletes the file indicated in the file_path
+        """
+        try:
+            # Retrieve the file manager
+            self.logger.debug("Delete the file: %s", file_path)
+            file_manager = session.content.fileManager
+
+            # Get the first datacenter (assuming a single datacenter scenario)
+            datacenter = session.content.rootFolder.childEntity[0]
+
+            # Start the delete task
+            task = file_manager.DeleteDatastoreFile_Task(
+                name=file_path, datacenter=datacenter
+            )
+            vcutil.wait_for_task(task)
+            self.logger.debug("File deleted")
+
+        except vim.fault.FileNotFound:
+            # File does not exist
+            self.logger.warning("File %s does not exist. No action taken.", file_path)
+
+    def _create_cluster_rule(self, session, cluster, rule_name, rule_type, vms):
+        """
+        Creates a cluster rule with the indicated type
+        Args:
+        - session: vcenter session
+        - cluster: cluster where the rule will be created
+        - rule_name: name of the rule to be created
+        - rule_type: type of rule, possible values affinity and anti-affinity
+        - vms: list of vms to be added to the rule
+        """
+        self.logger.debug("Going to create affinity group: %s", rule_name)
+
+        rule_spec = vim.cluster.RuleSpec()
+
+        rule_info = None
+        if rule_type == "affinity":
+            rule_info = vim.cluster.AffinityRuleSpec()
+        elif rule_type == "anti-affinity":
+            rule_info = vim.cluster.AntiAffinityRuleSpec()
+        else:
+            raise vimconn.VimConnException(f"Invalid affinity type: {rule_type}")
+
+        rule_info.enabled = False
+        rule_info.mandatory = False  # could be taken from configuration
+        rule_info.name = rule_name
+        rule_info.vm = vms
+
+        rule_spec.info = rule_info
+        rule_spec.operation = "add"
+
+        rule_config_spec = vim.cluster.ConfigSpecEx(rulesSpec=[rule_spec])
+
+        task = cluster.ReconfigureEx(rule_config_spec, modify=True)
+        vcutil.wait_for_task(task)
+        self.logger.debug("Affinity group name: %s created", rule_name)
+
+    def _get_cluster_rule_by_name(self, session, cluster, name):
+        """
+        Find a rule by its name.
+
+        Args:
+            session: The session object (context or connection object).
+            cluster: The cluster object containing rules.
+            name (str): The name of the rule to find.
+
+        Returns:
+            The rule object if found, otherwise None.
+        """
+        self.logger.debug("Find cluster rule with name: %s", name)
+        rules = cluster.configurationEx.rule
+        if not rules:
+            return None
+
+        for rule in rules:
+            if rule.name == name:
+                return rule
+
+        return None
+
+    def _add_vm_to_affinity_rule(self, session, cluster, cluster_rule, vm):
+        """
+        Adds a vm to an existing cluster rule
+        """
+        self.logger.debug("Add vm to affinity rule name: %s", cluster_rule.name)
+
+        # Add VM to the Rule
+        cluster_rule.vm.append(vm)
+
+        # Enable the rule, as rules with fewer than 2 vms must remain disabled
+        if len(cluster_rule.vm) > 1:
+            cluster_rule.enabled = True
+
+        # Reconfigure the Cluster with the Updated Rule
+        spec = vim.cluster.ConfigSpecEx()
+        spec.rulesSpec = [vim.cluster.RuleSpec(operation="edit", info=cluster_rule)]
+        task = cluster.ReconfigureComputeResource_Task(spec=spec, modify=True)
+        vcutil.wait_for_task(task)
+        self.logger.debug("Affinity rule edited successfully.")
+
+    def _delete_cluster_rule(self, session, cluster, affinity_rule):
+        """
+        Delete a cluster rule from a cluster
+        """
+        # Delete the Rule
+        spec = vim.cluster.ConfigSpecEx()
+        rule_spec = vim.cluster.RuleSpec(
+            operation="remove", removeKey=affinity_rule.key
+        )
+        spec.rulesSpec = [rule_spec]
+
+        # Reconfigure the cluster
+        task = cluster.ReconfigureComputeResource_Task(spec=spec, modify=True)
+        vcutil.wait_for_task(task)
+        self.logger.debug("Affinity rule %s deleted.", affinity_rule.name)
+
+    def add_vm_or_create_affinity_group(
+        self, session, cluster, affinity_group_name, affinity_group_type, vm
+    ):
+        """
+        Method that manages adding a vm to a cluster rule. If the cluster_rule does
+        not exist it creates it, otherwise adds the machine to the cluster rule
+
+        Args:
+        - session
+        - cluster
+        - affinity_group_name: Name of the cluster rule to be used
+        - affinity_group_type
+        - vm
+        """
+        self.logger.debug(
+            "Add vm name: %s to affinity_group_name: %s", vm.name, affinity_group_name
+        )
+
+        # Find if affinity group exists
+        affinity_group = self._get_cluster_rule_by_name(
+            session, cluster, affinity_group_name
+        )
+
+        if not affinity_group:
+
+            # If affinity group does not exist create
+            self._create_cluster_rule(
+                session, cluster, affinity_group_name, affinity_group_type, [vm]
+            )
+        else:
+            # Add vm to affinity group
+            self._add_vm_to_affinity_rule(session, cluster, affinity_group, vm)
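+
+    # Usage sketch (hypothetical names; `session` is a pyVmomi connection,
+    # `cluster` a vim.ClusterComputeResource, `vm` a vim.VirtualMachine):
+    #
+    #   vms_util.add_vm_or_create_affinity_group(
+    #       session, cluster, "osm-anti-affinity-1", "anti-affinity", vm
+    #   )
+    #   # The first call creates the rule disabled (rules need at least 2 vms);
+    #   # subsequent calls append vms and enable the rule.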
+
+    def delete_vm_affinity_rule(self, session, cluster, affinity_rule_name, vm_name):
+        """
+        Removes the vm with the provided name from the cluster affinity rule
+        with name affinity_rule_name
+        """
+        self.logger.debug(
+            "Remove vm: %s from affinity rule name: %s", vm_name, affinity_rule_name
+        )
+
+        # Find affinity rule
+        affinity_rule = self._get_cluster_rule_by_name(
+            session, cluster, affinity_rule_name
+        )
+        if not affinity_rule:
+            # warning, affinity rule not found, unable to delete, do nothing
+            self.logger.warning(
+                "Affinity rule with name: %s not found, unable to delete",
+                affinity_rule_name,
+            )
+
+        else:
+            found = False
+            for vm in affinity_rule.vm:
+                if vm.name == vm_name:
+                    affinity_rule.vm.remove(vm)
+                    found = True
+
+            if found and len(affinity_rule.vm) > 0:
+                # Reconfigure affinity rule
+                spec = vim.cluster.ConfigSpecEx()
+                spec.rulesSpec = [
+                    vim.cluster.RuleSpec(operation="edit", info=affinity_rule)
+                ]
+                task = cluster.ReconfigureComputeResource_Task(spec=spec, modify=True)
+                vcutil.wait_for_task(task)
+                self.logger.debug(
+                    "Affinity rule %s edited successfully.", affinity_rule_name
+                )
+
+            elif len(affinity_rule.vm) == 0:
+                # No vms left, delete the affinity rule
+                self._delete_cluster_rule(session, cluster, affinity_rule)
+
+    def disconnect_vms_from_dpg(self, session, net_id, vms):
+        """
+        Disconnects the indicated list of vms from the network with id: net_id
+        """
+        self.logger.debug("Disconnect vms for from net id: %s", net_id)
+
+        # Stop vms that are started
+        stopped_vms = self.stop_vm_list(session, vms)
+
+        # Disconnect vms
+        port_group_id = net_id.removeprefix(vcnetwork.DISTRIBUTED_PORTGROUP_KEY_PREFIX)
+        self._disconnect_vms(session, port_group_id, vms)
+
+        # Restart vms
+        self.start_vm_list(session, stopped_vms)
+
+    def _disconnect_vms(self, session, port_group_id, vms):
+        """
+        Disconnects a list of vms from a net, the vms should be already stopped before
+        calling this method
+        """
+        task_list = []
+        for vm in vms:
+            task = self._disconnect_vm(session, port_group_id, vm)
+            if task:
+                task_list.append(task)
+
+        if task_list:
+            # wait until all tasks are completed
+            vcutil.wait_for_tasks(task_list)
+
+    def _disconnect_vm(self, session, port_group_id, vm):
+        """
+        Disconnect vm from port_group
+        """
+
+        self.logger.debug(
+            "Disconnect vm name: %s from port_group_id: %s", vm.name, port_group_id
+        )
+        task = None
+
+        # Disconnect port group
+        spec = vim.vm.ConfigSpec()
+        device_changes = []
+
+        for device in vm.config.hardware.device:
+            if isinstance(device, vim.vm.device.VirtualEthernetCard):
+                if isinstance(
+                    device.backing,
+                    vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo,
+                ):
+                    if device.backing.port.portgroupKey == port_group_id:
+                        nic_spec = vim.vm.device.VirtualDeviceSpec()
+                        nic_spec.operation = (
+                            vim.vm.device.VirtualDeviceSpec.Operation.remove
+                        )  # Remove the NIC
+                        nic_spec.device = device
+                        device_changes.append(nic_spec)
+
+        if device_changes:
+            spec.deviceChange = device_changes
+            task = vm.ReconfigVM_Task(spec=spec)
+
+        return task
+
+    def stop_vm_list(self, session, vms):
+        """
+        Stop the vms in the provided list if they are started
+        """
+        stopped_vms = []
+        task_stop_list = []
+
+        for vm in vms:
+            if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+                task = vm.PowerOff()
+                task_stop_list.append(task)
+                stopped_vms.append(vm)
+
+        if task_stop_list:
+            # wait until all tasks are completed
+            vcutil.wait_for_tasks(task_stop_list)
+
+        return stopped_vms
+
+    def start_vm_list(self, session, vms):
+        """
+        Start the vms in the provided list
+        """
+        started_vms = []
+        task_start_list = []
+
+        for vm in vms:
+            if vm.runtime.powerState != vim.VirtualMachinePowerState.poweredOn:
+                task = vm.PowerOn()
+                task_start_list.append(task)
+                started_vms.append(vm)
+
+        if task_start_list:
+            # wait until all tasks are completed
+            vcutil.wait_for_tasks(task_start_list)
+
+        return started_vms
+
+
+class VCenterVmsOps:
+    """
+    Helper class to build properly configured vms and to handle vm configuration
+    """
+
+    def __init__(
+        self,
+        vc_config: VCenterConfig,
+        vc_vmsutil: VCenterVmsUtil,
+        vc_netutil: VCenterNetworkUtil,
+        session,
+    ):
+        self.vc_config = vc_config
+        self.vc_vmsutil = vc_vmsutil
+        self.vcnet_util = vc_netutil
+
+        # The session is provided to this object as it is only used to operate on vms
+        self.session = session
+
+        self.logger = self.vc_vmsutil.logger
+
+    def prepare_vm_base_config(self, vm_name, flavor, image):
+        """
+        Prepares the base config spec in pyvmomi for the new vm
+        """
+        self.logger.debug("Prepare vmconfig spec")
+
+        vm_config_spec = vim.vm.ConfigSpec()
+        vm_config_spec.name = vm_name
+        vm_config_spec.memoryMB = flavor.get(FLAVOR_RAM_KEY)
+        vm_config_spec.numCPUs = flavor.get(FLAVOR_VCPUS_KEY)
+        vm_config_spec.guestId = image.config.guestId
+
+        # Get image metadata
+        metadata = self._get_vm_metadata(vm_name, flavor, image)
+        vm_config_spec.annotation = metadata
+
+        device_changes = []
+        vm_config_spec.deviceChange = device_changes
+        return vm_config_spec
+
+    def _get_vm_metadata(self, vm_name, flavor, image):
+
+        metadata = []
+        metadata.append(("name", vm_name))
+        metadata.append(("imageid", image.config.instanceUuid))
+        for prop_name, value in flavor.items():
+            metadata.append((f"flavor:{prop_name}", value))
+        return "".join(["%s:%s\n" % (k, v) for k, v in metadata])
+
+    def prepare_vm_main_disk(self, flavor, image_vm, vm_config_spec, new_datastore):
+        """
+        Obtain main disk from image and modify its size to clone it
+        """
+        # Note: this code assumes the image has a single main disk;
+        # review whether images with more than one disk must be supported
+        self.logger.debug("Prepare main disk size: %s", flavor.get(FLAVOR_DISK_KEY))
+        new_disk_size_gb = flavor.get(FLAVOR_DISK_KEY)
+
+        # Update spec
+        device_changes = vm_config_spec.deviceChange
+        for device in image_vm.config.hardware.device:
+            if isinstance(device, vim.vm.device.VirtualDisk):
+                disk_spec = vim.vm.device.VirtualDeviceSpec()
+                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.edit
+                disk_spec.device = device
+
+                # Check the new capacity is not smaller than the image disk capacity
+                curr_disk_capacity_gb = disk_spec.device.capacityInKB / (1024 * 1024)
+                self.logger.debug("Obtained main disk, size: %s", curr_disk_capacity_gb)
+                if curr_disk_capacity_gb > new_disk_size_gb:
+                    raise vimconn.VimConnException(
+                        f"New disk size : {new_disk_size_gb} can not be lower that image size: "
+                        f" {curr_disk_capacity_gb}"
+                    )
+
+                # Set new capacity
+                disk_spec.device.capacityInKB = (
+                    new_disk_size_gb * 1024 * 1024
+                )  # Convert GB to KB
+
+                # If needed at some point, a datastore could also be specified here
+
+                device_changes.append(disk_spec)
+
+    def prepare_vm_networks(self, net_list, template_vm, vm_config_spec):
+        """
+        Prepare configuration to add network interfaces to the new vm
+        """
+
+        # Obtain device_changes to update configuration
+        device_changes = vm_config_spec.deviceChange
+
+        # Remove existing network interfaces in case they exist
+        self._prepare_remove_existing_nics(template_vm, device_changes)
+
+        # Add a nic for each net
+        for net in net_list:
+            # Skip non-connected iface
+            if not net.get("net_id"):
+                self.logger.debug(f"Skipping unconnected interface: {net}")
+                continue
+
+            self.logger.debug(f"Prepare nic for net: {net}")
+            nic_spec = self._prepare_vm_nic(net, vm_config_spec)
+            device_changes.append(nic_spec)
+
+    def _prepare_remove_existing_nics(self, template_vm, device_changes):
+        for device in template_vm.config.hardware.device:
+            if isinstance(device, vim.vm.device.VirtualEthernetCard):
+                self.logger.debug(
+                    "Remove existing nic from template, label: %s",
+                    device.deviceInfo.label,
+                )
+                nic_spec = vim.vm.device.VirtualDeviceSpec()
+                nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove
+                nic_spec.device = device
+                device_changes.append(nic_spec)
+
+    def _prepare_vm_nic(self, net, vm_config_spec):
+
+        mac_address = net.get("mac_address", None)
+
+        # Get network from network id
+        self.logger.debug("Prepare nic configuration net_id: %s", net.get("net_id"))
+        network = self.vcnet_util.get_network_by_id(self.session, net.get("net_id"))
+        self.logger.debug(f"Recovered network: {network}")
+        self.logger.debug(f"Recovered network: {network.key}")
+        self.logger.debug(
+            f"Recovered network: {network.config.distributedVirtualSwitch.uuid}"
+        )
+
+        # Obtain an available key
+        key = self.get_unused_device_key(vm_config_spec.deviceChange)
+
+        # Prepare nic specification
+        nic_spec = vim.vm.device.VirtualDeviceSpec()
+        nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+
+        # Create the right adapter for the type of network
+        nic = None
+        nic_type = net.get("type")
+        if nic_type == "virtual":
+            nic = vim.vm.device.VirtualVmxnet3()
+        elif nic_type == "SR-IOV":
+            nic = vim.vm.device.VirtualSriovEthernetCard()
+
+            # If there are sriov interfaces, all memory must be reserved
+            vm_config_spec.memoryReservationLockedToMax = True
+        else:
+            self.logger.debug("Nic type: %s not supported", nic_type)
+            raise vimconn.VimConnException(f"Nic type: {nic_type} not supported")
+
+        nic.backing = (
+            vim.vm.device.VirtualEthernetCard.DistributedVirtualPortBackingInfo()
+        )
+        nic.backing.port = vim.dvs.PortConnection()
+        nic.backing.port.portgroupKey = network.key
+        nic.backing.port.switchUuid = network.config.distributedVirtualSwitch.uuid
+
+        nic.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+        nic.connectable.startConnected = True
+        nic.connectable.allowGuestControl = True
+        nic.wakeOnLanEnabled = True
+
+        # Assign mac address if exists
+        if mac_address:
+            nic.addressType = "manual"
+            nic.macAddress = mac_address
+
+        # Assign key
+        nic.key = key
+        nic_spec.device = nic
+        return nic_spec
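+
+    # Sketch of the `net` entry this method expects (keys as used above;
+    # the values are hypothetical):
+    #
+    #   net = {
+    #       "net_id": net_id,   # id returned by VCenterNetworkUtil for a dpg
+    #       "type": "virtual",  # or "SR-IOV"
+    #       "mac_address": "00:50:56:aa:bb:cc",  # optional, sets a manual mac
+    #   }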
+
+    def prepare_vm_quotas(self, extended_flavor_quotas, vm_config_spec):
+        """
+        Prepares the vm quotas configuration
+        """
+        self.logger.debug("Prepare quotas configuration: %s", extended_flavor_quotas)
+
+        if extended_flavor_quotas.get("cpu-quota"):
+            vm_config_spec.cpuAllocation = self._prepare_resource_allocation_config(
+                extended_flavor_quotas.get("cpu-quota")
+            )
+
+        if extended_flavor_quotas.get("mem-quota"):
+            vm_config_spec.memoryAllocation = self._prepare_resource_allocation_config(
+                extended_flavor_quotas.get("mem-quota")
+            )
+
+    def _prepare_resource_allocation_config(self, quota_config):
+        self.logger.debug("Prepare resource allocation config: %s", quota_config)
+        resource_allocation = vim.ResourceAllocationInfo()
+        if quota_config.get("reserve"):
+            resource_allocation.reservation = quota_config.get("reserve")
+        if quota_config.get("limit"):
+            resource_allocation.limit = quota_config.get("limit")
+        if quota_config.get("shares"):
+            resource_allocation.shares = vim.SharesInfo(
+                level="custom", shares=quota_config.get("shares")
+            )
+
+        self.logger.debug("Resource allocation config done")
+        return resource_allocation
+
+    def attach_cdrom(self, vm, iso_filename):
+        """
+        Attaches the indicated iso file to the provided vm; the iso file must
+        already be uploaded to vmware vcenter
+        """
+        self.logger.debug(
+            "Attach iso to vm: '%s', iso file: '%s'", vm.name, iso_filename
+        )
+
+        # 1 - Find free IDE controller
+        controller_key = self._find_free_ide_controller(vm)
+
+        # 2 - Build iso attach specification
+        device_spec = self._prepare_cdrom_spec(controller_key, iso_filename)
+        config_spec = vim.vm.ConfigSpec(deviceChange=[device_spec])
+
+        # 3 - Set the boot order to start from cd-rom
+        config_spec.bootOptions = vim.vm.BootOptions(
+            bootOrder=[vim.vm.BootOptions.BootableCdromDevice()]
+        )
+
+        # 4 - Reconfigure the vm to attach cd-rom
+        self.reconfigure_vm(vm, config_spec)
+
+    def _find_free_ide_controller(self, vm):
+        """
+        Finds a free ide controller in the provided vm
+        """
+        for dev in vm.config.hardware.device:
+            if isinstance(dev, vim.vm.device.VirtualIDEController):
+                # If there are fewer than 2 devices attached, we can use it.
+                if len(dev.device) < 2:
+                    return dev.key
+        return None
+
+    def _prepare_cdrom_spec(self, controller_key, iso_filename):
+
+        device_spec = vim.vm.device.VirtualDeviceSpec()
+        device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+
+        cdrom = vim.vm.device.VirtualCdrom()
+        cdrom.controllerKey = controller_key
+        cdrom.key = -1
+
+        backing = vim.vm.device.VirtualCdrom.IsoBackingInfo()
+        backing.fileName = iso_filename
+        # backing.datastore = datastore
+        cdrom.backing = backing
+
+        connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+        connectable.allowGuestControl = True
+        connectable.startConnected = True
+        cdrom.connectable = connectable
+
+        device_spec.device = cdrom
+        return device_spec
+
+    def reconfigure_vm(self, vm, new_config_spec):
+        """
+        Reconfigure vm with the changes indicated in new_config_spec
+        """
+        self.logger.debug("Reconfigure vm name: '%s'", vm.name)
+        task = vm.Reconfigure(new_config_spec)
+        vcutil.wait_for_task(task)
+        self.logger.debug("Vm name: '%s' reconfigured", vm.name)
+
+    def prepare_ephemeral_disk(
+        self, original_vm, vm_config_spec, datastore, disk_size_gb, created_items
+    ):
+        """
+        Prepares the specification for an ephemeral disk
+        """
+        self.logger.debug("Prepare ephemeral disk size: %s", disk_size_gb)
+
+        disk_folder = vm_config_spec.name
+        disk_name = f"{vm_config_spec.name}-ephemeral"
+        device_spec = self._prepare_disk_spec(
+            original_vm=original_vm,
+            vm_config_spec=vm_config_spec,
+            datastore=datastore,
+            disk_folder=disk_folder,
+            disk_name=disk_name,
+            disk_size_gb=disk_size_gb,
+        )
+        if not vm_config_spec.deviceChange:
+            vm_config_spec.deviceChange = []
+        vm_config_spec.deviceChange.append(device_spec)
+
+    def prepare_permanent_disk(
+        self, original_vm, vm_config_spec, datastore, disk, disk_index, created_items
+    ):
+        """
+        Creates a permanent disk; if the disk must be kept after the vm is
+        deleted, it is created in another folder
+        """
+        self.logger.debug(
+            "Prepare persisten volume disk index: %s, size: %s, name: %s",
+            disk_index,
+            disk.get("size"),
+            disk.get("name"),
+        )
+
+        disk_folder = vm_config_spec.name
+        disk_name = f'{vm_config_spec.name}-{disk.get("name")}-{disk_index}'
+
+        device_spec = self._prepare_disk_spec(
+            original_vm=original_vm,
+            vm_config_spec=vm_config_spec,
+            datastore=datastore,
+            disk_folder=disk_folder,
+            disk_name=disk_name,
+            disk_size_gb=disk.get("size"),
+        )
+
+        # Use the disk path as id: while the disk is unattached it has no other id in vcenter
+        disk_id = device_spec.device.backing.fileName
+        self.logger.debug("Created disk id: %s", disk_id)
+
+        # Append to device_change so that the data will be stored
+        if not vm_config_spec.deviceChange:
+            vm_config_spec.deviceChange = []
+        vm_config_spec.deviceChange.append(device_spec)
+
+        # Record in created_items; the id is URL-quoted to avoid problems with spaces
+        volume_txt = "volume:" + quote(disk_id)
+        if disk.get("keep"):
+            volume_txt += ":keep"
+        created_items[volume_txt] = True
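+
+    # Sketch of the resulting created_items entry for a kept disk backed by
+    # the hypothetical path "[datastore1] vm-01/vm-01-data-0.vmdk":
+    #
+    #   created_items[
+    #       "volume:%5Bdatastore1%5D%20vm-01/vm-01-data-0.vmdk:keep"
+    #   ] = True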
+
+    def _prepare_disk_spec(
+        self,
+        original_vm,
+        vm_config_spec,
+        datastore,
+        disk_size_gb,
+        disk_folder=None,
+        disk_name=None,
+    ):
+        # TODO: validate disk_size_gb is an int > 0
+
+        # Get the full device list and obtain a free scsi controller
+        # and unit number from it
+        devices = self._get_complete_device_list(original_vm, vm_config_spec)
+        controller_key, unit_number = self._get_scsi_controller_key_unit_number(devices)
+        datastore_name = datastore.info.name
+
+        # Create a new device spec
+        device_spec = vim.vm.device.VirtualDeviceSpec()
+        device_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+        device_spec.fileOperation = vim.vm.device.VirtualDeviceSpec.FileOperation.create
+
+        # Disk backing configuration
+        disk_backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+        disk_backing.diskMode = "persistent"
+        disk_backing.thinProvisioned = True  # Optional: Set True for thin provisioning
+        disk_backing.datastore = datastore  # Use the provided datastore
+        if disk_folder and disk_name:
+            # If this folder and name are not provided vcenter sets a default filename
+            disk_backing.fileName = f"[{datastore_name}] {disk_folder}/{disk_name}.vmdk"
+
+        # Disk size in KB (1 GB = 1024 * 1024 KB)
+        disk_size_kb = int(disk_size_gb) * 1024 * 1024
+
+        disk = vim.vm.device.VirtualDisk()
+        disk.capacityInKB = disk_size_kb
+        disk.backing = disk_backing
+        disk.controllerKey = controller_key
+        disk.unitNumber = unit_number
+        disk.key = self.get_unused_device_key(vm_config_spec.deviceChange)
+
+        device_spec.device = disk
+        return device_spec
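+
+    # Illustrative values for the spec built above (hypothetical): with
+    # datastore "datastore1", folder "vm-01" and name "vm-01-ephemeral",
+    #
+    #   disk_backing.fileName == "[datastore1] vm-01/vm-01-ephemeral.vmdk"
+    #   disk.capacityInKB == 10 * 1024 * 1024  # 10485760 KB for a 10 GB disk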
+
+    def _get_complete_device_list(self, original_vm, vm_config_spec):
+        devices = []
+        # Add original vm list to devices
+        devices.extend(original_vm.config.hardware.device)
+        # Also add the devices from the new config spec: "add" devices appear only
+        # in the new list, "edit" devices may be added twice, and slots from
+        # "remove" devices cannot be reused yet
+        changed_devices = [
+            device_spec.device for device_spec in vm_config_spec.deviceChange
+        ]
+        devices.extend(changed_devices)
+        return devices
+
+    def _get_scsi_controller_key_unit_number(self, devices):
+        """
+        Obtains an available scsi controller key and unit number
+        """
+        scsi_keys = [dev.key for dev in devices if self._is_scsi_controller(dev)]
+        allocated_slots = self._find_allocated_slots(devices, scsi_keys)
+        self.logger.debug("scsi controller keys: %s", scsi_keys)
+        self.logger.debug("allocated slots: %s", allocated_slots)
+        result = self._find_controller_slot(
+            scsi_keys, allocated_slots, SCSI_CONTROLLER_MAX_DEVICES
+        )
+        if not result:
+            raise vimconn.VimConnException(
+                "Unable to find valid controller key to add a valid disk"
+            )
+        else:
+            self.logger.debug("Obtained controller key and unit number: %s", result)
+            return result
+
+    @staticmethod
+    def _is_scsi_controller(device):
+        scsi_controller_types = (
+            vim.vm.device.VirtualLsiLogicController,
+            vim.vm.device.VirtualLsiLogicSASController,
+            vim.vm.device.VirtualBusLogicController,
+            vim.vm.device.ParaVirtualSCSIController,
+        )
+        return isinstance(device, scsi_controller_types)
+
+    def _find_allocated_slots(self, devices, controller_keys):
+        allocated = {}
+        for device in devices:
+            self.logger.debug("Find allocated slots, device: %s", device)
+            if (
+                (device.controllerKey is not None)
+                and (device.controllerKey in controller_keys)
+                and (device.unitNumber is not None)
+            ):
+                unit_numbers = allocated.setdefault(device.controllerKey, [])
+                unit_numbers.append(device.unitNumber)
+        return allocated
+
+    @staticmethod
+    def _find_controller_slot(controller_keys, taken, max_unit_number):
+        for controller_key in controller_keys:
+            for unit_number in range(max_unit_number):
+                if unit_number not in taken.get(controller_key, []):
+                    return controller_key, unit_number
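+
+    # Worked sketch of the slot search: with a single scsi controller
+    # (key 1000) whose unit numbers 0 and 1 are taken, and assuming a
+    # maximum of 16 units per controller, the first free slot is (1000, 2):
+    #
+    #   VCenterVmsOps._find_controller_slot([1000], {1000: [0, 1]}, 16)
+    #   # -> (1000, 2)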
+
+    @staticmethod
+    def get_unused_device_key(device_specs):
+        """
+        Finds the next unused negative key for a list of device specs.
+        keys are temporary but
+
+        Args:
+            device_specs (list): List of vim.vm.device.VirtualDeviceSpec objects.
+
+        Returns:
+            int: The next unused negative key.
+        """
+        # Collect all used negative keys
+        device_keys = set()
+        for device_spec in device_specs:
+            if device_spec.operation == vim.vm.device.VirtualDeviceSpec.Operation.add:
+                device_keys.add(device_spec.device.key)
+
+        # Find the smallest unused negative key
+        next_negative_key = -1
+        while next_negative_key in device_keys:
+            next_negative_key -= 1
+
+        return next_negative_key
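+
+# Minimal sketch of the temporary-key allocation in get_unused_device_key
+# (hypothetical spec; vcenter assigns the definitive keys on reconfigure):
+#
+#   spec = vim.vm.device.VirtualDeviceSpec()
+#   spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+#   spec.device = vim.vm.device.VirtualDisk()
+#   spec.device.key = -1
+#   VCenterVmsOps.get_unused_device_key([spec])  # -> -2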
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vim_helper.py b/RO-VIM-vcenter/osm_rovim_vcenter/vim_helper.py
new file mode 100644 (file)
index 0000000..877c045
--- /dev/null
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+# Copyright 2025 Indra
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Helper classes for functionality not directly tied to the vcenter API,
+such as cloud-init iso generation
+"""
+import json
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+import uuid
+
+from osm_ro_plugin import vimconn
+
+
+class CloudInitHelper:
+    """
+    Class that will help to generate iso files needed for cloud-init functionality
+    """
+
+    def __init__(self, log_level=None):
+        self.logger = logging.getLogger("ro.vim.vcenter.network")
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+    def generate_cloud_init_iso(self, user_data):
+        """
+        Generates a cloud init iso with the provided user_data
+        """
+        self.logger.debug("Generate cloud init iso")
+        tmpdir = tempfile.mkdtemp()
+        iso_path = os.path.join(tmpdir, "ConfigDrive.iso")
+        latest_dir = os.path.join(tmpdir, "openstack", "latest")
+        os.makedirs(latest_dir)
+        with open(
+            os.path.join(latest_dir, "meta_data.json"), "w"
+        ) as meta_file_obj, open(
+            os.path.join(latest_dir, "user_data"), "w"
+        ) as userdata_file_obj:
+            userdata_file_obj.write(user_data)
+            meta_file_obj.write(
+                json.dumps(
+                    {
+                        "availability_zone": "nova",
+                        "launch_index": 0,
+                        "name": "ConfigDrive",
+                        "uuid": str(uuid.uuid4()),
+                    }
+                )
+            )
+        genisoimage_cmd = (
+            "genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}".format(
+                iso_path=iso_path, source_dir_path=tmpdir
+            )
+        )
+        self.logger.info(
+            'create_config_drive_iso(): Creating ISO by running command "{}"'.format(
+                genisoimage_cmd
+            )
+        )
+
+        try:
+            subprocess.check_call(
+                genisoimage_cmd, shell=True, stdout=subprocess.DEVNULL
+            )
+        except subprocess.CalledProcessError as e:
+            shutil.rmtree(tmpdir, ignore_errors=True)
+            error_msg = "create_config_drive_iso(): Exception executing genisoimage : {}".format(
+                e
+            )
+            self.logger.error(error_msg)
+            raise vimconn.VimConnException(error_msg)
+
+        return iso_path, tmpdir
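+
+    # Usage sketch (assumes genisoimage is available, as installed in the
+    # Dockerfile; upload/cleanup steps are illustrative):
+    #
+    #   helper = CloudInitHelper()
+    #   iso_path, tmpdir = helper.generate_cloud_init_iso("#cloud-config\n")
+    #   try:
+    #       ...  # upload iso_path to a vcenter datastore
+    #   finally:
+    #       helper.delete_tmp_dir(tmpdir)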
+
+    def delete_tmp_dir(self, tmpdirname):
+        """
+        Delete the tmp dir with the indicated name
+        """
+        self.logger.debug("Delete tmp dir: %s", tmpdirname)
+        shutil.rmtree(tmpdirname)
diff --git a/RO-VIM-vcenter/osm_rovim_vcenter/vimconn_vcenter.py b/RO-VIM-vcenter/osm_rovim_vcenter/vimconn_vcenter.py
new file mode 100644 (file)
index 0000000..f5efc6a
--- /dev/null
@@ -0,0 +1,1442 @@
+# -*- coding: utf-8 -*-\r
+# Copyright 2025 Indra\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#    http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r
+# implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+"""\r
+OSM RO VIM connector plugin for VMware vCenter\r
+"""\r
+import functools\r
+import logging\r
+import ssl\r
+from urllib.parse import quote, urlparse\r
+import uuid\r
+\r
+from osm_ro_plugin import vimconn\r
+from osm_rovim_vcenter import vcenter_util as vcutil\r
+from osm_rovim_vcenter import vcenter_vms as vcvmutil\r
+from osm_rovim_vcenter.vcenter_config import VCenterConfig\r
+from osm_rovim_vcenter.vcenter_ipmanager import VCenterIpManager\r
+from osm_rovim_vcenter.vcenter_network import VCenterNetworkUtil\r
+from osm_rovim_vcenter.vcenter_util import VCenterFileUploader\r
+from osm_rovim_vcenter.vcenter_vms import VCenterVmsOps\r
+from osm_rovim_vcenter.vcenter_vms import VCenterVmsUtil\r
+from osm_rovim_vcenter.vim_helper import CloudInitHelper\r
+from pyVim.connect import Disconnect, SmartConnect\r
+from pyVmomi import vim\r
+import yaml\r
+\r
+\r
+def handle_connector_exceptions(func):\r
+    """\r
+    Decorator function that handles and reraises exceptions\r
+    """\r
+\r
+    @functools.wraps(func)\r
+    def format_exception(*args, **kwargs):\r
+        try:\r
+            return func(*args, **kwargs)\r
+        except Exception as e:\r
+            vimconnector._format_raise_exception(e)\r
+\r
+    return format_exception\r
+\r
+\r
+DEFAULT_OSM_TENANT_NAME = "default"\r
+\r
+\r
+class vimconnector(vimconn.VimConnector):\r
+    """\r
+    RO Vcenter plugin main class\r
+    """\r
+\r
+    # Dict to store flavors in memory, indexed by flavor id\r
+    _flavorlist = {}\r
+\r
+    # Affinity groups, using the name as id; duplicates are not allowed because\r
+    # with duplicates there would be no way to know whether a new affinity group\r
+    # must be created or not\r
+    _affinity_groups = {}\r
+\r
+    def __init__(\r
+        self,\r
+        uuid=None,\r
+        name=None,\r
+        tenant_id=None,\r
+        tenant_name=None,\r
+        url=None,\r
+        url_admin=None,\r
+        user=None,\r
+        passwd=None,\r
+        log_level=None,\r
+        config={},\r
+        persistent_info={},\r
+    ):\r
+        """\r
+        TODO - documentation\r
+        :param uuid:\r
+        :param name:\r
+        :param tenant_id:\r
+        :param tenant_name:\r
+        :param url:\r
+        :param url_admin:\r
+        :param user:\r
+        :param passwd:\r
+        :param log_level:\r
+        :param config:\r
+        :param persistent_info:\r
+        """\r
+        vimconn.VimConnector.__init__(\r
+            self,\r
+            uuid,\r
+            name,\r
+            tenant_id,\r
+            tenant_name,\r
+            url,\r
+            url_admin,\r
+            user,\r
+            passwd,\r
+            log_level,\r
+            config,\r
+        )\r
+\r
+        self.logger = logging.getLogger("ro.vim.vcenter")\r
+        if log_level:\r
+            self.logger.setLevel(getattr(logging, log_level))\r
+            self.log_level = log_level\r
+\r
+        self.persistent_info = persistent_info\r
+\r
+        self.logger.info(\r
+            "Initializing vcenter plugin, name:%s, uuid: %s, tenant_name: %s",\r
+            name,\r
+            uuid,\r
+            tenant_name,\r
+        )\r
+        self.logger.info("Connection info, url: %s, user: %s", url, user)\r
+        self.logger.info("Config information: %s ", config)\r
+        self.logger.info("Persistent info: %s", persistent_info)\r
+\r
+        # Parse the URL to extract the hostname\r
+        parsed_url = urlparse(url)\r
+        self.vcenter_hostname = parsed_url.hostname\r
+\r
+        # Default port is 443\r
+        self.vcenter_port = (\r
+            parsed_url.port\r
+            if parsed_url.port\r
+            else (443 if parsed_url.scheme == "https" else 80)\r
+        )\r
+        self.logger.debug(\r
+            "vcenter_hostname: %s, vcenter_port: %s",\r
+            self.vcenter_hostname,\r
+            self.vcenter_port,\r
+        )\r
+\r
+        # Prepare ssl context\r
+        if self.config.get("insecure") and self.config.get("ca_cert"):\r
+            raise vimconn.VimConnException(\r
+                "options insecure and ca_cert are mutually exclusive"\r
+            )\r
+        elif self.config.get("insecure") is None and self.config.get("ca_cert") is None:\r
+            raise vimconn.VimConnException(\r
+                "either providing certificates or selecting insecure connection is required"\r
+            )\r
+\r
+        if self.config.get("insecure"):\r
+            self.logger.warning("Using insecure ssl context")\r
+            self.ssl_context = ssl._create_unverified_context()\r
+\r
+        if self.config.get("ca_cert"):\r
+            self.logger.debug("ca_cert path: %s", self.config.get("ca_cert"))\r
+            self.ssl_context = ssl.create_default_context(\r
+                cafile=self.config.get("ca_cert")\r
+            )\r
+\r
+        # Assign default tenant name if not provided; also check for the string\r
+        # "null" because an upper layer seems to set it when no tenant\r
+        # name is provided\r
+        if not tenant_name or tenant_name == "null":\r
+            self.tenant_name = DEFAULT_OSM_TENANT_NAME\r
+\r
+        # Availability zone: for the moment only one is supported, but it is required\r
+        # The availability zone must correspond to a cluster or resource pool name\r
+        self.availability_zone = self.config.get("availability_zone")\r
+        if not self.availability_zone:\r
+            raise vimconn.VimConnException(\r
+                "Config parameter availability_zone is required"\r
+            )\r
+\r
+        # Allows indicating the distributed virtual switch (could more than one be supported?)\r
+        self.dvs_names = self.config.get("availability_network_zone")\r
+        if not self.dvs_names:\r
+            raise vimconn.VimConnException(\r
+                "Config parameter availability_network_zone is required"\r
+            )\r
+\r
+        # Datastore configuration\r
+        self.datastore = self.config.get("datastore")\r
+        if not self.datastore:\r
+            raise vimconn.VimConnException("Config parameter datastore is required")\r
+\r
+        # Nsx configuration\r
+        self.nsx_url = self.config.get("nsx_url")\r
+        self.nsx_user = self.config.get("nsx_user")\r
+        self.nsx_password = self.config.get("nsx_password")\r
+        self.nsx_verify_ssl = False\r
+        if self.config.get("nsx_ca_cert"):\r
+            self.nsx_verify_ssl = self.config.get("nsx_ca_cert")\r
+\r
+        self.dhcp_configure_always = self.config.get("dhcp_configure_always", False)\r
+\r
+        # Initialize vcenter helper objects\r
+        self.vcenter_fileuploader = VCenterFileUploader(\r
+            self.vcenter_hostname,\r
+            self.vcenter_port,\r
+            self.user,\r
+            self.passwd,\r
+            self.config.get("ca_cert", None),\r
+            log_level=log_level,\r
+        )\r
+        self.vcenter_config = VCenterConfig(\r
+            self.availability_zone,\r
+            tenant_id,\r
+            self.tenant_name,\r
+            datastore_name=self.datastore,\r
+            distributed_switches_names=self.dvs_names,\r
+            log_level=log_level,\r
+        )\r
+        self.vcnet_util = VCenterNetworkUtil(log_level=log_level)\r
+        self.vcvms_util = VCenterVmsUtil(self.vcenter_config, log_level=log_level)\r
+        self.cloudinit_helper = CloudInitHelper(log_level=log_level)\r
+        self.vcenter_ipmanager = VCenterIpManager(\r
+            vc_netutil=self.vcnet_util,\r
+            nsx_url=self.nsx_url,\r
+            nsx_user=self.nsx_user,\r
+            nsx_password=self.nsx_password,\r
+            nsx_verify_ssl=self.nsx_verify_ssl,\r
+            dhcp_configure_always=self.dhcp_configure_always,\r
+        )\r
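+\r
+        # Sketch of the `config` dict this constructor expects (values are\r
+        # hypothetical; "insecure" and "ca_cert" are mutually exclusive and\r
+        # exactly one of them must be provided):\r
+        #\r
+        #   config = {\r
+        #       "availability_zone": "Cluster-1",\r
+        #       "availability_network_zone": "dvs-1",\r
+        #       "datastore": "datastore1",\r
+        #       "insecure": True,\r
+        #       # "ca_cert": "/path/to/ca.pem",\r
+        #       # "nsx_url": "https://nsx.example.com",\r
+        #       # "dhcp_configure_always": False,\r
+        #   }\r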
+\r
+    def check_vim_connectivity(self):\r
+        self.logger.debug("Check vim connectivity")\r
+        # Load vcenter content to test connection\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            vcutil.get_vcenter_content(session)\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    def get_tenant_list(self, filter_dict={}):\r
+        """Obtain tenants of VIM\r
+        filter_dict dictionary that can contain the following keys:\r
+            name: filter by tenant name\r
+            id: filter by tenant uuid/id\r
+            <other VIM specific>\r
+        Returns the tenant list of dictionaries, or an empty list if no tenant matches all the filters:\r
+            [{'name':'<name>, 'id':'<id>, ...}, ...]\r
+        """\r
+        self.logger.warning("Get tenant list is not supported in vcenter")\r
+        raise vimconn.VimConnNotImplemented(\r
+            "Get tenant list is not supported in vcenter"\r
+        )\r
+\r
+    def new_tenant(self, tenant_name, tenant_description):\r
+        """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided\r
+        "tenant_name": string max lenght 64\r
+        "tenant_description": string max length 256\r
+        returns the tenant identifier or raise exception\r
+        """\r
+        self.logger.warning("new_tenant is not supported in vcenter")\r
+        raise vimconn.VimConnNotImplemented("new_tenant is not supported in vcenter")\r
+\r
+    def delete_tenant(self, tenant_id):\r
+        """Delete a tenant from VIM\r
+        tenant_id: returned VIM tenant_id on "new_tenant"\r
+        Returns None on success. Raises an exception on failure. If tenant is not found raises VimConnNotFoundException\r
+        """\r
+        self.logger.warning("delete_tenant is not supported in vcenter")\r
+        raise vimconn.VimConnNotImplemented("delete_tenant is not supported in vcenter")\r
+\r
+    def get_flavor(self, flavor_id):\r
+        """Obtain flavor details from the VIM\r
+        Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }\r
+        Raises an exception upon error or if not found\r
+        """\r
+        self.logger.debug("Get flavor with id: %s", flavor_id)\r
+\r
+        if flavor_id not in self._flavorlist:\r
+            raise vimconn.VimConnNotFoundException("Flavor not found.")\r
+\r
+        return self._flavorlist[flavor_id]\r
+\r
+    def get_flavor_id_from_data(self, flavor_dict):\r
+        """Obtain flavor id that match the flavor description\r
+        Params:\r
+            'flavor_dict': dictionary that contains:\r
+                'disk': main hard disk in GB\r
+                'ram': memory in MB\r
+                'vcpus': number of virtual cpus\r
+                #TODO: complete parameters for EPA\r
+        Returns the flavor_id or raises a VimConnNotFoundException\r
+        """\r
+        self.logger.debug("Get flavor from data: %s", flavor_dict)\r
+        # As flavors are only stored in memory in this connector, always raise\r
+        # a VimConnNotFoundException\r
+        raise vimconn.VimConnNotFoundException(\r
+            "get_flavor_id_from_data not used in this plugin"\r
+        )\r
+\r
+    def new_flavor(self, flavor_data):\r
+        """Adds a tenant flavor to VIM\r
+            flavor_data contains a dictionary with information, keys:\r
+                name: flavor name\r
+                ram: memory (cloud type) in MBytes\r
+                vcpus: cpus (cloud type)\r
+                extended: EPA parameters\r
+                  - numas: #items requested in same NUMA\r
+                        memory: number of 1G huge pages memory\r
+                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual\r
+                            threads\r
+                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa\r
+                          - name: interface name\r
+                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC\r
+                            bandwidth: X Gbps; requested guarantee bandwidth\r
+                            vpci: requested virtual PCI address\r
+                disk: disk size\r
+                is_public:\r
+                 #TODO to concrete\r
+        Returns the flavor identifier\r
+        """\r
+        self.logger.debug("New flavor data: %s", flavor_data)\r
+\r
+        new_flavor = flavor_data\r
+        ram = flavor_data.get(vcvmutil.FLAVOR_RAM_KEY, 1024)\r
+        cpu = flavor_data.get(vcvmutil.FLAVOR_VCPUS_KEY, 1)\r
+        disk = flavor_data.get(vcvmutil.FLAVOR_DISK_KEY, 0)\r
+\r
+        self._validate_int(ram, "ram")\r
+        self._validate_int(cpu, "cpu")\r
+        self._validate_int(disk, "disk")\r
+\r
+        # Generate a new uuid, store the flavor in the internal dict and return it\r
+        flavor_id = uuid.uuid4()\r
+        self._flavorlist[str(flavor_id)] = new_flavor\r
+        self.logger.debug("Created flavor - %s : %s", flavor_id, new_flavor)\r
+\r
+        return str(flavor_id)\r
+\r
+    def delete_flavor(self, flavor_id):\r
+        """Deletes a tenant flavor from VIM identify by its id\r
+        Returns the used id or raise an exception\r
+        """\r
+        self.logger.debug("Delete flavor id: %s", flavor_id)\r
+        if flavor_id in self._flavorlist:\r
+            self._flavorlist.pop(flavor_id)\r
+            return flavor_id\r
+        else:\r
+            self.logger.info("Flavor with id: %s not found ", flavor_id)\r
+\r
+    def get_affinity_group(self, affinity_group_id):\r
+        """Obtain affinity or anti affinity group details from the VIM\r
+        Returns the affinity group dict details {'id':<>, 'name':<>, other vim specific }\r
+        Raises an exception upon error or if not found\r
+        """\r
+        self.logger.debug("Get affinity group with id: %s", affinity_group_id)\r
+        if affinity_group_id not in self._affinity_groups:\r
+            raise vimconn.VimConnNotFoundException(\r
+                "Affinity group with id: %s not found"\r
+            )\r
+\r
+        return self._affinity_groups[affinity_group_id]\r
+\r
+    def new_affinity_group(self, affinity_group_data):\r
+        """Adds an affinity or anti affinity group to VIM\r
+            affinity_group_data contains a dictionary with information, keys:\r
+                name: name in VIM for the affinity or anti-affinity group\r
+                type: affinity or anti-affinity\r
+                scope: Only nfvi-node allowed\r
+        Returns the affinity or anti affinity group identifier\r
+        """\r
+        self.logger.debug("New affinity group, data: %s", affinity_group_data)\r
+        affinity_group_name = affinity_group_data.get("name")\r
+        affinity_group_type = affinity_group_data.get("type")\r
+\r
+        if affinity_group_name in self._affinity_groups:\r
+            affinity_group = self._affinity_groups.get(affinity_group_name)\r
+            if affinity_group_type != affinity_group.get("type"):\r
+                self.logger.warning(\r
+                    "There is already an affinity group with name %s "\r
+                    "and different type: % s",\r
+                    affinity_group_name,\r
+                    affinity_group_type,\r
+                )\r
+                raise vimconn.VimConnNotFoundException(\r
+                    f"there is already an affinity group with name: {affinity_group_name} and "\r
+                    "different type"\r
+                )\r
+        else:\r
+            affinity_group = affinity_group_data\r
+            self._affinity_groups[affinity_group_name] = affinity_group_data\r
+\r
+        self.logger.debug("Affinity groups: %s", self._affinity_groups)\r
+        return affinity_group.get("name")\r
+\r
+    def delete_affinity_group(self, affinity_group_id):\r
+        """\r
+        Deletes an affinity or anti-affinity group from the VIM identified by its id\r
+        Returns the used id or raises an exception\r
+        """\r
+        self.logger.debug("Delete affinity group with id: %s", affinity_group_id)\r
+\r
+        if affinity_group_id in self._affinity_groups:\r
+            self.logger.info(\r
+                "Deleting affinity group %s",\r
+                self._affinity_groups.get("affinity_group_id"),\r
+            )\r
+            del self._affinity_groups[affinity_group_id]\r
+        else:\r
+            self.logger.info("Affinity group with id %s not found", affinity_group_id)\r
+\r
+        self.logger.debug("Affinity groups: %s", self._affinity_groups)\r
+        return affinity_group_id\r
+\r
+    def new_image(self, image_dict):\r
+        """Adds a tenant image to VIM\r
+        Returns the image id or raises an exception if failed\r
+        """\r
+        self.logger.debug("Create new image: %s", image_dict)\r
+        raise vimconn.VimConnNotImplemented("new image is not supported in vcenter")\r
+\r
+    def delete_image(self, image_id):\r
+        """Deletes a tenant image from VIM\r
+        Returns the image_id if image is deleted or raises an exception on error\r
+        """\r
+        self.logger.debug("Delete image: %s", image_id)\r
+        raise vimconn.VimConnNotImplemented("delete image is not supported in vcenter")\r
+\r
+    def get_image_id_from_path(self, path):\r
+        """Get the image id from image path in the VIM database.\r
+        Returns the image_id or raises a VimConnNotFoundException\r
+        """\r
+        self.logger.debug("Get image from path: %s", path)\r
+        raise vimconn.VimConnNotImplemented(\r
+            "get image from path is not supported in vcenter"\r
+        )\r
+\r
+    @handle_connector_exceptions\r
+    def get_image_list(self, filter_dict=None):\r
+        """Obtain tenant images from VIM\r
+        Filter_dict can be:\r
+            name: image name\r
+            id: image uuid\r
+            checksum: image checksum\r
+            location: image path\r
+        Returns the image list of dictionaries:\r
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]\r
+            List can be empty\r
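+        Example (illustrative):\r
+            images = conn.get_image_list({"name": "ubuntu-22.04-template"})\r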
+        """\r
+        filter_dict = filter_dict or {}\r
+        self.logger.debug("Get image list, filter_dict: %s", filter_dict)\r
+\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            # Get images\r
+            image_list = self.vcvms_util.list_images(session, filter_dict=filter_dict)\r
+\r
+            self.logger.debug("Image list: %s", image_list)\r
+            return image_list\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    def new_vminstance(\r
+        self,\r
+        name: str,\r
+        description: str,\r
+        start: bool,\r
+        image_id: str,\r
+        flavor_id: str,\r
+        affinity_group_list: list,\r
+        net_list: list,\r
+        cloud_config=None,\r
+        disk_list=None,\r
+        availability_zone_index=None,\r
+        availability_zone_list=None,\r
+        security_group_name=None,\r
+    ) -> tuple:\r
+        """Adds a VM instance to VIM.\r
+\r
+        Args:\r
+            name    (str):          name of VM\r
+            description (str):      description\r
+            start   (bool):         indicates if VM must start or boot in pause mode. Ignored\r
+            image_id    (str)       image uuid\r
+            flavor_id   (str)       flavor uuid\r
+            affinity_group_list (list):     list of affinity groups, each one is a dictionary. Ignored if empty.\r
+            net_list    (list):         list of interfaces, each one is a dictionary with:\r
+                name:   name of network\r
+                net_id:     network uuid to connect\r
+                vpci:   virtual PCI address to assign, currently ignored #TODO\r
+                model:  interface model, ignored #TODO\r
+                mac_address:    used for SR-IOV ifaces #TODO for other types\r
+                use:    'data', 'bridge',  'mgmt'\r
+                type:   'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'\r
+                vim_id:     filled/added by this function\r
+                floating_ip:    True/False (or it can be None)\r
+                port_security:  True/False\r
+            cloud_config    (dict): (optional) dictionary with:\r
+                key-pairs:      (optional) list of strings with the public key to be inserted to the default user\r
+                users:      (optional) list of users to be inserted, each item is a dict with:\r
+                    name:   (mandatory) user name,\r
+                    key-pairs: (optional) list of strings with the public key to be inserted to the user\r
+                user-data:  (optional) string is a text script to be passed directly to cloud-init\r
+                config-files:   (optional). List of files to be transferred. Each item is a dict with:\r
+                    dest:   (mandatory) string with the destination absolute path\r
+                    encoding:   (optional, by default text). Can be one of:\r
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'\r
+                    content :    (mandatory) string with the content of the file\r
+                    permissions:    (optional) string with file permissions, typically octal notation '0644'\r
+                    owner:  (optional) file owner, string with the format 'owner:group'\r
+                boot-data-drive:    boolean to indicate if user-data must be passed using a boot drive (hard disk)\r
+            disk_list:  (optional) list with additional disks to the VM. Each item is a dict with:\r
+                image_id:   (optional). VIM id of an existing image. If not provided an empty disk must be mounted\r
+                size:   (mandatory) string with the size of the disk in GB\r
+                vim_id:  (optional) should use this existing volume id\r
+            availability_zone_index:    Index of availability_zone_list to use for this VM. None if no availability zone is required\r
+            availability_zone_list:     list of availability zones given by user in the VNFD descriptor.  Ignore if\r
+                availability_zone_index is None\r
+                #TODO ip, security groups\r
+\r
+        Returns:\r
+            A tuple with the instance identifier and created_items or raises an exception on error\r
+            created_items can be None or a dictionary where this method can include key-values that will be passed to\r
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.\r
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same\r
+            as not present.\r
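+            Example created_items (illustrative, using the key formats produced by this connector):\r
+                {"volume:<volume_id>:keep": True, "cloud-init-iso:<quoted path>": True,\r
+                 "affinity-group:<name>": True}\r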
+\r
+        """\r
+        self.logger.info(\r
+            "new vm_instance name: %s, image_id: %s, flavor_id: %s",\r
+            name,\r
+            image_id,\r
+            flavor_id,\r
+        )\r
+        self.logger.debug(\r
+            "new_vinstance data, net_list: %s, disk_list: %s"\r
+            " affinity_group_list: %s, cloud_config: %s,",\r
+            net_list,\r
+            disk_list,\r
+            affinity_group_list,\r
+            cloud_config,\r
+        )\r
+        net_list = net_list or []\r
+        disk_list = disk_list or []\r
+        affinity_group_list = affinity_group_list or []\r
+\r
+        session = self._get_vcenter_instance()\r
+        new_vm = None\r
+        created_items = {}\r
+        try:\r
+            vc_vmops = VCenterVmsOps(\r
+                self.vcenter_config, self.vcvms_util, self.vcnet_util, session\r
+            )\r
+\r
+            # Recover flavor, image template, resource pool, cluster, datastore\r
+            # datastore info: if it is not in the configuration, use the same one as the template\r
+            flavor = self.get_flavor(flavor_id)\r
+            self.logger.debug("Flavor recovered: %s", flavor)\r
+\r
+            # Obtain image to clone\r
+            image_vm = self.vcvms_util.get_image_by_uuid(session, image_id)\r
+            self.logger.debug("Image recovered: %s", image_vm)\r
+\r
+            # Obtain needed configuration\r
+            datastore = self.vcenter_config.get_datastore(session)\r
+            self.logger.debug("Datastore 1: %s", datastore)\r
+            cluster, resource_pool = self.vcenter_config.get_cluster_rp_from_av_zone(\r
+                session, availability_zone_index, availability_zone_list\r
+            )\r
+            vms_folder = self.vcenter_config.get_instances_folder(session)\r
+            self.logger.debug("Cluster: %s, resource_pool: %s", cluster, resource_pool)\r
+\r
+            # Start to prepare config data\r
+\r
+            # Prepare affinity groups (check that they can be found)\r
+            affinity_groups_full = self._prepare_affinity_groups(affinity_group_list)\r
+\r
+            # Generate vm unique name\r
+            vm_name = self._generate_vm_name(name)\r
+\r
+            # Prepare vmconfig based on image and flavor data\r
+            vm_config_spec = vc_vmops.prepare_vm_base_config(vm_name, flavor, image_vm)\r
+\r
+            # Process flavor extended config\r
+            self._process_flavor_extended_config(vc_vmops, vm_config_spec, flavor)\r
+\r
+            # Prepare main disk\r
+            vc_vmops.prepare_vm_main_disk(flavor, image_vm, vm_config_spec, datastore)\r
+\r
+            # Add network interfaces configuration\r
+            vc_vmops.prepare_vm_networks(net_list, image_vm, vm_config_spec)\r
+\r
+            # Prepare disks configuration\r
+            self._prepare_vm_disks(\r
+                flavor=flavor,\r
+                disk_list=disk_list,\r
+                created_items=created_items,\r
+                vm_config_spec=vm_config_spec,\r
+                image_vm=image_vm,\r
+                vc_vmops=vc_vmops,\r
+            )\r
+\r
+            # Generate cloud init iso\r
+            iso_path, tmp_dir = self._generate_cloud_init_iso(cloud_config)\r
+\r
+            # Clone machine\r
+            self.logger.debug("Cloning image to create vm name %s", vm_config_spec.name)\r
+            # self.logger.debug("Cloning image config spec %s", vm_config_spec)\r
+            clone_spec = vim.vm.CloneSpec(\r
+                location=vim.vm.RelocateSpec(pool=resource_pool, datastore=datastore),\r
+                powerOn=False,  # do not power on at clone time, the VM is started later\r
+                template=False,\r
+                config=vm_config_spec,\r
+            )\r
+            clone_task = image_vm.Clone(\r
+                folder=vms_folder, name=vm_config_spec.name, spec=clone_spec\r
+            )\r
+            self.logger.debug("Machine cloned, wait for clone task to complete")\r
+\r
+            # Wait until clone task is completed\r
+            new_vm = vcutil.wait_for_task(clone_task)\r
+\r
+            # Attach cloud init to vm\r
+            self._attach_cloud_init_iso(\r
+                vc_vmops, new_vm, iso_path, tmp_dir, created_items\r
+            )\r
+\r
+            # Add the machine to affinity groups\r
+            self._add_vm_affinity_groups(\r
+                session, cluster, new_vm, affinity_groups_full, created_items\r
+            )\r
+\r
+            # Assign vim_id to net\r
+            self._assign_vim_id_to_net(new_vm, net_list)\r
+\r
+            # Assign fixed ip addresses if there are any\r
+            self.vcenter_ipmanager.set_vm_ips(session, name, new_vm, net_list)\r
+\r
+            # Start vm\r
+            self.vcvms_util.start_vm(new_vm)\r
+\r
+            self.logger.info(\r
+                "Created vm, server_id:  %s, vm_name: %s, created_items: %s, "\r
+                " net_list: %s",\r
+                new_vm.config.instanceUuid,\r
+                vm_name,\r
+                created_items,\r
+                net_list,\r
+            )\r
+            return new_vm.config.instanceUuid, created_items\r
+\r
+        except Exception as e:\r
+            if new_vm:\r
+                try:\r
+                    server_uuid = new_vm.config.instanceUuid\r
+\r
+                    created_items = self.remove_keep_tag_from_persistent_volumes(\r
+                        created_items\r
+                    )\r
+\r
+                    self.delete_vminstance(server_uuid, created_items)\r
+\r
+                except Exception as e2:\r
+                    self.logger.error(f"new_vminstance rollback fail {e2}")\r
+\r
+            # Logs and reraises exception\r
+            self._format_raise_exception(e)\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    @staticmethod\r
+    def remove_keep_tag_from_persistent_volumes(created_items: dict) -> dict:\r
+        """Removes the keep flag from persistent volumes. So, those volumes could be removed.\r
+\r
+        Args:\r
+            created_items (dict):       All created items belonging to the VM\r
+\r
+        Returns:\r
+            updated_created_items   (dict):     Dict that does not include the keep flag for volumes.\r
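+\r
+        Example (illustrative):\r
+            {"volume:vol-1:keep": True} -> {"volume:vol-1": True}\r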
+\r
+        """\r
+        return {\r
+            key.replace(":keep", ""): value for (key, value) in created_items.items()\r
+        }\r
+\r
+    def _assign_vim_id_to_net(self, vm, net_list):\r
+        """\r
+        Obtains the vim_id and assigns it to the net; also assigns the mac_address if it is available\r
+        """\r
+        nics_info = self.vcvms_util.get_vm_nics_list(vm)\r
+        for net in net_list:\r
+            net_id = net.get("net_id")\r
+            # Obtain the first interface with the same net_id\r
+            for index, nic in enumerate(nics_info):\r
+                if nic.get("vim_net_id") == net_id:\r
+                    net["vim_id"] = nic.get("vim_interface_id")\r
+                    if nic.get("mac_address"):\r
+                        net["mac_address"] = nic.get("mac_address")\r
+                    del nics_info[index]\r
+                    break\r
+        if nics_info:\r
+            self.logger.warning("Unassigned elements in network: %s", nics_info)\r
+\r
+    def _prepare_vm_disks(\r
+        self, flavor, disk_list, created_items, vm_config_spec, image_vm, vc_vmops\r
+    ):\r
+        """\r
+        Prepare all volumes for vm instance\r
+        """\r
+        disk_list = disk_list or []\r
+        # could be made configurable to store permanent disks in another datastore\r
+        datastore = image_vm.datastore[0]\r
+\r
+        # Check if an ephemeral disk needs to be created\r
+        ephemeral_disk_size_gb = flavor.get("ephemeral", 0)\r
+        if int(ephemeral_disk_size_gb) > 0:\r
+            # Create ephemeral disk\r
+            vc_vmops.prepare_ephemeral_disk(\r
+                image_vm,\r
+                vm_config_spec,\r
+                datastore,\r
+                ephemeral_disk_size_gb,\r
+                created_items,\r
+            )\r
+\r
+        self.logger.debug("Process disk list: %s", disk_list)\r
+        for disk_index, disk in enumerate(disk_list, start=1):\r
+            self.logger.debug("disk_index: %s, disk: %s", disk_index, disk)\r
+            if "image_id" in disk:\r
+                self.logger.warning("Volume disks with image id not supported yet")\r
+            elif disk.get("multiattach"):\r
+                self.logger.warning("Volume disks with image id not supported yet")\r
+            elif disk.get("volume_id"):\r
+                self.logger.warning("Volumes already existing not supported yet")\r
+            else:\r
+                # Create permanent disk\r
+                vc_vmops.prepare_permanent_disk(\r
+                    image_vm, vm_config_spec, datastore, disk, disk_index, created_items\r
+                )\r
+\r
+    def _prepare_affinity_groups(self, affinity_group_id_list):\r
+        """\r
+        Check that the affinity group ids in the list can be found and recover the affinity groups from their ids\r
+        """\r
+        affinity_groups = None\r
+        if affinity_group_id_list:\r
+            affinity_groups = []\r
+            for item in affinity_group_id_list:\r
+                affinity_group_id = item["affinity_group_id"]\r
+                # Obtain the affinity group from the environment\r
+                affinity_group = self._affinity_groups.get(affinity_group_id)\r
+                if not affinity_group:\r
+                    raise vimconn.VimConnNotFoundException(\r
+                        f"Affinity group: {affinity_group_id} not found"\r
+                    )\r
+                affinity_groups.append(affinity_group)\r
+        return affinity_groups\r
+\r
+    def _add_vm_affinity_groups(\r
+        self, session, cluster, new_vm, affinity_group_list, created_items\r
+    ):\r
+\r
+        if affinity_group_list:\r
+            self.logger.debug("Add vm to affinity group list: %s", affinity_group_list)\r
+            for affinity_group in affinity_group_list:\r
+                self.vcvms_util.add_vm_or_create_affinity_group(\r
+                    session,\r
+                    cluster,\r
+                    affinity_group.get("name"),\r
+                    affinity_group.get("type"),\r
+                    new_vm,\r
+                )\r
+                affinity_group_txt = "affinity-group:" + affinity_group.get("name")\r
+                created_items[affinity_group_txt] = True\r
+\r
+    def _process_flavor_extended_config(self, vc_vmops, vm_config_spec, flavor):\r
+        """\r
+        Process the flavor extended configuration\r
+        :param flavor: dict with flavor data, extended configuration is in key 'extended'\r
+        :param vm_config_spec: vm config spec to be completed with the extended flavor config\r
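+        Example flavor extended data (illustrative): {"cpu-quota": {"limit": 2000}, "mem-quota": {"reserve": 1024}}\r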
+        """\r
+        quotas_keys = {"cpu-quota", "mem-quota"}\r
+        # quotas = {"cpu-quota", "mem-quota", "vif-quota", "disk-io-quota"}\r
+\r
+        extended = flavor.get("extended")\r
+        if extended:\r
+            self.logger.debug("Process flavor extended data: %s", extended)\r
+\r
+            # Process quotas\r
+            extended_quotas = {\r
+                key: extended[key] for key in quotas_keys & extended.keys()\r
+            }\r
+            if extended_quotas:\r
+                vc_vmops.prepare_vm_quotas(extended_quotas, vm_config_spec)\r
+\r
+    def get_vminstance(self, vm_id):\r
+        """Returns the VM instance information from VIM"""\r
+        self.logger.debug("Get vm_instance id: %s", vm_id)\r
+\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            vm = self.vcvms_util.get_vm_by_uuid(session, vm_id)\r
+            return vm\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    @handle_connector_exceptions\r
+    def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None):\r
+        """\r
+        Removes a VM instance from VIM and its associated elements\r
+        :param vm_id: VIM identifier of the VM, provided by method new_vminstance\r
+        :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method\r
+            action_vminstance\r
+        :return: None or the same vm_id. Raises an exception on fail\r
+        """\r
+        self.logger.debug(\r
+            "Delete vm_instance: vm_id: %s, "\r
+            "    created_items: %s,"\r
+            "    volumes_to_hold: %s",\r
+            vm_id,\r
+            created_items,\r
+            volumes_to_hold,\r
+        )\r
+\r
+        created_items = created_items or {}\r
+        volumes_to_hold = volumes_to_hold or {}\r
+\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            # Obtain volumes to keep\r
+            volumes_to_keep = self._extract_volumes_to_keep(created_items)\r
+            self.logger.debug("volumes_to_keep: %s", volumes_to_keep)\r
+\r
+            # Obtain cloud init iso files to delete\r
+            cloud_init_iso = self._extract_cloudinit_iso(created_items)\r
+            self.logger.debug("cloud init iso: %s", cloud_init_iso)\r
+\r
+            # Obtain vm\r
+            vm = self.vcvms_util.get_vm_by_uuid(session, vm_id)\r
+\r
+            # Shut down vm and wait, to avoid problems when volumes are detached\r
+            stop_task = self.vcvms_util.stop_vm(vm)\r
+            vcutil.wait_for_task(stop_task)\r
+\r
+            # Prepare spec to unattach volumes\r
+            unattach_spec = None\r
+            if volumes_to_keep:\r
+                unattach_spec = self.vcvms_util.prepare_unattach_volumes(\r
+                    vm, volumes_to_keep, unattach_spec\r
+                )\r
+\r
+            # Prepare spec to unattach iso\r
+            if cloud_init_iso:\r
+                unattach_spec = self.vcvms_util.prepare_unattach_cloudinitiso(\r
+                    vm, cloud_init_iso, unattach_spec\r
+                )\r
+\r
+            # Unattach volumes to keep and iso\r
+            self.vcvms_util.reconfig_vm(session, vm, unattach_spec)\r
+\r
+            # Delete iso files\r
+            self.vcvms_util.delete_iso_files(session, cloud_init_iso)\r
+\r
+            # Delete vm from affinity group\r
+            self._delete_vm_affinity_groups(session, vm, created_items)\r
+\r
+            # Delete vm\r
+            self.vcvms_util.delete_vm(session, vm_id)\r
+\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    def _delete_vm_affinity_groups(self, session, vm, created_items):\r
+\r
+        self.logger.debug("Delete vm affinity groups: %s", created_items)\r
+        vm_name = vm.name\r
+        cluster = self.vcvms_util.get_vm_cluster(session, vm)\r
+\r
+        for key, value in created_items.items():\r
+            if value is True and key.startswith("affinity-group:"):\r
+                self.logger.debug("Delete vm affinity groups key: %s", key)\r
+                # Remove vm from the affinity group; if it is the only vm, delete the affinity group\r
+                affinity_rule_name = key.split(":")[1]\r
+                self.vcvms_util.delete_vm_affinity_rule(\r
+                    session, cluster, affinity_rule_name, vm_name\r
+                )\r
+                created_items[key] = False\r
+\r
+    @staticmethod\r
+    def _extract_volumes_to_keep(created_items: dict) -> list:\r
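+        # created_items marks volumes to keep with keys like "volume:<volume_id>:keep",\r
+        # e.g. {"volume:vol-1:keep": True} -> ["vol-1"] (ids are illustrative)\r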
+        volumes_to_keep = []\r
+        for key, value in created_items.items():\r
+            if value is True and key.startswith("volume:") and ":keep" in key:\r
+                # Extract the volume ID (the part between "volume:" and ":keep")\r
+                volume_id = key.split(":")[1]\r
+                volumes_to_keep.append(volume_id)\r
+        return volumes_to_keep\r
+\r
+    @staticmethod\r
+    def _extract_cloudinit_iso(created_items: dict) -> list:\r
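+        # created_items marks cloud-init isos with keys like "cloud-init-iso:<quoted path>",\r
+        # e.g. {"cloud-init-iso:%5Bds1%5D%20vm1/vm1-cloud-init.iso": True} (path is illustrative)\r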
+        cloud_init_iso_list = []\r
+        for key, value in created_items.items():\r
+            if value is True and key.startswith("cloud-init-iso:"):\r
+                cloud_init_id = key.split(":")[1]\r
+                cloud_init_iso_list.append(cloud_init_id)\r
+        return cloud_init_iso_list\r
+\r
+    def refresh_vms_status(self, vm_list):\r
+        """Get the status of the virtual machines and their interfaces/ports\r
+        Params: the list of VM identifiers\r
+        Returns a dictionary with:\r
+            vm_id:          #VIM id of this Virtual Machine\r
+                status:     #Mandatory. Text with one of:\r
+                            #  DELETED (not found at vim)\r
+                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)\r
+                            #  OTHER (Vim reported other status not understood)\r
+                            #  ERROR (VIM indicates an ERROR status)\r
+                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),\r
+                            #  CREATING (on building process), ERROR\r
+                            #  ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)\r
+                            #\r
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR\r
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)\r
+                interfaces:\r
+                 -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)\r
+                    mac_address:      #Text format XX:XX:XX:XX:XX:XX\r
+                    vim_net_id:       #network id where this interface is connected\r
+                    vim_interface_id: #interface/port VIM id\r
+                    ip_address:       #null, or text with IPv4, IPv6 address\r
+                    compute_node:     #identification of compute node where PF,VF interface is allocated\r
+                    pci:              #PCI address of the NIC that hosts the PF,VF\r
+                    vlan:             #physical VLAN used for VF\r
+        """\r
+        self.logger.debug("Refresh vm_status vm_list: %s", vm_list)\r
+        vm_list = vm_list or []\r
+        out_vms = {}\r
+\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            for vm_id in vm_list:\r
+                self.logger.debug("Refresh vm id: %s", vm_id)\r
+                out_vm = {}\r
+                try:\r
+                    vm = self.vcvms_util.get_vm_by_uuid(session, vm_id)\r
+\r
+                    vim_vm = self.vcvms_util.get_vim_vm_basic(vm)\r
+                    out_vm["vim_info"] = self.serialize(vim_vm)\r
+                    out_vm["status"] = vim_vm.get("status", "other")\r
+\r
+                    out_vm["interfaces"] = self.vcvms_util.get_vm_nics_list(vm)\r
+\r
+                    mac_ips_dict = self.vcenter_ipmanager.get_vm_ips(session, vm)\r
+                    self.logger.debug(\r
+                        "Obtained list of macs and ip addresses: %s", mac_ips_dict\r
+                    )\r
+\r
+                    for interface in out_vm["interfaces"]:\r
+                        mac_address = interface.get("mac_address")\r
+                        if mac_ips_dict.get(mac_address):\r
+                            interface["ip_address"] = ";".join(\r
+                                mac_ips_dict.get(mac_address)\r
+                            )\r
+\r
+                except vimconn.VimConnNotFoundException as e:\r
+                    self.logger.error(\r
+                        "Not found error recovering vm id: %s, message: %s",\r
+                        vm_id,\r
+                        str(e),\r
+                    )\r
+                    out_vm["status"] = "DELETED"\r
+                    out_vm["error_msg"] = str(e)\r
+                except Exception as e:\r
+                    self.logger.error(f"Error recovering vm id: {vm_id}".format(), e)\r
+                    out_vm["status"] = "VIM_ERROR"\r
+                    out_vm["error_msg"] = str(e)\r
+\r
+                out_vms[vm_id] = out_vm\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+        self.logger.debug("Refresh vm status, result: %s", out_vms)\r
+        return out_vms\r
+\r
+    @handle_connector_exceptions\r
+    def action_vminstance(self, vm_id, action_dict, created_items=None):\r
+        """\r
+        Send an action over a VM instance. Returns created_items if the action was successfully sent to the VIM.\r
+        created_items is a dictionary with items created or modified by the action.\r
+        :param vm_id: VIM identifier of the VM, provided by method new_vminstance\r
+        :param action_dict: dictionary with the action to perform\r
+        :param created_items: provided by method new_vminstance is a dictionary with key-values that will be passed to\r
+            the method delete_vminstance. Can be used to store created ports, volumes, etc. Format is VimConnector\r
+            dependent, but do not use nested dictionaries and a value of None should be the same as not present. This\r
+            method can modify this value\r
+        :return: None, or a console dict\r
+        """\r
+        self.logger.debug(\r
+            "Action vm_instance, id: %s, action_dict: %s", vm_id, str(action_dict)\r
+        )\r
+        created_items = created_items or {}\r
+\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            # Get vm\r
+            vm = self.vcvms_util.get_vm_by_uuid(session, vm_id)\r
+            self.logger.debug("vm state: %s", vm.runtime.powerState)\r
+\r
+            if "start" in action_dict:\r
+                self.vcvms_util.start_vm(vm)\r
+            elif "shutoff" in action_dict or "shutdown" in action_dict:\r
+                self.vcvms_util.stop_vm(vm)\r
+            elif "pause" in action_dict:\r
+                # todo - pause\r
+                self.logger.warning("pause not implemented yet")\r
+\r
+            elif "resume" in action_dict:\r
+                self.logger.warning("resume not implemented yet")\r
+\r
+            elif "forceOff" in action_dict:\r
+                self.logger.warning("forceOff not implemented yet")\r
+\r
+            elif "reboot" in action_dict:\r
+                self.logger.warning("reboot action not implemented yet")\r
+\r
+            elif "terminate" in action_dict:\r
+                self.logger.warning("terminate action not implemented yet")\r
+\r
+            elif "rebuild" in action_dict:\r
+                self.logger.warning("rebuild action not implemented yet")\r
+\r
+            else:\r
+                raise vimconn.VimConnException(\r
+                    f"action_vminstance: Invalid action {action_dict} or action is None."\r
+                )\r
+\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    def get_vminstance_console(self, vm_id, console_type="vnc"):\r
+        """\r
+        Get a console for the virtual machine\r
+        Params:\r
+            vm_id: uuid of the VM\r
+            console_type, can be:\r
+                "novnc" (by default), "xvpvnc" for VNC types,\r
+                "rdp-html5" for RDP types, "spice-html5" for SPICE types\r
+        Returns dict with the console parameters:\r
+                protocol: ssh, ftp, http, https, ...\r
+                server:   usually ip address\r
+                port:     the http, ssh, ... port\r
+                suffix:   extra text, e.g. the http path and query string\r
+        """\r
+        self.logger.debug(\r
+            "Get vm instance console, vm_id: %s, console_type: %s", vm_id, console_type\r
+        )\r
+        raise vimconn.VimConnNotImplemented(\r
+            "get instance console is not supported in vcenter"\r
+        )\r
+\r
+    @handle_connector_exceptions\r
+    def new_network(\r
+        self,\r
+        net_name,\r
+        net_type,\r
+        ip_profile=None,\r
+        shared=False,\r
+        provider_network_profile=None,\r
+    ):\r
+        """Adds a tenant network to VIM\r
+        Params:\r
+            'net_name': name of the network\r
+            'net_type': one of:\r
+                'bridge': overlay isolated network\r
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces\r
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.\r
+            'ip_profile': is a dict containing the IP parameters of the network\r
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)\r
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y\r
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X\r
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]\r
+                'dhcp_enabled': True or False\r
+                'dhcp_start_address': ip_schema, first IP to grant\r
+                'dhcp_count': number of IPs to grant.\r
+            'shared': if this network can be seen/use by other tenants/organization\r
+            'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}\r
+        Returns a tuple with the network identifier and created_items, or raises an exception on error\r
+            created_items can be None or a dictionary where this method can include key-values that will be passed to\r
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.\r
+            Format is VimConnector dependent, but do not use nested dictionaries and a value of None should be the same\r
+            as not present.\r
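+        Example ip_profile (illustrative):\r
+            {"ip_version": "IPv4", "subnet_address": "192.168.10.0/24",\r
+             "gateway_address": "192.168.10.1", "dhcp_enabled": True}\r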
+        """\r
+        self.logger.debug(\r
+            "new network, net_name: %s, net_type: %s, ip_profile: %s,"\r
+            "    shared: %s, provider_network_profile: %s",\r
+            net_name,\r
+            net_type,\r
+            ip_profile,\r
+            shared,\r
+            provider_network_profile,\r
+        )\r
+        created_items = {}\r
+\r
+        # Generate network name with suffix\r
+        net_unique_name = self._generate_network_name(net_name)\r
+\r
+        # Create distributed port group\r
+        net_id = self._create_distributed_port_group(\r
+            net_unique_name, net_type, ip_profile, provider_network_profile\r
+        )\r
+\r
+        self.logger.debug("Created network id: %s, name: %s", net_id, net_unique_name)\r
+        return net_id, created_items\r
+\r
+    def _create_distributed_port_group(\r
+        self, net_name, net_type, ip_profile, provider_network_profile\r
+    ):\r
+        self.logger.debug("Create distributed port group with name: %s", net_name)\r
+\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            # Obtain dvs_names\r
+            dvs_names = self.vcenter_config.get_dvs_names(session)\r
+            if len(dvs_names) != 1:\r
+                raise vimconn.VimConnException(\r
+                    "Creation of networks is unsupported if not just one distributed switch is configured"\r
+                )\r
+\r
+            dvs_name = dvs_names[0]\r
+\r
+            # Create distributed port group\r
+            vlan = None\r
+            if provider_network_profile:\r
+                vlan = provider_network_profile.get("segmentation-id")\r
+                self.logger.debug("vlan value for network: %s", vlan)\r
+\r
+            net_id, port_group = self.vcnet_util.create_distributed_port_group(\r
+                session, net_name, dvs_name, vlan=vlan\r
+            )\r
+\r
+            return net_id\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    def get_network_list(self, filter_dict=None):\r
+        """Obtain tenant networks of VIM\r
+        Params:\r
+            'filter_dict' (optional) contains entries to return only networks that matches ALL\r
+            entries:\r
+                name: string => returns only networks with this name\r
+                id: string => returns the network with this VIM id, so at most one network\r
+                shared: boolean => returns only networks that are (or are not) shared\r
+                tenant_id: string => returns only networks that belong to this tenant/project\r
+                #(not used yet) admin_state_up: boolean => returns only networks that are\r
+                (or are not) in admin state active\r
+                #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this\r
+                # status\r
+        Returns the network list of dictionaries. each dictionary contains:\r
+            'id': (mandatory) VIM network id\r
+            'name': (mandatory) VIM network name\r
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR',\r
+            'VIM_ERROR', 'OTHER'\r
+            'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'\r
+            'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains\r
+            the segmentation id\r
+            'error_msg': (optional) text that explains the ERROR status\r
+            other VIM specific fields: (optional) whenever possible using the same naming of\r
+            filter_dict param\r
+        List can be empty if no network matches the filter_dict. Raise an exception only upon\r
+        VIM connectivity, authorization, or some other unspecific error\r
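+        Example (illustrative): get_network_list({"name": "mgmt-net"})\r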
+        """\r
+        self.logger.debug("get network list, filter_dict: %s", filter_dict)\r
+        filter_dict = filter_dict or {}\r
+\r
+        # Get network list: step 1: get the list of distributed port groups\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            # Get the list of available distributed switches\r
+            dvs_names = self.vcenter_config.get_dvs_names(session)\r
+\r
+            # Get the list of distributed port groups for the distributed switches\r
+            dport_groups = self.vcnet_util.get_port_groups_by_dvs_name(\r
+                session, dvs_names\r
+            )\r
+            # self.logger.debug("Distributed port groups: %s", dport_groups)\r
+\r
+            network_list = []  # network list object to be returned\r
+            for port_group in dport_groups:\r
+                if filter_dict:\r
+                    if (\r
+                        filter_dict.get("id")\r
+                        and str(port_group.key) != filter_dict["id"]\r
+                    ):\r
+                        continue\r
+\r
+                    if (\r
+                        filter_dict.get("name")\r
+                        and str(port_group.name) != filter_dict["name"]\r
+                    ):\r
+                        continue\r
+\r
+                # Obtain vim network data\r
+                network_list.append(self.vcnet_util.get_vim_network_from_pg(port_group))\r
+\r
+            self.logger.debug("Network list obtained: %s", network_list)\r
+            return network_list\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    @handle_connector_exceptions\r
+    def get_network(self, net_id):\r
+        """Obtain network details from the 'net_id' VIM network\r
+        Return a dict that contains:\r
+            'id': (mandatory) VIM network id, that is, net_id\r
+            'name': (mandatory) VIM network name\r
+            'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR',\r
+            'VIM_ERROR', 'OTHER'\r
+            'error_msg': (optional) text that explains the ERROR status\r
+            other VIM specific fields: (optional) whenever possible using the same naming of\r
+            filter_dict param\r
+        Raises an exception upon error or when network is not found\r
+        """\r
+        self.logger.debug("get network id: %s", net_id)\r
+\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            vim_net = self.vcnet_util.get_vim_network_by_id(session, net_id)\r
+            return vim_net\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    @handle_connector_exceptions\r
+    def delete_network(self, net_id, created_items=None):\r
+        """\r
+        Removes a tenant network from VIM and its associated elements\r
+        :param net_id: VIM identifier of the network, provided by method new_network\r
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network\r
+        Returns the network identifier or raises an exception upon error or when network is not found\r
+        """\r
+        self.logger.debug(\r
+            "delete network id: %s, created_items: %s", net_id, created_items\r
+        )\r
+\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            # Check the network is distributed port group\r
+            if not self.vcnet_util.is_distributed_port_group(net_id):\r
+                raise vimconn.VimConnNotSupportedException(\r
+                    f"Network with id: {net_id} is not a distributed port group, deleting is not supported"\r
+                )\r
+\r
+            # Obtain the network\r
+            net = self.vcnet_util.get_network_by_id(session, net_id)\r
+            if self.vcnet_util.is_nsx_port_group(net):\r
+                raise vimconn.VimConnNotSupportedException(\r
+                    f"Network with id: {net_id} is a nsx backed network, deleting is not supported"\r
+                )\r
+\r
+            # Obtain connected vms\r
+            connected_vms = self.vcnet_util.get_distributed_port_connected_vms(net)\r
+\r
+            # Disconnect vms\r
+            self.vcvms_util.disconnect_vms_from_dpg(session, net_id, connected_vms)\r
+\r
+            # Delete the network\r
+            self.vcnet_util.delete_distributed_port_group(net)\r
+\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+    def refresh_nets_status(self, net_list):\r
+        """Get the status of the networks\r
+        Params:\r
+            'net_list': a list with the VIM network id to be get the status\r
+        Returns a dictionary with:\r
+            'net_id':         #VIM id of this network\r
+                status:     #Mandatory. Text with one of:\r
+                    #  DELETED (not found at vim)\r
+                    #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)\r
+                    #  OTHER (Vim reported other status not understood)\r
+                    #  ERROR (VIM indicates an ERROR status)\r
+                    #  ACTIVE, INACTIVE, DOWN (admin down),\r
+                    #  BUILD (on building process)\r
+                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR\r
+                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)\r
+            'net_id2': ...\r
+        """\r
+        self.logger.debug("Refresh network list %s", net_list)\r
+        net_list = net_list or []\r
+        net_dict = {}\r
+\r
+        session = self._get_vcenter_instance()\r
+        try:\r
+            for net_id in net_list:\r
+                net = {}\r
+\r
+                try:\r
+                    vim_net = self.vcnet_util.get_vim_network_by_id(session, net_id)\r
+\r
+                    net["vim_info"] = self.serialize(vim_net)\r
+                    net["status"] = vim_net.get("status", "ACTIVE")\r
+                    # vcenter does not have a status flag\r
+\r
+                except vimconn.VimConnNotFoundException as e:\r
+                    self.logger.error("Exception getting net status: %s", str(e))\r
+                    net["status"] = "DELETED"\r
+                    net["error_msg"] = str(e)\r
+                except vimconn.VimConnException as e:\r
+                    self.logger.error("Exception getting net status: %s", str(e))\r
+                    net["status"] = "VIM_ERROR"\r
+                    net["error_msg"] = str(e)\r
+                net_dict[net_id] = net\r
+\r
+        finally:\r
+            self._disconnect_si(session)\r
+\r
+        self.logger.debug("Refresh net status, result: %s", net_dict)\r
+        return net_dict\r
+\r
+    def serialize(self, value):\r
+        """Serialization of python basic types.\r
+\r
+        In the case value is not serializable a message will be logged and a\r
+        simple representation of the data that cannot be converted back to\r
+        python is returned.\r
+        """\r
+        if isinstance(value, str):\r
+            return value\r
+\r
+        try:\r
+            return yaml.dump(value, default_flow_style=True, width=256)\r
+        except yaml.representer.RepresenterError:\r
+            self.logger.debug(\r
+                "The following entity cannot be serialized in YAML:\n\n%s\n\n",\r
+                str(value),\r
+                exc_info=True,\r
+            )\r
+\r
+            return str(value)\r
+\r
+    def _generate_cloud_init_iso(self, cloud_config):\r
+        iso_path = None\r
+        tmp_dir = None\r
+\r
+        if cloud_config:\r
+            self.logger.debug("Cloud config provided, generate ISO file")\r
+            _, userdata = self._create_user_data(cloud_config)\r
+            iso_path, tmp_dir = self.cloudinit_helper.generate_cloud_init_iso(userdata)\r
+\r
+        return iso_path, tmp_dir\r
+\r
+    def _attach_cloud_init_iso(\r
+        self, vc_vmops, new_vm, iso_path, tmp_dir, created_items\r
+    ):\r
+        """\r
+        Attaches a previously generated cloud-init iso file to a vm\r
+        """\r
+\r
+        if iso_path:\r
+            # Obtain vm folder name and datastore name\r
+            folder_name = new_vm.name\r
+            datastore_name = new_vm.datastore[0].info.name\r
+            file_name = new_vm.name + "-cloud-init.iso"\r
+\r
+            # Obtain datacenter name for the datastore\r
+            datacenter_name = self.vcenter_config.get_datacenter_name(vc_vmops.session)\r
+\r
+            # Upload iso file\r
+            self.vcenter_fileuploader.upload_file(\r
+                iso_path, datacenter_name, datastore_name, folder_name, file_name\r
+            )\r
+            iso_filename = f"[{datastore_name}] {folder_name}/{file_name}"\r
+\r
+            iso_filename_txt = "cloud-init-iso:" + quote(iso_filename)\r
+            created_items[iso_filename_txt] = True\r
+\r
+            # Attach iso to vm\r
+            vc_vmops.attach_cdrom(new_vm, iso_filename)\r
+\r
+            # Delete tmp_dir\r
+            self.cloudinit_helper.delete_tmp_dir(tmp_dir)\r
+\r
+    @staticmethod\r
+    def _generate_short_suffix():\r
+        # Generate a UUID and take the first 8 characters\r
+        return str(uuid.uuid4())[:8]\r
+\r
+    def _generate_vm_name(self, vm_name):\r
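+        # e.g. "web-vm" -> "web-vm-1f2e3d4c" (suffix value is illustrative)\r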
+        return vm_name + "-" + self._generate_short_suffix()\r
+\r
+    def _generate_network_name(self, network_name):\r
+        return network_name + "-" + self._generate_short_suffix()\r
+\r
+    @staticmethod\r
+    def _format_raise_exception(exception):\r
+        """Transform a PyVmomi exception into a VimConn exception by analyzing the cause."""\r
+        logger = logging.getLogger("ro.vim.vcenter")\r
+        message_error = str(exception)\r
+\r
+        # Log the error before reraising\r
+        logger.error(f"Exception ocurred, message: {message_error}", exc_info=True)\r
+\r
+        # Reraise VimConnException directly\r
+        if isinstance(exception, vimconn.VimConnException):\r
+            raise exception\r
+        else:\r
+            # General Errors\r
+            raise vimconn.VimConnException(\r
+                f"Exception: {type(exception).__name__}: {message_error}"\r
+            )\r
+\r
+    def _get_vcenter_instance(self):\r
+        self.logger.debug(\r
+            "Connect to vcenter, hostname: %s, port: %s, " "user: %s",\r
+            self.vcenter_hostname,\r
+            self.vcenter_port,\r
+            self.user,\r
+        )\r
+        si = SmartConnect(\r
+            host=self.vcenter_hostname,\r
+            user=self.user,\r
+            pwd=self.passwd,\r
+            port=self.vcenter_port,\r
+            sslContext=self.ssl_context,\r
+        )\r
+        return si\r
+\r
+    def _disconnect_si(self, server_instance):\r
+        Disconnect(server_instance)\r
+\r
+    def _get_vcenter_content(self, server_instance):\r
+        return server_instance.RetrieveContent()\r
+\r
+    def _validate_int(self, value, var_name):\r
+        if not isinstance(value, int):\r
+            raise vimconn.VimConnException(\r
+                f"Variable '{var_name}' must be an int. Got value: {value} ({type(value).__name__})"\r
+            )\r
diff --git a/RO-VIM-vcenter/requirements.in b/RO-VIM-vcenter/requirements.in
new file mode 100644 (file)
index 0000000..8f9bcbd
--- /dev/null
@@ -0,0 +1,21 @@
+# Copyright ETSI Contributors and Others.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#    http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r
+# implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+##\r
+\r
+PyYAML\r
+requests\r
+netaddr\r
+pyvmomi\r
+\r
diff --git a/RO-VIM-vcenter/setup.py b/RO-VIM-vcenter/setup.py
new file mode 100644 (file)
index 0000000..6e93d88
--- /dev/null
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3\r
+# -*- coding: utf-8 -*-\r
+\r
+##\r
+# Copyright VMware Inc.\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#    http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r
+# implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+##\r
+\r
+from setuptools import setup\r
+\r
+_name = "osm_rovim_vcenter"\r
+_version_command = ("git describe --match v* --tags --long --dirty", "pep440-git-full")\r
+_description = "OSM ro vim plugin for vmware"\r
+_author = "OSM Support"\r
+_author_email = "osmsupport@etsi.org"\r
+_maintainer = "OSM Support"\r
+_maintainer_email = "osmsupport@etsi.org"\r
+_license = "Apache 2.0"\r
+_url = "https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary"\r
+\r
+_readme = """\r
+=================\r
+osm_rovim_vcenter\r
+=================\r
+\r
+osm-ro plugin for VMware vCenter VIM\r
+"""\r
+\r
+setup(\r
+    name=_name,\r
+    description=_description,\r
+    long_description=_readme,\r
+    version_command=_version_command,\r
+    author=_author,\r
+    author_email=_author_email,\r
+    maintainer=_maintainer,\r
+    maintainer_email=_maintainer_email,\r
+    url=_url,\r
+    license=_license,\r
+    packages=[_name],\r
+    include_package_data=True,\r
+    setup_requires=["setuptools-version-command"],\r
+    entry_points={\r
+        "osm_rovim.plugins": [\r
+            "rovim_vcenter = osm_rovim_vcenter.vimconn_vcenter:vimconnector"\r
+        ],\r
+    },\r
+)\r
diff --git a/RO-VIM-vcenter/stdeb.cfg b/RO-VIM-vcenter/stdeb.cfg
new file mode 100644 (file)
index 0000000..4765eba
--- /dev/null
@@ -0,0 +1,19 @@
+##\r
+# Copyright VMware Inc.\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#    http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\r
+# implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+##\r
+\r
+[DEFAULT]\r
+X-Python3-Version : >= 3.5\r
+Depends3: genisoimage\r
index 3d570c7..5547361 100755 (executable)
@@ -35,6 +35,7 @@ dist_ro_vim_azure
 dist_ro_vim_openstack
 dist_ro_vim_openvim
 dist_ro_vim_vmware
+dist_ro_vim_vcenter
 dist_ro_vim_gcp"
 
 TOX_ENV_LIST="$(echo $PACKAGES | sed "s/ /,/g")"
diff --git a/tox.ini b/tox.ini
index a3af3d5..b75bc7f 100644 (file)
--- a/tox.ini
+++ b/tox.ini
@@ -63,6 +63,7 @@ commands =
         black --check --diff RO-VIM-openstack
         black --check --diff RO-VIM-openvim
         black --check --diff RO-VIM-vmware
+        black --check --diff RO-VIM-vcenter
         black --check --diff RO-VIM-gcp
         black --check --diff integration-tests
 
@@ -162,6 +163,7 @@ commands =
         flake8 RO-VIM-azure/osm_rovim_azure/ RO-VIM-azure/setup.py
         flake8 RO-VIM-openstack/osm_rovim_openstack/ RO-VIM-openstack/setup.py
         flake8 RO-VIM-openvim/osm_rovim_openvim/ RO-VIM-openvim/setup.py
+        flake8 RO-VIM-vcenter/osm_rovim_vcenter/ RO-VIM-vcenter/setup.py
         flake8 RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware.py RO-VIM-vmware/setup.py
         flake8 RO-VIM-gcp/osm_rovim_gcp/ RO-VIM-gcp/setup.py
         flake8 integration-tests/
@@ -188,6 +190,7 @@ commands =
         pylint -E RO-SDN-onos_vpls/osm_rosdn_onos_vpls --disable=E1101
         pylint -E RO-SDN-tapi/osm_rosdn_tapi
         pylint -E RO-VIM-aws/osm_rovim_aws
+        pylint -E RO-VIM-vcenter/osm_rovim_vcenter
         - pylint -E RO-VIM-azure/osm_rovim_azure --disable=all
         pylint -E RO-VIM-openstack/osm_rovim_openstack --disable=E1101
         - pylint -E RO-VIM-openvim/osm_rovim_openvim --disable=all
@@ -445,6 +448,18 @@ commands =
         python3 setup.py --command-packages=stdeb.command sdist_dsc
         sh -c 'cd deb_dist/osm-rovim-vmware*/ && dpkg-buildpackage -rfakeroot -uc -us'
 
+#######################################################################################
+[testenv:dist_ro_vim_vcenter]
+deps =  {[testenv]deps}
+        -r{toxinidir}/requirements-dist.txt
+skip_install = true
+allowlist_externals = sh
+changedir = {toxinidir}/RO-VIM-vcenter
+commands =
+        sh -c 'rm -rf deb_dist dist osm_rovim_vcenter.egg-info osm_rovim_vcenter*.tar.gz'
+        python3 setup.py --command-packages=stdeb.command sdist_dsc
+        sh -c 'cd deb_dist/osm-rovim-vcenter*/ && dpkg-buildpackage -rfakeroot -uc -us'
+
 
 #######################################################################################
 [testenv:dist_ro_vim_gcp]