Fixes multiattach issues in volume attachment and deletion
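
Bumps the default Nova API microversion to 2.60, the first version that
supports attaching multiattach volumes, and adds new_shared_volumes() and
delete_shared_volumes() to create and delete shared (multiattach) volumes.
The volume-availability wait loops now accept "in-use" as a valid state for
multiattach volumes, which may stay attached to other VMs. Flavor ids are
resolved by name through the new _find_nova_server() helper, since
nova.servers.find does not return the flavor id with microversion >= 2.47.
Also supports dual-stack fixed_ips on ports and sets ipv6_address_mode and
ipv6_ra_mode on IPv6 subnets.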
diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index 959839f..8de6534 100644
--- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
+++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
@@ -339,7 +339,7 @@ class vimconnector(vimconn.VimConnector):
             version = self.config.get("microversion")
 
             if not version:
-                version = "2.1"
+                version = "2.60"
 
             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
             # Titanium cloud and StarlingX
@@ -628,6 +628,31 @@ class vimconnector(vimconn.VimConnector):
                         "Not found security group {} for this tenant".format(sg)
                     )
 
+    def _find_nova_server(self, vm_id):
+        """
+        Returns the VM instance from OpenStack, completing it with the flavor ID.
+        Do not call nova.servers.find directly, as it does not return the flavor ID with microversion >= 2.47.
+        """
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            server_dict = server.to_dict()
+            try:
+                server_dict["flavor"]["id"] = self.nova.flavors.find(
+                    name=server_dict["flavor"]["original_name"]
+                ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+            return server_dict
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
     def check_vim_connectivity(self):
         # just get network list to check connectivity and credentials
         self.get_network_list(filter_dict={})
@@ -889,7 +914,7 @@ class vimconnector(vimconn.VimConnector):
 
             if not ip_profile.get("subnet_address"):
                 # Fake subnet is required
-                subnet_rand = random.randint(0, 255)
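+                # SystemRandom draws from os.urandom instead of the default PRNG (flagged by linters, e.g. Bandit B311)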
+                subnet_rand = random.SystemRandom().randint(0, 255)
                 ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand)
 
             if "ip_version" not in ip_profile:
@@ -934,6 +959,15 @@ class vimconnector(vimconn.VimConnector):
                 ip_str = str(netaddr.IPAddress(ip_int))
                 subnet["allocation_pools"][0]["end"] = ip_str
 
+            if (
+                ip_profile.get("ipv6_address_mode")
+                and ip_profile["ip_version"] != "IPv4"
+            ):
+                subnet["ipv6_address_mode"] = ip_profile["ipv6_address_mode"]
+                # ipv6_ra_mode can be set to the same value for most use cases, see documentation:
+                # https://docs.openstack.org/neutron/latest/admin/config-ipv6.html#ipv6-ra-mode-and-ipv6-address-mode-combinations
+                subnet["ipv6_ra_mode"] = ip_profile["ipv6_address_mode"]
+
             # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
             self.neutron.create_subnet({"subnet": subnet})
 
@@ -1402,10 +1436,6 @@ class vimconnector(vimconn.VimConnector):
             extra_specs     (dict):        Extra specs dict to be updated
 
         """
-        # If there is not any numa, numas_nodes equals to 0.
-        if not numa_nodes:
-            extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
-
         # If there are several numas, we do not define specific affinity.
         extra_specs["vmware:latency_sensitivity_level"] = "high"
 
@@ -1941,8 +1971,14 @@ class vimconnector(vimconn.VimConnector):
         if net.get("mac_address"):
             port_dict["mac_address"] = net["mac_address"]
 
-        if net.get("ip_address"):
-            port_dict["fixed_ips"] = [{"ip_address": net["ip_address"]}]
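+        # "ip_address" may be a single address or a list of addresses (e.g. dual-stack IPv4 + IPv6)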
+        ip_dual_list = []
+        if ip_list := net.get("ip_address"):
+            if not isinstance(ip_list, list):
+                ip_list = [ip_list]
+            for ip in ip_list:
+                ip_dict = {"ip_address": ip}
+                ip_dual_list.append(ip_dict)
+            port_dict["fixed_ips"] = ip_dual_list
             # TODO add "subnet_id": <subnet_id>
 
     def _create_new_port(self, port_dict: dict, created_items: dict, net: dict) -> Dict:
@@ -1959,7 +1995,7 @@ class vimconnector(vimconn.VimConnector):
         """
         new_port = self.neutron.create_port({"port": port_dict})
         created_items["port:" + str(new_port["port"]["id"])] = True
-        net["mac_adress"] = new_port["port"]["mac_address"]
+        net["mac_address"] = new_port["port"]["mac_address"]
         net["vim_id"] = new_port["port"]["id"]
 
         return new_port
@@ -2134,11 +2170,59 @@ class vimconnector(vimconn.VimConnector):
                 "Created volume is not valid, does not have id attribute."
             )
 
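+        # map the volume to the next device name (vdb, vdc, ...)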
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+        if disk.get("multiattach"):  # multiattach volumes are shared, so they are not tracked in the VDU's created_items
+            return
         volume_txt = "volume:" + str(volume.id)
         if disk.get("keep"):
             volume_txt += ":keep"
         created_items[volume_txt] = True
-        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+
+    def new_shared_volumes(self, shared_volume_data) -> (str, str):
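+        """Creates a shared (multiattach) volume that several VMs can attach simultaneously.
+
+        Args:
+            shared_volume_data    (dict):   must contain "size" (GiB) and "name"
+
+        Returns:
+            (name, id) of the created volume
+
+        Note: assumes the VIM defines a Cinder volume type named "multiattach".
+        """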
+        try:
+            volume = self.cinder.volumes.create(
+                size=shared_volume_data["size"],
+                name=shared_volume_data["name"],
+                volume_type="multiattach",
+            )
+            return (volume.name, volume.id)
+        except (ConnectionError, KeyError) as e:
+            self._format_exception(e)
+
+    def _prepare_shared_volumes(
+        self,
+        name: str,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ):
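+        """Adds an existing shared (multiattach) volume, looked up by name, to the
+        block device mapping, waiting while its status is transitional (e.g. "reserved").
+        """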
+        volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
+        if volumes.get(disk["name"]):
+            sv_id = volumes[disk["name"]]
+            max_retries = 3
+            vol_status = ""
+            # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
+            while max_retries:
+                max_retries -= 1
+                volume = self.cinder.volumes.get(sv_id)
+                vol_status = volume.status
+                if volume.status not in ("in-use", "available"):
+                    time.sleep(5)
+                    continue
+                self.update_block_device_mapping(
+                    volume=volume,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    disk=disk,
+                    created_items=created_items,
+                )
+                return
+            raise vimconn.VimConnException(
+                "Shared volume is not prepared, status is: {}".format(vol_status),
+                http_code=vimconn.HTTP_Internal_Server_Error,
+            )
 
     def _prepare_non_root_persistent_volumes(
         self,
@@ -2164,17 +2248,15 @@ class vimconnector(vimconn.VimConnector):
         # Non-root persistent volumes
         # Disk may include only vim_volume_id or only vim_id."
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
         if disk.get(key_id):
             # Use existing persistent volume
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
-
         else:
-            # Create persistent volume
+            volume_name = f"{name}vd{chr(base_disk_index)}"
             volume = self.cinder.volumes.create(
                 size=disk["size"],
-                name=name + "vd" + chr(base_disk_index),
+                name=volume_name,
                 # Make sure volume is in the same AZ as the VM to be attached to
                 availability_zone=vm_av_zone,
             )
@@ -2199,7 +2281,6 @@ class vimconnector(vimconn.VimConnector):
             elapsed_time    (int):          Time spent while waiting
 
         """
-
         while elapsed_time < volume_timeout:
             for created_item in created_items:
                 v, volume_id = (
@@ -2207,7 +2288,13 @@ class vimconnector(vimconn.VimConnector):
                     created_item.split(":")[1],
                 )
                 if v == "volume":
-                    if self.cinder.volumes.get(volume_id).status != "available":
+                    volume = self.cinder.volumes.get(volume_id)
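+                    # a multiattach volume already attached to another VM reports "in-use"; it can still be attached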
+                    if (
+                        volume.volume_type == "multiattach"
+                        and volume.status == "in-use"
+                    ):
+                        return elapsed_time
+                    elif volume.status != "available":
                         break
             else:
                 # All ready: break from while
@@ -2234,7 +2321,10 @@ class vimconnector(vimconn.VimConnector):
 
         while elapsed_time < volume_timeout:
             for volume in existing_vim_volumes:
-                if self.cinder.volumes.get(volume["id"]).status != "available":
+                v = self.cinder.volumes.get(volume["id"])
+                if v.volume_type == "multiattach" and v.status == "in-use":
+                    return elapsed_time
+                elif v.status != "available":
                     break
             else:  # all ready: break from while
                 break
@@ -2268,7 +2358,6 @@ class vimconnector(vimconn.VimConnector):
         base_disk_index = ord("b")
         boot_volume_id = None
         elapsed_time = 0
-
         for disk in disk_list:
             if "image_id" in disk:
                 # Root persistent volume
@@ -2282,6 +2371,15 @@ class vimconnector(vimconn.VimConnector):
                     existing_vim_volumes=existing_vim_volumes,
                     created_items=created_items,
                 )
+            elif disk.get("multiattach"):
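+                # Shared (multiattach) disk: reuse the volume created by new_shared_volumes()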
+                self._prepare_shared_volumes(
+                    name=name,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
             else:
                 # Non-root persistent volume
                 self._prepare_non_root_persistent_volumes(
@@ -2737,7 +2835,6 @@ class vimconnector(vimconn.VimConnector):
                     server_group_id,
                 )
             )
-
             # Create VM
             server = self.nova.servers.create(
                 name=name,
@@ -2801,20 +2898,7 @@ class vimconnector(vimconn.VimConnector):
 
     def get_vminstance(self, vm_id):
         """Returns the VM instance information from VIM"""
-        # self.logger.debug("Getting VM from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
-            return server.to_dict()
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        return self._find_nova_server(vm_id)
 
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
@@ -2907,6 +2991,36 @@ class vimconnector(vimconn.VimConnector):
         except Exception as e:
             self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
 
+    def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
+        """Cinder delete volume by id.
+        Args:
+            shared_volume_vim_id    (str):                  ID of shared volume in VIM
+        """
+        elapsed_time = 0
+        try:
+            while elapsed_time < server_timeout:
+                vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
+                if vol_status == "available":
+                    self.cinder.volumes.delete(shared_volume_vim_id)
+                    return True
+
+                time.sleep(5)
+                elapsed_time += 5
+
+            if elapsed_time >= server_timeout:
+                raise vimconn.VimConnException(
+                    "Timeout waiting for volume "
+                    + shared_volume_vim_id
+                    + " to be available",
+                    http_code=vimconn.HTTP_Request_Timeout,
+                )
+
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
+
     def _delete_volumes_by_id_wth_cinder(
         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
     ) -> bool:
@@ -2984,7 +3098,6 @@ class vimconnector(vimconn.VimConnector):
 
             try:
                 k_item, k_id = self._get_item_name_id(k)
-
                 if k_item == "volume":
                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
                         k, k_id, volumes_to_hold, created_items
@@ -3599,8 +3712,7 @@ class vimconnector(vimconn.VimConnector):
         self.logger.debug("Getting the status of VM")
         self.logger.debug("VIM VM ID %s", vm_id)
         self._reload_connection()
-        server = self.nova.servers.find(id=vm_id)
-        server_dict = server.to_dict()
+        server_dict = self._find_nova_server(vm_id)
         vdu_data = [
             server_dict["status"],
             server_dict["flavor"]["id"],
@@ -3807,3 +3919,26 @@ class vimconnector(vimconn.VimConnector):
             self.__wait_for_vm(vm_id, "ACTIVE")
         instance_status = self.get_vdu_state(vm_id)[0]
         return instance_status
+
+    def get_monitoring_data(self):
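+        """Collects servers (detailed) and ports from the VIM for monitoring.
+
+        Returns:
+            (all_servers, all_ports), with each server's flavor id resolved by name
+        """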
+        try:
+            self.logger.debug("Getting servers and ports data from Openstack VIMs.")
+            self._reload_connection()
+            all_servers = self.nova.servers.list(detailed=True)
+            try:
+                for server in all_servers:
+                    server.flavor["id"] = self.nova.flavors.find(
+                        name=server.flavor["original_name"]
+                    ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+            all_ports = self.neutron.list_ports()
+            return all_servers, all_ports
+        except (
+            vimconn.VimConnException,
+            vimconn.VimConnNotFoundException,
+            vimconn.VimConnConnectionException,
+        ) as e:
+            raise vimconn.VimConnException(
+                f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+            )