Fix bug 2275
[osm/RO.git] RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
index f1df8a7..f225be4 100644
@@ -339,7 +339,7 @@ class vimconnector(vimconn.VimConnector):
             version = self.config.get("microversion")
 
             if not version:
-                version = "2.1"
+                version = "2.60"
 
             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River
             # Titanium cloud and StarlingX
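
Note: the default compute microversion moves from 2.1 to 2.60 because 2.60 is the first
microversion that lets Nova attach multiattach (shared) volumes to servers, which the
shared-volume handling added below relies on. A minimal sketch of how this string reaches
python-novaclient (endpoint and credentials are placeholders):

    from keystoneauth1.identity import v3
    from keystoneauth1 import session as ks_session
    from novaclient import client as nova_client

    auth = v3.Password(
        auth_url="https://keystone.example:5000/v3",  # placeholder endpoint
        username="osm", password="secret", project_name="admin",
        user_domain_name="Default", project_domain_name="Default",
    )
    sess = ks_session.Session(auth=auth)
    # The microversion travels as the OpenStack-API-Version header on every request
    nova = nova_client.Client("2.60", session=sess)
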
@@ -567,9 +567,15 @@ class vimconnector(vimconn.VimConnector):
                 ksExceptions.BadRequest,
             ),
         ):
+            if message_error == "OS-EXT-SRV-ATTR:host":
+                tip = " (If the user does not have admin credentials, this attribute will be missing)"
+                raise vimconn.VimConnInsufficientCredentials(
+                    type(exception).__name__ + ": " + message_error + tip
+                )
             raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
+
         elif isinstance(
             exception,
             (
@@ -628,6 +634,31 @@ class vimconnector(vimconn.VimConnector):
                         "Not found security group {} for this tenant".format(sg)
                     )
 
+    def _find_nova_server(self, vm_id):
+        """
+        Returns the VM instance from OpenStack, completed with its flavor ID.
+        Do not call nova.servers.find directly: with microversion >= 2.47 it does not return the flavor ID.
+        """
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            server_dict = server.to_dict()
+            try:
+                server_dict["flavor"]["id"] = self.nova.flavors.find(
+                    name=server_dict["flavor"]["original_name"]
+                ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+            return server_dict
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
     def check_vim_connectivity(self):
         # just get network list to check connectivity and credentials
         self.get_network_list(filter_dict={})
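
Note: with compute microversion >= 2.47 the server body embeds the flavor description
instead of a flavor reference, so server.to_dict()["flavor"] carries "original_name"
(plus vCPU/RAM/disk details) but no "id" key; _find_nova_server recovers the ID by
looking the flavor up by name. The two shapes, with illustrative values:

    # microversion < 2.47: flavor is a reference
    {"flavor": {"id": "c46be442-9a97-4733-8d77-6d7e6bb234f3", "links": [...]}}

    # microversion >= 2.47: flavor is embedded, no "id"
    {"flavor": {"original_name": "m1.small", "vcpus": 1, "ram": 2048, "disk": 20}}
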
@@ -2145,11 +2176,59 @@ class vimconnector(vimconn.VimConnector):
                 "Created volume is not valid, does not have id attribute."
             )
 
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+        if disk.get("multiattach"):  # multiattach volumes do not belong to VDUs
+            return
         volume_txt = "volume:" + str(volume.id)
         if disk.get("keep"):
             volume_txt += ":keep"
         created_items[volume_txt] = True
-        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+
+    def new_shared_volumes(self, shared_volume_data) -> (str, str):
+        try:
+            volume = self.cinder.volumes.create(
+                size=shared_volume_data["size"],
+                name=shared_volume_data["name"],
+                volume_type="multiattach",
+            )
+            return (volume.name, volume.id)
+        except (ConnectionError, KeyError) as e:
+            self._format_exception(e)
+
+    def _prepare_shared_volumes(
+        self,
+        name: str,
+        disk: dict,
+        base_disk_index: int,
+        block_device_mapping: dict,
+        existing_vim_volumes: list,
+        created_items: dict,
+    ):
+        volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
+        if volumes.get(disk["name"]):
+            sv_id = volumes[disk["name"]]
+            max_retries = 3
+            vol_status = ""
+            # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
+            while max_retries:
+                max_retries -= 1
+                volume = self.cinder.volumes.get(sv_id)
+                vol_status = volume.status
+                if volume.status not in ("in-use", "available"):
+                    time.sleep(5)
+                    continue
+                self.update_block_device_mapping(
+                    volume=volume,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    disk=disk,
+                    created_items=created_items,
+                )
+                return
+            raise vimconn.VimConnException(
+                "Shared volume is not prepared, status is: {}".format(vol_status),
+                http_code=vimconn.HTTP_Internal_Server_Error,
+            )
 
     def _prepare_non_root_persistent_volumes(
         self,
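
Note: the block_device_mapping entry (vdb, vdc, ... derived from base_disk_index) is now
written before the multiattach early-return, so shared volumes still reach the server's
block device mapping but are deliberately kept out of created_items: they belong to no
single VDU and must survive VM deletion. new_shared_volumes assumes the Cinder
installation defines a volume type named "multiattach" whose extra specs enable
multiattach; a hypothetical call, following the shared_volume_data keys used above:

    # Hypothetical usage sketch: create a 10 GiB shared (multiattach) volume
    name, vim_id = vim.new_shared_volumes({"name": "shared-data", "size": 10})
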
@@ -2175,17 +2254,15 @@ class vimconnector(vimconn.VimConnector):
         # Non-root persistent volumes
         # Disk may include only vim_volume_id or only vim_id.
         key_id = "vim_volume_id" if "vim_volume_id" in disk.keys() else "vim_id"
-
         if disk.get(key_id):
             # Use existing persistent volume
             block_device_mapping["vd" + chr(base_disk_index)] = disk[key_id]
             existing_vim_volumes.append({"id": disk[key_id]})
-
         else:
-            # Create persistent volume
+            volume_name = f"{name}vd{chr(base_disk_index)}"
             volume = self.cinder.volumes.create(
                 size=disk["size"],
-                name=name + "vd" + chr(base_disk_index),
+                name=volume_name,
                 # Make sure volume is in the same AZ as the VM to be attached to
                 availability_zone=vm_av_zone,
             )
@@ -2210,7 +2287,6 @@ class vimconnector(vimconn.VimConnector):
             elapsed_time    (int):          Time spent while waiting
 
         """
-
         while elapsed_time < volume_timeout:
             for created_item in created_items:
                 v, volume_id = (
@@ -2218,7 +2294,13 @@ class vimconnector(vimconn.VimConnector):
                     created_item.split(":")[1],
                 )
                 if v == "volume":
-                    if self.cinder.volumes.get(volume_id).status != "available":
+                    volume = self.cinder.volumes.get(volume_id)
+                    if (
+                        volume.volume_type == "multiattach"
+                        and volume.status == "in-use"
+                    ):
+                        return elapsed_time
+                    elif volume.status != "available":
                         break
             else:
                 # All ready: break from while
@@ -2245,7 +2327,10 @@ class vimconnector(vimconn.VimConnector):
 
         while elapsed_time < volume_timeout:
             for volume in existing_vim_volumes:
-                if self.cinder.volumes.get(volume["id"]).status != "available":
+                v = self.cinder.volumes.get(volume["id"])
+                if v.volume_type == "multiattach" and v.status == "in-use":
+                    return elapsed_time
+                elif v.status != "available":
                     break
             else:  # all ready: break from while
                 break
@@ -2279,7 +2364,6 @@ class vimconnector(vimconn.VimConnector):
         base_disk_index = ord("b")
         boot_volume_id = None
         elapsed_time = 0
-
         for disk in disk_list:
             if "image_id" in disk:
                 # Root persistent volume
@@ -2293,6 +2377,15 @@ class vimconnector(vimconn.VimConnector):
                     existing_vim_volumes=existing_vim_volumes,
                     created_items=created_items,
                 )
+            elif disk.get("multiattach"):
+                self._prepare_shared_volumes(
+                    name=name,
+                    disk=disk,
+                    base_disk_index=base_disk_index,
+                    block_device_mapping=block_device_mapping,
+                    existing_vim_volumes=existing_vim_volumes,
+                    created_items=created_items,
+                )
             else:
                 # Non-root persistent volume
                 self._prepare_non_root_persistent_volumes(
@@ -2748,7 +2841,6 @@ class vimconnector(vimconn.VimConnector):
                     server_group_id,
                 )
             )
-
             # Create VM
             server = self.nova.servers.create(
                 name=name,
@@ -2812,20 +2904,7 @@ class vimconnector(vimconn.VimConnector):
 
     def get_vminstance(self, vm_id):
         """Returns the VM instance information from VIM"""
-        # self.logger.debug("Getting VM from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
-            return server.to_dict()
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        return self._find_nova_server(vm_id)
 
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
@@ -2909,15 +2988,41 @@ class vimconnector(vimconn.VimConnector):
             k_id    (str):      Port id in the VIM
         """
         try:
-            port_dict = self.neutron.list_ports()
-            existing_ports = [port["id"] for port in port_dict["ports"] if port_dict]
-
-            if k_id in existing_ports:
-                self.neutron.delete_port(k_id)
+            self.neutron.delete_port(k_id)
 
         except Exception as e:
             self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
 
+    def delete_shared_volumes(self, shared_volume_vim_id: str) -> bool:
+        """Cinder delete volume by id.
+        Args:
+            shared_volume_vim_id    (str):                  ID of shared volume in VIM
+        """
+        elapsed_time = 0
+        try:
+            while elapsed_time < server_timeout:
+                vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
+                if vol_status == "available":
+                    self.cinder.volumes.delete(shared_volume_vim_id)
+                    return True
+
+                time.sleep(5)
+                elapsed_time += 5
+
+            if elapsed_time >= server_timeout:
+                raise vimconn.VimConnException(
+                    "Timeout waiting for volume "
+                    + shared_volume_vim_id
+                    + " to be available",
+                    http_code=vimconn.HTTP_Request_Timeout,
+                )
+
+        except Exception as e:
+            self.logger.error(
+                "Error deleting volume: {}: {}".format(type(e).__name__, e)
+            )
+            self._format_exception(e)
+
     def _delete_volumes_by_id_wth_cinder(
         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
     ) -> bool:
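
Note: delete_shared_volumes polls until the volume reports "available", i.e. until all
attachments have been released by deleting the consuming VMs, before deleting it; the
timeout reuses the server_timeout constant assumed to be defined at module level in this
file. Hypothetical usage:

    # Hypothetical usage sketch: remove a shared volume once no VM uses it anymore
    vim.delete_shared_volumes("0f81fe7a-...")  # shared volume VIM ID (placeholder)
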
@@ -2995,7 +3100,6 @@ class vimconnector(vimconn.VimConnector):
 
             try:
                 k_item, k_id = self._get_item_name_id(k)
-
                 if k_item == "volume":
                     unavailable_vol = self._delete_volumes_by_id_wth_cinder(
                         k, k_id, volumes_to_hold, created_items
@@ -3601,25 +3705,35 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
-    def get_vdu_state(self, vm_id):
-        """
-        Getting the state of a vdu
-        param:
-            vm_id: ID of an instance
+    def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+        """Get the state of a VDU.
+        Args:
+            vm_id   (str): ID of an instance
+            host_is_required    (bool): If the VIM account is non-admin, host info is missing from
+                                        server_dict; setting this to True then raises a KeyError.
+        Returns:
+            vdu_data    (list): VDU details: state, flavor ID, host info, availability zone
         """
         self.logger.debug("Getting the status of VM")
         self.logger.debug("VIM VM ID %s", vm_id)
-        self._reload_connection()
-        server = self.nova.servers.find(id=vm_id)
-        server_dict = server.to_dict()
-        vdu_data = [
-            server_dict["status"],
-            server_dict["flavor"]["id"],
-            server_dict["OS-EXT-SRV-ATTR:host"],
-            server_dict["OS-EXT-AZ:availability_zone"],
-        ]
-        self.logger.debug("vdu_data %s", vdu_data)
-        return vdu_data
+        try:
+            self._reload_connection()
+            server_dict = self._find_nova_server(vm_id)
+            srv_attr = "OS-EXT-SRV-ATTR:host"
+            host_info = (
+                server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+            )
+            vdu_data = [
+                server_dict["status"],
+                server_dict["flavor"]["id"],
+                host_info,
+                server_dict["OS-EXT-AZ:availability_zone"],
+            ]
+            self.logger.debug("vdu_data %s", vdu_data)
+            return vdu_data
+
+        except Exception as e:
+            self._format_exception(e)
 
     def check_compute_availability(self, host, server_flavor_details):
         self._reload_connection()
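
Note: get_vdu_state keeps its positional list shape, so existing callers are unaffected;
with the default host_is_required=False the host slot may now be None for non-admin
accounts instead of raising. Usage sketch:

    # Usage sketch: positional unpacking of the returned list
    status, flavor_id, host, az = vim.get_vdu_state(vm_id)
    if host is None:
        # non-admin credentials: Nova policy hides OS-EXT-SRV-ATTR:host
        pass
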
@@ -3686,7 +3800,7 @@ class vimconnector(vimconn.VimConnector):
         """
         self._reload_connection()
         vm_state = False
-        instance_state = self.get_vdu_state(vm_id)
+        instance_state = self.get_vdu_state(vm_id, host_is_required=True)
         server_flavor_id = instance_state[1]
         server_hypervisor_name = instance_state[2]
         server_availability_zone = instance_state[3]
@@ -3721,17 +3835,19 @@ class vimconnector(vimconn.VimConnector):
                     http_code=vimconn.HTTP_Bad_Request,
                 )
             if available_compute_id is not None:
+                # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
                 self.nova.servers.live_migrate(
                     server=vm_id,
                     host=available_compute_id,
                     block_migration=True,
-                    disk_over_commit=False,
                 )
                 state = "MIGRATING"
                 changed_compute_host = ""
                 if state == "MIGRATING":
                     vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
-                    changed_compute_host = self.get_vdu_state(vm_id)[2]
+                    changed_compute_host = self.get_vdu_state(
+                        vm_id, host_is_required=True
+                    )[2]
                 if vm_state and changed_compute_host == available_compute_id:
                     self.logger.debug(
                         "Instance '{}' migrated to the new compute host '{}'".format(
@@ -3824,6 +3940,13 @@ class vimconnector(vimconn.VimConnector):
             self.logger.debug("Getting servers and ports data from Openstack VIMs.")
             self._reload_connection()
             all_servers = self.nova.servers.list(detailed=True)
+            try:
+                for server in all_servers:
+                    server.flavor["id"] = self.nova.flavors.find(
+                        name=server.flavor["original_name"]
+                    ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
             all_ports = self.neutron.list_ports()
             return all_servers, all_ports
         except (