Bug 2109. Fix VIM info DB update after vertical scaling, migrate and operate actions
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
index 82a1e37..70b3f21 100644 (file)
@@ -567,9 +567,15 @@ class vimconnector(vimconn.VimConnector):
                 ksExceptions.BadRequest,
             ),
         ):
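+            # A failure on "OS-EXT-SRV-ATTR:host" means the VIM user lacks the admin rights needed to see the host attribute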
+            if message_error == "OS-EXT-SRV-ATTR:host":
+                tip = " (If the user does not have admin credentials, this attribute will be missing)"
+                raise vimconn.VimConnInsufficientCredentials(
+                    type(exception).__name__ + ": " + message_error + tip
+                )
             raise vimconn.VimConnException(
                 type(exception).__name__ + ": " + message_error
             )
+
         elif isinstance(
             exception,
             (
@@ -628,6 +634,31 @@ class vimconnector(vimconn.VimConnector):
                         "Not found security group {} for this tenant".format(sg)
                     )
 
+    def _find_nova_server(self, vm_id):
+        """
+        Returns the VM instance from Openstack, completed with its flavor ID.
+        Do not call nova.servers.find directly: with compute API microversion >= 2.47
+        it does not return the flavor ID, only the embedded flavor details.
+        """
+        try:
+            self._reload_connection()
+            server = self.nova.servers.find(id=vm_id)
+            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+            server_dict = server.to_dict()
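+            # The embedded flavor carries only its name (original_name); look up the matching flavor ID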
+            try:
+                server_dict["flavor"]["id"] = self.nova.flavors.find(
+                    name=server_dict["flavor"]["original_name"]
+                ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
+            return server_dict
+        except (
+            ksExceptions.ClientException,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+            ConnectionError,
+        ) as e:
+            self._format_exception(e)
+
     def check_vim_connectivity(self):
         # just get network list to check connectivity and credentials
         self.get_network_list(filter_dict={})
@@ -2145,11 +2176,13 @@ class vimconnector(vimconn.VimConnector):
                 "Created volume is not valid, does not have id attribute."
             )
 
+        # Map the volume to its device name (e.g. vdb, vdc), derived from base_disk_index
+        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
+        if disk.get("multiattach"):  # shared (multiattach) volumes do not belong to a single VDU
+            return
         volume_txt = "volume:" + str(volume.id)
         if disk.get("keep"):
             volume_txt += ":keep"
         created_items[volume_txt] = True
-        block_device_mapping["vd" + chr(base_disk_index)] = volume.id
 
     def new_shared_volumes(self, shared_volume_data) -> (str, str):
         try:
@@ -2174,13 +2207,27 @@ class vimconnector(vimconn.VimConnector):
         volumes = {volume.name: volume.id for volume in self.cinder.volumes.list()}
         if volumes.get(disk["name"]):
             sv_id = volumes[disk["name"]]
-            volume = self.cinder.volumes.get(sv_id)
-            self.update_block_device_mapping(
-                volume=volume,
-                block_device_mapping=block_device_mapping,
-                base_disk_index=base_disk_index,
-                disk=disk,
-                created_items=created_items,
+            max_retries = 3
+            vol_status = ""
+            # If this is not the first VM to attach the volume, volume status may be "reserved" for a short time
+            while max_retries:
+                max_retries -= 1
+                volume = self.cinder.volumes.get(sv_id)
+                vol_status = volume.status
+                if volume.status not in ("in-use", "available"):
+                    time.sleep(5)
+                    continue
+                self.update_block_device_mapping(
+                    volume=volume,
+                    block_device_mapping=block_device_mapping,
+                    base_disk_index=base_disk_index,
+                    disk=disk,
+                    created_items=created_items,
+                )
+                return
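+            # Retries exhausted: the shared volume never reached "in-use" or "available" status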
+            raise vimconn.VimConnException(
+                "Shared volume is not prepared, status is: {}".format(vol_status),
+                http_code=vimconn.HTTP_Internal_Server_Error,
             )
 
     def _prepare_non_root_persistent_volumes(
@@ -2857,20 +2904,7 @@ class vimconnector(vimconn.VimConnector):
 
     def get_vminstance(self, vm_id):
         """Returns the VM instance information from VIM"""
-        # self.logger.debug("Getting VM from VIM")
-        try:
-            self._reload_connection()
-            server = self.nova.servers.find(id=vm_id)
-            # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
-
-            return server.to_dict()
-        except (
-            ksExceptions.ClientException,
-            nvExceptions.ClientException,
-            nvExceptions.NotFound,
-            ConnectionError,
-        ) as e:
-            self._format_exception(e)
+        return self._find_nova_server(vm_id)
 
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
@@ -2968,17 +3002,30 @@ class vimconnector(vimconn.VimConnector):
         Args:
             shared_volume_vim_id    (str):                  ID of shared volume in VIM
         """
+        elapsed_time = 0
         try:
-            if self.cinder.volumes.get(shared_volume_vim_id).status != "available":
-                return True
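+            # The volume can only be deleted once it is no longer attached to any VM, i.e. its status is "available"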
+            while elapsed_time < server_timeout:
+                vol_status = self.cinder.volumes.get(shared_volume_vim_id).status
+                if vol_status == "available":
+                    self.cinder.volumes.delete(shared_volume_vim_id)
+                    return True
 
-            else:
-                self.cinder.volumes.delete(shared_volume_vim_id)
+                time.sleep(5)
+                elapsed_time += 5
+
+            if elapsed_time >= server_timeout:
+                raise vimconn.VimConnException(
+                    "Timeout waiting for volume "
+                    + shared_volume_vim_id
+                    + " to be available",
+                    http_code=vimconn.HTTP_Request_Timeout,
+                )
 
         except Exception as e:
             self.logger.error(
                 "Error deleting volume: {}: {}".format(type(e).__name__, e)
             )
+            self._format_exception(e)
 
     def _delete_volumes_by_id_wth_cinder(
         self, k: str, k_id: str, volumes_to_hold: list, created_items: dict
@@ -3662,25 +3709,35 @@ class vimconnector(vimconn.VimConnector):
         ) as e:
             self._format_exception(e)
 
-    def get_vdu_state(self, vm_id):
-        """
-        Getting the state of a vdu
-        param:
-            vm_id: ID of an instance
+    def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+        """Getting the state of a VDU.
+        Args:
+            vm_id   (str): ID of an instance
+            host_is_required    (Boolean): If the VIM account is non-admin, host info is not present in
+                                           server_dict; setting this to True in that case raises a KeyError.
+        Returns:
+            vdu_data    (list): VDU details: status, flavor ID, host info and availability zone
         """
         self.logger.debug("Getting the status of VM")
         self.logger.debug("VIM VM ID %s", vm_id)
-        self._reload_connection()
-        server = self.nova.servers.find(id=vm_id)
-        server_dict = server.to_dict()
-        vdu_data = [
-            server_dict["status"],
-            server_dict["flavor"]["id"],
-            server_dict["OS-EXT-SRV-ATTR:host"],
-            server_dict["OS-EXT-AZ:availability_zone"],
-        ]
-        self.logger.debug("vdu_data %s", vdu_data)
-        return vdu_data
+        try:
+            self._reload_connection()
+            server_dict = self._find_nova_server(vm_id)
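+            # "OS-EXT-SRV-ATTR:host" is only returned for admin credentials; index it directly when required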
+            srv_attr = "OS-EXT-SRV-ATTR:host"
+            host_info = (
+                server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+            )
+            vdu_data = [
+                server_dict["status"],
+                server_dict["flavor"]["id"],
+                host_info,
+                server_dict["OS-EXT-AZ:availability_zone"],
+            ]
+            self.logger.debug("vdu_data %s", vdu_data)
+            return vdu_data
+
+        except Exception as e:
+            self._format_exception(e)
 
     def check_compute_availability(self, host, server_flavor_details):
         self._reload_connection()
@@ -3747,7 +3804,7 @@ class vimconnector(vimconn.VimConnector):
         """
         self._reload_connection()
         vm_state = False
-        instance_state = self.get_vdu_state(vm_id)
+        instance_state = self.get_vdu_state(vm_id, host_is_required=True)
         server_flavor_id = instance_state[1]
         server_hypervisor_name = instance_state[2]
         server_availability_zone = instance_state[3]
@@ -3782,17 +3839,19 @@ class vimconnector(vimconn.VimConnector):
                     http_code=vimconn.HTTP_Bad_Request,
                 )
             if available_compute_id is not None:
+                # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
                 self.nova.servers.live_migrate(
                     server=vm_id,
                     host=available_compute_id,
                     block_migration=True,
-                    disk_over_commit=False,
                 )
                 state = "MIGRATING"
                 changed_compute_host = ""
                 if state == "MIGRATING":
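+                    # Wait until the VM is ACTIVE again, then check that it landed on the requested compute host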
                     vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
-                    changed_compute_host = self.get_vdu_state(vm_id)[2]
+                    changed_compute_host = self.get_vdu_state(
+                        vm_id, host_is_required=True
+                    )[2]
                 if vm_state and changed_compute_host == available_compute_id:
                     self.logger.debug(
                         "Instance '{}' migrated to the new compute host '{}'".format(
@@ -3885,6 +3944,13 @@ class vimconnector(vimconn.VimConnector):
             self.logger.debug("Getting servers and ports data from Openstack VIMs.")
             self._reload_connection()
             all_servers = self.nova.servers.list(detailed=True)
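+            # With compute API microversion >= 2.47 servers embed the flavor by name only; resolve each flavor ID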
+            try:
+                for server in all_servers:
+                    server.flavor["id"] = self.nova.flavors.find(
+                        name=server.flavor["original_name"]
+                    ).id
+            except nClient.exceptions.NotFound as e:
+                self.logger.warning(str(e.message))
             all_ports = self.neutron.list_ports()
             return all_servers, all_ports
         except (