Fix Bug 2012: use existing volumes as instantiation parameters
[osm/RO.git] / RO-VIM-openstack / osm_rovim_openstack / vimconn_openstack.py
index 6b46dc1..d9edb32 100644
@@ -1369,7 +1369,6 @@ class vimconnector(vimconn.VimConnector):
                                     "Invalid mempage-size %s. Will be ignored",
                                     extended.get("mempage-size"),
                                 )
-
                     # create flavor
                     new_flavor = self.nova.flavors.create(
                         name=name,
@@ -1898,32 +1897,55 @@ class vimconnector(vimconn.VimConnector):
             if disk_list:
                 block_device_mapping = {}
                 for disk in disk_list:
-                    if disk.get("vim_id"):
-                        block_device_mapping["_vd" + chr(base_disk_index)] = disk[
-                            "vim_id"
-                        ]
-                        existing_vim_volumes.append({"id": disk["vim_id"]})
-                    else:
-                        if "image_id" in disk:
-                            base_disk_index = ord("a")
+                    if "image_id" in disk:
+                        # persistent root volume
+                        base_disk_index = ord("a")
+                        image_id = ""
+                        if disk.get("vim_volume_id"):
+
+                            # use existing persistent root volume
+                            block_device_mapping["vd" + chr(base_disk_index)] = disk[
+                                "vim_volume_id"
+                            ]
+                            existing_vim_volumes.append({"id": disk["vim_volume_id"]})
+
+                        else:
+                            # create persistent root volume
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
-                                name=name + "_vd" + chr(base_disk_index),
+                                name=name + "vd" + chr(base_disk_index),
                                 imageRef=disk["image_id"],
                                 # Make sure volume is in the same AZ as the VM to be attached to
                                 availability_zone=vm_av_zone,
                             )
                             boot_volume_id = volume.id
+                            created_items["volume:" + str(volume.id)] = True
+                            block_device_mapping[
+                                "vd" + chr(base_disk_index)
+                            ] = volume.id
+                    else:
+                        # non-root persistent volume
+                        if disk.get("vim_volume_id"):
+
+                            # use existing persistent volume
+                            block_device_mapping["vd" + chr(base_disk_index)] = disk[
+                                "vim_volume_id"
+                            ]
+                            existing_vim_volumes.append({"id": disk["vim_volume_id"]})
+
                         else:
+
+                            # create persistent volume
                             volume = self.cinder.volumes.create(
                                 size=disk["size"],
-                                name=name + "_vd" + chr(base_disk_index),
+                                name=name + "vd" + chr(base_disk_index),
                                 # Make sure volume is in the same AZ as the VM to be attached to
                                 availability_zone=vm_av_zone,
                             )
-
-                        created_items["volume:" + str(volume.id)] = True
-                        block_device_mapping["_vd" + chr(base_disk_index)] = volume.id
+                            created_items["volume:" + str(volume.id)] = True
+                            block_device_mapping[
+                                "vd" + chr(base_disk_index)
+                            ] = volume.id
 
                     base_disk_index += 1
 
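For reference, a minimal sketch of how a disk_list entry drives the two branches above. The keys mirror those checked in the hunk; the UUIDs and sizes are hypothetical placeholders:

    disk_list = [
        # root disk: "vim_volume_id" is set, so the existing persistent
        # root volume is reused instead of creating one from the image
        {
            "image_id": "f8e8b7e2-0000-0000-0000-000000000000",
            "size": 10,
            "vim_volume_id": "11111111-2222-3333-4444-555555555555",
        },
        # data disk: no "vim_volume_id", so a new 20 GB volume is created
        {"size": 20},
    ]
    # resulting block_device_mapping:
    #   {"vda": "11111111-2222-3333-4444-555555555555", "vdb": <new volume id>}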
@@ -1988,9 +2010,9 @@ class vimconnector(vimconn.VimConnector):
                 )
             )
             server = self.nova.servers.create(
-                name,
-                image_id,
-                flavor_id,
+                name=name,
+                image=image_id,
+                flavor=flavor_id,
                 nics=net_list_vim,
                 security_groups=self.config.get("security_groups"),
                 # TODO remove security_groups in future versions. Already at neutron port
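Passing name, image and flavor by keyword rather than positionally makes the create call explicit; it also reads unambiguously in the boot-from-volume case, where image_id has been set to "" and the root disk arrives through block_device_mapping. A hedged sketch showing only the keywords relevant here:

    server = self.nova.servers.create(
        name=name,
        image=image_id,  # "" when a persistent root volume supplies the boot disk
        flavor=flavor_id,
        nics=net_list_vim,
        block_device_mapping=block_device_mapping,
    )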
@@ -2295,7 +2317,12 @@ class vimconnector(vimconn.VimConnector):
                 try:
                     k_item, _, k_id = k.partition(":")
                     if k_item == "port":
-                        self.neutron.delete_port(k_id)
+                        port_dict = self.neutron.list_ports()
+                        existing_ports = [
+                            port["id"] for port in port_dict["ports"]
+                        ]
+                        if k_id in existing_ports:
+                            self.neutron.delete_port(k_id)
                 except Exception as e:
                     self.logger.error(
                         "Error deleting port: {}: {}".format(type(e).__name__, e)
@@ -2514,12 +2541,29 @@ class vimconnector(vimconn.VimConnector):
                         server.resume()
                     elif server.status == "SHUTOFF":
                         server.start()
+                    else:
+                        self.logger.debug(
+                            "ERROR: Instance is not in SHUTOFF/PAUSED/SUSPENDED state"
+                        )
+                        raise vimconn.VimConnException(
+                            "Cannot 'start' instance while it is in active state",
+                            http_code=vimconn.HTTP_Bad_Request,
+                        )
+
             elif "pause" in action_dict:
                 server.pause()
             elif "resume" in action_dict:
                 server.resume()
             elif "shutoff" in action_dict or "shutdown" in action_dict:
-                server.stop()
+                self.logger.debug("server status %s", server.status)
+                if server.status == "ACTIVE":
+                    server.stop()
+                else:
+                    self.logger.debug("ERROR: VM is not in ACTIVE state")
+                    raise vimconn.VimConnException(
+                        "VM is not in active state, stop operation is not allowed",
+                        http_code=vimconn.HTTP_Bad_Request,
+                    )
             elif "forceOff" in action_dict:
                 server.stop()  # TODO
             elif "terminate" in action_dict:
@@ -3718,3 +3762,62 @@ class vimconnector(vimconn.VimConnector):
             nvExceptions.NotFound,
         ) as e:
             self._format_exception(e)
+
+    def resize_instance(self, vm_id, new_flavor_id):
+        """
+        Resize an instance using the given
+        flavor details
+        param:
+            vm_id : ID of the instance
+            new_flavor_id : ID of the flavor to resize to
+        Return the status of the resized instance
+        """
+        self._reload_connection()
+        self.logger.debug("Resizing instance %s to flavor %s", vm_id, new_flavor_id)
+        instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
+        old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
+        new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
+        try:
+            if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
+                if old_flavor_disk > new_flavor_disk:
+                    raise nvExceptions.BadRequest(
+                        400,
+                        message="Server disk resize failed. Resizing to a smaller disk flavor is not allowed",
+                    )
+                else:
+                    self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+                    vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+                    if vm_state:
+                        instance_resized_status = self.confirm_resize(vm_id)
+                        return instance_resized_status
+                    else:
+                        raise nvExceptions.BadRequest(
+                            409,
+                            message="Cannot 'resize': VM did not reach VERIFY_RESIZE state",
+                        )
+
+            else:
+                self.logger.debug("ERROR: Instance is not in ACTIVE or SHUTOFF state")
+                raise nvExceptions.BadRequest(
+                    409,
+                    message="Cannot 'resize' instance unless it is in ACTIVE or SHUTOFF state",
+                )
+        except (
+            nvExceptions.BadRequest,
+            nvExceptions.ClientException,
+            nvExceptions.NotFound,
+        ) as e:
+            self._format_exception(e)
+
+    def confirm_resize(self, vm_id):
+        """
+        Confirm the resize of an instance
+        param:
+            vm_id: ID of an instance
+        """
+        self._reload_connection()
+        self.nova.servers.confirm_resize(server=vm_id)
+        if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
+            self.__wait_for_vm(vm_id, "ACTIVE")
+        instance_status = self.get_vdu_state(vm_id)[0]
+        return instance_status
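Together the two methods follow Nova's two-step resize flow: resize_instance() issues the resize and waits for VERIFY_RESIZE, then confirm_resize() finalizes it (the alternative, nova.servers.revert_resize, is not exposed here). A hedged usage sketch, with vim_conn and the IDs as hypothetical placeholders:

    status = vim_conn.resize_instance(vm_id, new_flavor_id)
    # on success the VM has been confirmed into the new flavor;
    # for a running VM the returned status is "ACTIVE"
    print(status)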