X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=RO-VIM-openstack%2Fosm_rovim_openstack%2Fvimconn_openstack.py;h=02d7e85f1c58d7baa02be72f45567457b60d5c0e;hb=dca2c239b699d258c2906292182cff330157a957;hp=6b46dc1197d4e4cb13fcdbd7331df0f04961cd05;hpb=8658c2cd33bcea66c9b99aabb0825071c5c42df0;p=osm%2FRO.git diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py index 6b46dc11..02d7e85f 100644 --- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py +++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py @@ -753,7 +753,7 @@ class vimconnector(vimconn.VimConnector): self._reload_connection() network_dict = {"name": net_name, "admin_state_up": True} - if net_type in ("data", "ptp"): + if net_type in ("data", "ptp") or provider_network_profile: provider_physical_network = None if provider_network_profile and provider_network_profile.get( @@ -1283,13 +1283,7 @@ class vimconnector(vimconn.VimConnector): if numas: numa_nodes = len(numas) - if numa_nodes > 1: - return -1, "Can not add flavor with more than one numa" - extra_specs["hw:numa_nodes"] = str(numa_nodes) - extra_specs["hw:mem_page_size"] = "large" - extra_specs["hw:cpu_policy"] = "dedicated" - extra_specs["hw:numa_mempolicy"] = "strict" if self.vim_type == "VIO": extra_specs[ @@ -1298,13 +1292,25 @@ class vimconnector(vimconn.VimConnector): extra_specs["vmware:latency_sensitivity_level"] = "high" for numa in numas: + if "id" in numa: + node_id = numa["id"] + + if "memory" in numa: + memory_mb = numa["memory"] * 1024 + memory = "hw:numa_mem.{}".format(node_id) + extra_specs[memory] = int(memory_mb) + + if "vcpu" in numa: + vcpu = numa["vcpu"] + cpu = "hw:numa_cpus.{}".format(node_id) + vcpu = ",".join(map(str, vcpu)) + extra_specs[cpu] = vcpu + # overwrite ram and vcpus # check if key "memory" is present in numa else use ram value at flavor - if "memory" in numa: - ram = numa["memory"] * 1024 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/ # implemented/virt-driver-cpu-thread-pinning.html - extra_specs["hw:cpu_sockets"] = 1 + extra_specs["hw:cpu_sockets"] = str(numa_nodes) if "paired-threads" in numa: vcpus = numa["paired-threads"] * 2 @@ -1369,6 +1375,22 @@ class vimconnector(vimconn.VimConnector): "Invalid mempage-size %s. 
Will be ignored", extended.get("mempage-size"), ) + if extended.get("cpu-pinning-policy"): + extra_specs["hw:cpu_policy"] = extended.get( + "cpu-pinning-policy" + ).lower() + + # Set the cpu thread pinning policy as specified in the descriptor + if extended.get("cpu-thread-pinning-policy"): + extra_specs["hw:cpu_thread_policy"] = extended.get( + "cpu-thread-pinning-policy" + ).lower() + + # Set the mem policy as specified in the descriptor + if extended.get("mem-policy"): + extra_specs["hw:numa_mempolicy"] = extended.get( + "mem-policy" + ).lower() # create flavor new_flavor = self.nova.flavors.create( @@ -1898,32 +1920,61 @@ class vimconnector(vimconn.VimConnector): if disk_list: block_device_mapping = {} for disk in disk_list: - if disk.get("vim_id"): - block_device_mapping["_vd" + chr(base_disk_index)] = disk[ - "vim_id" - ] - existing_vim_volumes.append({"id": disk["vim_id"]}) - else: - if "image_id" in disk: - base_disk_index = ord("a") + if "image_id" in disk: + # persistent root volume + base_disk_index = ord("a") + image_id = "" + # use existing persistent root volume + if disk.get("vim_volume_id"): + block_device_mapping["vd" + chr(base_disk_index)] = disk[ + "vim_volume_id" + ] + existing_vim_volumes.append({"id": disk["vim_volume_id"]}) + # use existing persistent root volume + elif disk.get("vim_id"): + block_device_mapping["vd" + chr(base_disk_index)] = disk[ + "vim_id" + ] + existing_vim_volumes.append({"id": disk["vim_id"]}) + else: + # create persistent root volume volume = self.cinder.volumes.create( size=disk["size"], - name=name + "_vd" + chr(base_disk_index), + name=name + "vd" + chr(base_disk_index), imageRef=disk["image_id"], # Make sure volume is in the same AZ as the VM to be attached to availability_zone=vm_av_zone, ) boot_volume_id = volume.id + created_items["volume:" + str(volume.id)] = True + block_device_mapping[ + "vd" + chr(base_disk_index) + ] = volume.id + else: + # non-root persistent volume + key_id = ( + "vim_volume_id" + if "vim_volume_id" in disk.keys() + else "vim_id" + ) + if disk.get(key_id): + # use existing persistent volume + block_device_mapping["vd" + chr(base_disk_index)] = disk[ + key_id + ] + existing_vim_volumes.append({"id": disk[key_id]}) else: + # create persistent volume volume = self.cinder.volumes.create( size=disk["size"], - name=name + "_vd" + chr(base_disk_index), + name=name + "vd" + chr(base_disk_index), # Make sure volume is in the same AZ as the VM to be attached to availability_zone=vm_av_zone, ) - - created_items["volume:" + str(volume.id)] = True - block_device_mapping["_vd" + chr(base_disk_index)] = volume.id + created_items["volume:" + str(volume.id)] = True + block_device_mapping[ + "vd" + chr(base_disk_index) + ] = volume.id base_disk_index += 1 @@ -1988,9 +2039,9 @@ class vimconnector(vimconn.VimConnector): ) ) server = self.nova.servers.create( - name, - image_id, - flavor_id, + name=name, + image=image_id, + flavor=flavor_id, nics=net_list_vim, security_groups=self.config.get("security_groups"), # TODO remove security_groups in future versions. 
Already at neutron port @@ -2295,7 +2346,12 @@ class vimconnector(vimconn.VimConnector): try: k_item, _, k_id = k.partition(":") if k_item == "port": - self.neutron.delete_port(k_id) + port_dict = self.neutron.list_ports() + existing_ports = [ + port["id"] for port in port_dict["ports"] if port_dict + ] + if k_id in existing_ports: + self.neutron.delete_port(k_id) except Exception as e: self.logger.error( "Error deleting port: {}: {}".format(type(e).__name__, e) @@ -2497,7 +2553,8 @@ class vimconnector(vimconn.VimConnector): def action_vminstance(self, vm_id, action_dict, created_items={}): """Send and action over a VM instance from VIM - Returns None or the console dict if the action was successfully sent to the VIM""" + Returns None or the console dict if the action was successfully sent to the VIM + """ self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict)) try: @@ -2514,12 +2571,29 @@ class vimconnector(vimconn.VimConnector): server.resume() elif server.status == "SHUTOFF": server.start() + else: + self.logger.debug( + "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state" + ) + raise vimconn.VimConnException( + "Cannot 'start' instance while it is in active state", + http_code=vimconn.HTTP_Bad_Request, + ) + elif "pause" in action_dict: server.pause() elif "resume" in action_dict: server.resume() elif "shutoff" in action_dict or "shutdown" in action_dict: - server.stop() + self.logger.debug("server status %s", server.status) + if server.status == "ACTIVE": + server.stop() + else: + self.logger.debug("ERROR: VM is not in Active state") + raise vimconn.VimConnException( + "VM is not in active state, stop operation is not allowed", + http_code=vimconn.HTTP_Bad_Request, + ) elif "forceOff" in action_dict: server.stop() # TODO elif "terminate" in action_dict: @@ -3718,3 +3792,62 @@ class vimconnector(vimconn.VimConnector): nvExceptions.NotFound, ) as e: self._format_exception(e) + + def resize_instance(self, vm_id, new_flavor_id): + """ + For resizing the vm based on the given + flavor details + param: + vm_id : ID of an instance + new_flavor_id : Flavor id to be resized + Return the status of a resized instance + """ + self._reload_connection() + self.logger.debug("resize the flavor of an instance") + instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id) + old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"] + new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"] + try: + if instance_status == "ACTIVE" or instance_status == "SHUTOFF": + if old_flavor_disk > new_flavor_disk: + raise nvExceptions.BadRequest( + 400, + message="Server disk resize failed. 
Resize to lower disk flavor is not allowed", + ) + else: + self.nova.servers.resize(server=vm_id, flavor=new_flavor_id) + vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE") + if vm_state: + instance_resized_status = self.confirm_resize(vm_id) + return instance_resized_status + else: + raise nvExceptions.BadRequest( + 409, + message="Cannot 'resize' vm_state is in ERROR", + ) + + else: + self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state") + raise nvExceptions.BadRequest( + 409, + message="Cannot 'resize' instance while it is in vm_state resized", + ) + except ( + nvExceptions.BadRequest, + nvExceptions.ClientException, + nvExceptions.NotFound, + ) as e: + self._format_exception(e) + + def confirm_resize(self, vm_id): + """ + Confirm the resize of an instance + param: + vm_id: ID of an instance + """ + self._reload_connection() + self.nova.servers.confirm_resize(server=vm_id) + if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE": + self.__wait_for_vm(vm_id, "ACTIVE") + instance_status = self.get_vdu_state(vm_id)[0] + return instance_status
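
Illustrative note (not part of the patch): the flavor hunk above replaces the old single-NUMA restriction with per-node extra_specs keyed by the descriptor's node id. A minimal sketch of roughly what that mapping produces for a hypothetical two-node descriptor (node ids, memory in GiB and vcpu lists are made-up values):

    # Hypothetical NUMA descriptor; "memory" is in GiB, "vcpu" lists guest vCPU ids.
    numas = [
        {"id": 0, "memory": 2, "vcpu": [0, 1]},
        {"id": 1, "memory": 2, "vcpu": [2, 3]},
    ]

    # Mirrors the patched new_flavor() logic: one hw:numa_mem.<id> and
    # hw:numa_cpus.<id> entry per node, plus node/socket counts as strings.
    extra_specs = {
        "hw:numa_nodes": str(len(numas)),
        "hw:cpu_sockets": str(len(numas)),
    }
    for numa in numas:
        node_id = numa["id"]
        extra_specs["hw:numa_mem.{}".format(node_id)] = int(numa["memory"] * 1024)
        extra_specs["hw:numa_cpus.{}".format(node_id)] = ",".join(map(str, numa["vcpu"]))

    print(extra_specs)
    # {'hw:numa_nodes': '2', 'hw:cpu_sockets': '2',
    #  'hw:numa_mem.0': 2048, 'hw:numa_cpus.0': '0,1',
    #  'hw:numa_mem.1': 2048, 'hw:numa_cpus.1': '2,3'}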
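
Illustrative note (not part of the patch): a standalone sketch of the resize flow that the new resize_instance()/confirm_resize() methods wrap, using python-novaclient directly. The auth options, the "demo-vdu" and "demo-flavor-large" names, and the polling loop are placeholders, not values taken from the patch.

    import time

    from keystoneauth1 import loading, session
    from novaclient import client

    # Placeholder credentials/endpoint; in OSM these come from the VIM account.
    loader = loading.get_plugin_loader("password")
    auth = loader.load_from_options(
        auth_url="http://keystone:5000/v3",
        username="admin",
        password="secret",
        project_name="admin",
        user_domain_name="Default",
        project_domain_name="Default",
    )
    nova = client.Client("2.1", session=session.Session(auth=auth))

    server = nova.servers.find(name="demo-vdu")            # placeholder instance
    flavor = nova.flavors.find(name="demo-flavor-large")   # placeholder flavor

    # The patch only issues the resize when the instance is ACTIVE or SHUTOFF
    # and the target flavor does not shrink the root disk.
    nova.servers.resize(server=server.id, flavor=flavor.id)

    # Wait for Nova to report VERIFY_RESIZE, then confirm, mirroring
    # resize_instance() -> __wait_for_vm() -> confirm_resize() above.
    for _ in range(60):
        if nova.servers.get(server.id).status == "VERIFY_RESIZE":
            nova.servers.confirm_resize(server=server.id)
            break
        time.sleep(5)

    print(nova.servers.get(server.id).status)  # expected to settle back to ACTIVE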