X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2FRO.git;a=blobdiff_plain;f=RO-VIM-openstack%2Fosm_rovim_openstack%2Fvimconn_openstack.py;h=0a39041205ce8b06e0836316138cb42ead17b97d;hp=dcbf5cad4c0edbc06c8b8ee57489e7e4d769bf6b;hb=36cad669a99ed1655dc39b1c37aea6197ddbab55;hpb=049cbb1b256805f589c24776dcf092c77fefec6a diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py index dcbf5cad..0a390412 100644 --- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py +++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py @@ -32,6 +32,7 @@ to the VIM connector's SFC resources as follows: import copy from http.client import HTTPException +import json import logging from pprint import pformat import random @@ -1200,7 +1201,7 @@ class vimconnector(vimconn.VimConnector): flavor.vcpus, flavor.disk, flavor.ephemeral, - flavor.swap, + flavor.swap if isinstance(flavor.swap, int) else 0, ) if flavor_data == flavor_target: return flavor.id @@ -1349,6 +1350,26 @@ class vimconnector(vimconn.VimConnector): extended.get("disk-io-quota"), "disk_io", extra_specs ) + # Set the mempage size as specified in the descriptor + if extended.get("mempage-size"): + if extended.get("mempage-size") == "LARGE": + extra_specs["hw:mem_page_size"] = "large" + elif extended.get("mempage-size") == "SMALL": + extra_specs["hw:mem_page_size"] = "small" + elif extended.get("mempage-size") == "SIZE_2MB": + extra_specs["hw:mem_page_size"] = "2MB" + elif extended.get("mempage-size") == "SIZE_1GB": + extra_specs["hw:mem_page_size"] = "1GB" + elif extended.get("mempage-size") == "PREFER_LARGE": + extra_specs["hw:mem_page_size"] = "any" + else: + # The validations in NBI should make reaching here not possible. + # If this message is shown, check validations + self.logger.debug( + "Invalid mempage-size %s. Will be ignored", + extended.get("mempage-size"), + ) + # create flavor new_flavor = self.nova.flavors.create( name=name, @@ -1681,6 +1702,7 @@ class vimconnector(vimconn.VimConnector): start, image_id, flavor_id, + affinity_group_list, net_list, cloud_config=None, disk_list=None, @@ -1690,7 +1712,9 @@ class vimconnector(vimconn.VimConnector): """Adds a VM instance to VIM Params: start: indicates if VM must start or boot in pause mode. Ignored - image_id,flavor_id: iamge and flavor uuid + image_id,flavor_id: image and flavor uuid + affinity_group_list: list of affinity groups, each one is a dictionary. + Ignore if empty. 
net_list: list of interfaces, each one is a dictionary with: name: net_id: network uuid to connect @@ -1862,8 +1886,15 @@ class vimconnector(vimconn.VimConnector): # cloud config config_drive, userdata = self._create_user_data(cloud_config) + # get availability Zone + vm_av_zone = self._get_vm_availability_zone( + availability_zone_index, availability_zone_list + ) + # Create additional volumes in case these are present in disk_list + existing_vim_volumes = [] base_disk_index = ord("b") + boot_volume_id = None if disk_list: block_device_mapping = {} for disk in disk_list: @@ -1871,17 +1902,24 @@ class vimconnector(vimconn.VimConnector): block_device_mapping["_vd" + chr(base_disk_index)] = disk[ "vim_id" ] + existing_vim_volumes.append({"id": disk["vim_id"]}) else: if "image_id" in disk: + base_disk_index = ord("a") volume = self.cinder.volumes.create( size=disk["size"], name=name + "_vd" + chr(base_disk_index), imageRef=disk["image_id"], + # Make sure volume is in the same AZ as the VM to be attached to + availability_zone=vm_av_zone, ) + boot_volume_id = volume.id else: volume = self.cinder.volumes.create( size=disk["size"], name=name + "_vd" + chr(base_disk_index), + # Make sure volume is in the same AZ as the VM to be attached to + availability_zone=vm_av_zone, ) created_items["volume:" + str(volume.id)] = True @@ -1903,22 +1941,39 @@ class vimconnector(vimconn.VimConnector): time.sleep(5) elapsed_time += 5 + # Wait until existing volumes in vim are with status available + while elapsed_time < volume_timeout: + for volume in existing_vim_volumes: + if self.cinder.volumes.get(volume["id"]).status != "available": + break + else: # all ready: break from while + break + + time.sleep(5) + elapsed_time += 5 + # If we exceeded the timeout rollback if elapsed_time >= volume_timeout: raise vimconn.VimConnException( "Timeout creating volumes for instance " + name, http_code=vimconn.HTTP_Request_Timeout, ) + if boot_volume_id: + self.cinder.volumes.set_bootable(boot_volume_id, True) - # get availability Zone - vm_av_zone = self._get_vm_availability_zone( - availability_zone_index, availability_zone_list - ) + # Manage affinity groups/server groups + server_group_id = None + scheduller_hints = {} + + if affinity_group_list: + # Only first id on the list will be used. Openstack restriction + server_group_id = affinity_group_list[0]["affinity_group_id"] + scheduller_hints["group"] = server_group_id self.logger.debug( "nova.servers.create({}, {}, {}, nics={}, security_groups={}, " "availability_zone={}, key_name={}, userdata={}, config_drive={}, " - "block_device_mapping={})".format( + "block_device_mapping={}, server_group={})".format( name, image_id, flavor_id, @@ -1929,6 +1984,7 @@ class vimconnector(vimconn.VimConnector): userdata, config_drive, block_device_mapping, + server_group_id, ) ) server = self.nova.servers.create( @@ -1943,6 +1999,7 @@ class vimconnector(vimconn.VimConnector): userdata=userdata, config_drive=config_drive, block_device_mapping=block_device_mapping, + scheduler_hints=scheduller_hints, ) # , description=description) vm_start_time = time.time() @@ -2222,7 +2279,7 @@ class vimconnector(vimconn.VimConnector): ) as e: self._format_exception(e) - def delete_vminstance(self, vm_id, created_items=None): + def delete_vminstance(self, vm_id, created_items=None, volumes_to_hold=None): """Removes a VM instance from VIM. 
Returns the old identifier""" # print "osconnector: Getting VM from VIM" if created_items is None: @@ -2238,7 +2295,12 @@ class vimconnector(vimconn.VimConnector): try: k_item, _, k_id = k.partition(":") if k_item == "port": - self.neutron.delete_port(k_id) + port_dict = self.neutron.list_ports() + existing_ports = [ + port["id"] for port in port_dict["ports"] if port_dict + ] + if k_id in existing_ports: + self.neutron.delete_port(k_id) except Exception as e: self.logger.error( "Error deleting port: {}: {}".format(type(e).__name__, e) @@ -2272,8 +2334,9 @@ class vimconnector(vimconn.VimConnector): if self.cinder.volumes.get(k_id).status != "available": keep_waiting = True else: - self.cinder.volumes.delete(k_id) - created_items[k] = None + if k_id not in volumes_to_hold: + self.cinder.volumes.delete(k_id) + created_items[k] = None elif k_item == "floating_ip": # floating ip self.neutron.delete_floatingip(k_id) created_items[k] = None @@ -2456,12 +2519,29 @@ class vimconnector(vimconn.VimConnector): server.resume() elif server.status == "SHUTOFF": server.start() + else: + self.logger.debug( + "ERROR : Instance is not in SHUTOFF/PAUSE/SUSPEND state" + ) + raise vimconn.VimConnException( + "Cannot 'start' instance while it is in active state", + http_code=vimconn.HTTP_Bad_Request, + ) + elif "pause" in action_dict: server.pause() elif "resume" in action_dict: server.resume() elif "shutoff" in action_dict or "shutdown" in action_dict: - server.stop() + self.logger.debug("server status %s", server.status) + if server.status == "ACTIVE": + server.stop() + else: + self.logger.debug("ERROR: VM is not in Active state") + raise vimconn.VimConnException( + "VM is not in active state, stop operation is not allowed", + http_code=vimconn.HTTP_Bad_Request, + ) elif "forceOff" in action_dict: server.stop() # TODO elif "terminate" in action_dict: @@ -3444,3 +3524,278 @@ class vimconnector(vimconn.VimConnector): classification_dict[classification_id] = classification return classification_dict + + def new_affinity_group(self, affinity_group_data): + """Adds a server group to VIM + affinity_group_data contains a dictionary with information, keys: + name: name in VIM for the server group + type: affinity or anti-affinity + scope: Only nfvi-node allowed + Returns the server group identifier""" + self.logger.debug("Adding Server Group '%s'", str(affinity_group_data)) + + try: + name = affinity_group_data["name"] + policy = affinity_group_data["type"] + + self._reload_connection() + new_server_group = self.nova.server_groups.create(name, policy) + + return new_server_group.id + except ( + ksExceptions.ClientException, + nvExceptions.ClientException, + ConnectionError, + KeyError, + ) as e: + self._format_exception(e) + + def get_affinity_group(self, affinity_group_id): + """Obtain server group details from the VIM. Returns the server group detais as a dict""" + self.logger.debug("Getting flavor '%s'", affinity_group_id) + try: + self._reload_connection() + server_group = self.nova.server_groups.find(id=affinity_group_id) + + return server_group.to_dict() + except ( + nvExceptions.NotFound, + nvExceptions.ClientException, + ksExceptions.ClientException, + ConnectionError, + ) as e: + self._format_exception(e) + + def delete_affinity_group(self, affinity_group_id): + """Deletes a server group from the VIM. 
Returns the old affinity_group_id""" + self.logger.debug("Getting server group '%s'", affinity_group_id) + try: + self._reload_connection() + self.nova.server_groups.delete(affinity_group_id) + + return affinity_group_id + except ( + nvExceptions.NotFound, + ksExceptions.ClientException, + nvExceptions.ClientException, + ConnectionError, + ) as e: + self._format_exception(e) + + def get_vdu_state(self, vm_id): + """ + Getting the state of a vdu + param: + vm_id: ID of an instance + """ + self.logger.debug("Getting the status of VM") + self.logger.debug("VIM VM ID %s", vm_id) + self._reload_connection() + server = self.nova.servers.find(id=vm_id) + server_dict = server.to_dict() + vdu_data = [ + server_dict["status"], + server_dict["flavor"]["id"], + server_dict["OS-EXT-SRV-ATTR:host"], + server_dict["OS-EXT-AZ:availability_zone"], + ] + self.logger.debug("vdu_data %s", vdu_data) + return vdu_data + + def check_compute_availability(self, host, server_flavor_details): + self._reload_connection() + hypervisor_search = self.nova.hypervisors.search( + hypervisor_match=host, servers=True + ) + for hypervisor in hypervisor_search: + hypervisor_id = hypervisor.to_dict()["id"] + hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id) + hypervisor_dict = hypervisor_details.to_dict() + hypervisor_temp = json.dumps(hypervisor_dict) + hypervisor_json = json.loads(hypervisor_temp) + resources_available = [ + hypervisor_json["free_ram_mb"], + hypervisor_json["disk_available_least"], + hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"], + ] + compute_available = all( + x > y for x, y in zip(resources_available, server_flavor_details) + ) + if compute_available: + return host + + def check_availability_zone( + self, old_az, server_flavor_details, old_host, host=None + ): + self._reload_connection() + az_check = {"zone_check": False, "compute_availability": None} + aggregates_list = self.nova.aggregates.list() + for aggregate in aggregates_list: + aggregate_details = aggregate.to_dict() + aggregate_temp = json.dumps(aggregate_details) + aggregate_json = json.loads(aggregate_temp) + if aggregate_json["availability_zone"] == old_az: + hosts_list = aggregate_json["hosts"] + if host is not None: + if host in hosts_list: + az_check["zone_check"] = True + available_compute_id = self.check_compute_availability( + host, server_flavor_details + ) + if available_compute_id is not None: + az_check["compute_availability"] = available_compute_id + else: + for check_host in hosts_list: + if check_host != old_host: + available_compute_id = self.check_compute_availability( + check_host, server_flavor_details + ) + if available_compute_id is not None: + az_check["zone_check"] = True + az_check["compute_availability"] = available_compute_id + break + else: + az_check["zone_check"] = True + return az_check + + def migrate_instance(self, vm_id, compute_host=None): + """ + Migrate a vdu + param: + vm_id: ID of an instance + compute_host: Host to migrate the vdu to + """ + self._reload_connection() + vm_state = False + instance_state = self.get_vdu_state(vm_id) + server_flavor_id = instance_state[1] + server_hypervisor_name = instance_state[2] + server_availability_zone = instance_state[3] + try: + server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict() + server_flavor_details = [ + server_flavor["ram"], + server_flavor["disk"], + server_flavor["vcpus"], + ] + if compute_host == server_hypervisor_name: + raise vimconn.VimConnException( + "Unable to migrate instance '{}' to the same host 
'{}'".format( + vm_id, compute_host + ), + http_code=vimconn.HTTP_Bad_Request, + ) + az_status = self.check_availability_zone( + server_availability_zone, + server_flavor_details, + server_hypervisor_name, + compute_host, + ) + availability_zone_check = az_status["zone_check"] + available_compute_id = az_status.get("compute_availability") + + if availability_zone_check is False: + raise vimconn.VimConnException( + "Unable to migrate instance '{}' to a different availability zone".format( + vm_id + ), + http_code=vimconn.HTTP_Bad_Request, + ) + if available_compute_id is not None: + self.nova.servers.live_migrate( + server=vm_id, + host=available_compute_id, + block_migration=True, + disk_over_commit=False, + ) + state = "MIGRATING" + changed_compute_host = "" + if state == "MIGRATING": + vm_state = self.__wait_for_vm(vm_id, "ACTIVE") + changed_compute_host = self.get_vdu_state(vm_id)[2] + if vm_state and changed_compute_host == available_compute_id: + self.logger.debug( + "Instance '{}' migrated to the new compute host '{}'".format( + vm_id, changed_compute_host + ) + ) + return state, available_compute_id + else: + raise vimconn.VimConnException( + "Migration Failed. Instance '{}' not moved to the new host {}".format( + vm_id, available_compute_id + ), + http_code=vimconn.HTTP_Bad_Request, + ) + else: + raise vimconn.VimConnException( + "Compute '{}' not available or does not have enough resources to migrate the instance".format( + available_compute_id + ), + http_code=vimconn.HTTP_Bad_Request, + ) + except ( + nvExceptions.BadRequest, + nvExceptions.ClientException, + nvExceptions.NotFound, + ) as e: + self._format_exception(e) + + def resize_instance(self, vm_id, new_flavor_id): + """ + For resizing the vm based on the given + flavor details + param: + vm_id : ID of an instance + new_flavor_id : Flavor id to be resized + Return the status of a resized instance + """ + self._reload_connection() + self.logger.debug("resize the flavor of an instance") + instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id) + old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"] + new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"] + try: + if instance_status == "ACTIVE" or instance_status == "SHUTOFF": + if old_flavor_disk > new_flavor_disk: + raise nvExceptions.BadRequest( + 400, + message="Server disk resize failed. Resize to lower disk flavor is not allowed", + ) + else: + self.nova.servers.resize(server=vm_id, flavor=new_flavor_id) + vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE") + if vm_state: + instance_resized_status = self.confirm_resize(vm_id) + return instance_resized_status + else: + raise nvExceptions.BadRequest( + 409, + message="Cannot 'resize' vm_state is in ERROR", + ) + + else: + self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state") + raise nvExceptions.BadRequest( + 409, + message="Cannot 'resize' instance while it is in vm_state resized", + ) + except ( + nvExceptions.BadRequest, + nvExceptions.ClientException, + nvExceptions.NotFound, + ) as e: + self._format_exception(e) + + def confirm_resize(self, vm_id): + """ + Confirm the resize of an instance + param: + vm_id: ID of an instance + """ + self._reload_connection() + self.nova.servers.confirm_resize(server=vm_id) + if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE": + self.__wait_for_vm(vm_id, "ACTIVE") + instance_status = self.get_vdu_state(vm_id)[0] + return instance_status
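
For reference, the mempage-size branch added in the flavor hunk reduces to a table-driven mapping from the descriptor values to Nova's hw:mem_page_size extra spec. The sketch below is illustrative only; MEMPAGE_SIZE_MAP and mempage_to_extra_spec are made-up names, not part of the module.

# Sketch only: table-driven form of the mempage-size handling added above.
# The names below are hypothetical and do not exist in vimconn_openstack.py.
MEMPAGE_SIZE_MAP = {
    "LARGE": "large",
    "SMALL": "small",
    "SIZE_2MB": "2MB",
    "SIZE_1GB": "1GB",
    "PREFER_LARGE": "any",
}

def mempage_to_extra_spec(mempage_size, extra_specs):
    """Translate an OSM mempage-size value into the hw:mem_page_size extra spec."""
    value = MEMPAGE_SIZE_MAP.get(mempage_size)
    if value is None:
        # NBI validation should prevent this case; like the patch, ignore it.
        return
    extra_specs["hw:mem_page_size"] = value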
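
The new_vminstance() hunks add three behaviours: additional Cinder volumes are created in the VM's availability zone, the image-backed volume is marked bootable, and the first affinity group (Nova accepts only one server group per instance) is passed as a scheduler hint. The standalone function below condenses that flow; its name and parameters are illustrative, and only the client calls mirror the patch.

def boot_vm_sketch(nova, cinder, name, image_id, flavor_id, net_list_vim,
                   vm_av_zone, affinity_group_list=None, disk_list=None):
    # Condensed sketch of the new_vminstance() changes above, not the
    # connector method itself; device naming and error handling are simplified.
    block_device_mapping = {}
    for index, disk in enumerate(disk_list or [], start=ord("b")):
        kwargs = {
            "size": disk["size"],
            "name": "{}_vd{}".format(name, chr(index)),
            # keep the volume in the same AZ as the VM, as the patch does
            "availability_zone": vm_av_zone,
        }
        if disk.get("image_id"):
            kwargs["imageRef"] = disk["image_id"]
        volume = cinder.volumes.create(**kwargs)
        if disk.get("image_id"):
            # the image-backed volume becomes the boot volume
            cinder.volumes.set_bootable(volume.id, True)
        block_device_mapping["vd" + chr(index)] = volume.id

    scheduler_hints = {}
    if affinity_group_list:
        # Only the first server group is used (OpenStack restriction noted in the patch)
        scheduler_hints["group"] = affinity_group_list[0]["affinity_group_id"]

    return nova.servers.create(
        name,
        image_id,
        flavor_id,
        nics=net_list_vim,
        availability_zone=vm_av_zone,
        block_device_mapping=block_device_mapping,
        scheduler_hints=scheduler_hints,
    )

The real method additionally polls Cinder until every created or pre-existing volume reports "available" before calling nova.servers.create(), and rolls back on timeout.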
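
delete_vminstance() gains a volumes_to_hold argument so persistent volumes survive instance deletion, and ports are only removed if Neutron still reports them. The helper below isolates those guards; it is a sketch with a hypothetical name, and it assumes (as the patch does) that volumes_to_hold is a list when volumes must be kept.

def delete_created_items_sketch(neutron, cinder, created_items, volumes_to_hold=None):
    # Sketch of the clean-up guards added above (hypothetical helper, not in the module).
    volumes_to_hold = volumes_to_hold or []
    existing_ports = [p["id"] for p in neutron.list_ports().get("ports", [])]
    for k in list(created_items):
        k_item, _, k_id = k.partition(":")
        if k_item == "port" and k_id in existing_ports:
            neutron.delete_port(k_id)
            created_items[k] = None
        elif k_item == "volume" and k_id not in volumes_to_hold:
            # the patch additionally waits for the volume to reach "available"
            # before deleting it
            cinder.volumes.delete(k_id)
            created_items[k] = None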
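
The new new_affinity_group(), get_affinity_group() and delete_affinity_group() methods are thin wrappers around Nova server groups. A usage sketch, assuming `vim` is an already-instantiated vimconnector:

# Usage sketch only; `vim` is assumed to be a constructed vimconnector instance.
group_id = vim.new_affinity_group(
    {"name": "vnf-anti-affinity", "type": "anti-affinity", "scope": "nfvi-node"}
)
details = vim.get_affinity_group(group_id)   # dict with the server group attributes
vim.delete_affinity_group(group_id)          # returns the same id on success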
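
get_vdu_state() returns a 4-item list [status, flavor_id, host, availability_zone], and check_availability_zone() expects the flavor details in the order [ram, disk, vcpus], matching what check_compute_availability() reads from the hypervisor. The snippet below shows how the two fit together; `vim` and `vm_id` are placeholders.

# Sketch of how the helpers compose; `vim` and `vm_id` are placeholders.
status, flavor_id, host, availability_zone = vim.get_vdu_state(vm_id)
flavor = vim.nova.flavors.find(id=flavor_id).to_dict()
az_status = vim.check_availability_zone(
    availability_zone,
    [flavor["ram"], flavor["disk"], flavor["vcpus"]],
    host,        # current hypervisor, excluded as a migration target
    host=None,   # let the connector pick any other host in the same zone
)
# az_status == {"zone_check": bool, "compute_availability": host_or_None}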
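
migrate_instance() live-migrates the VM to another host inside its current availability zone and returns the migration state together with the target host; resize_instance() refuses a flavor with a smaller disk and returns the post-resize status once confirm_resize() completes. A hedged usage sketch, with `vim` and the identifiers as placeholders:

# Usage sketch only; `vim`, `vm_id` and `new_flavor_id` are placeholders.
state, new_host = vim.migrate_instance(vm_id)        # live-migrate within the same AZ
status = vim.resize_instance(vm_id, new_flavor_id)   # only to an equal or larger disk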