+
+ @catch_any_exception
+ def new_affinity_group(self, affinity_group_data):
+ """Adds a server group to VIM
+ affinity_group_data contains a dictionary with information, keys:
+ name: name in VIM for the server group
+ type: affinity or anti-affinity
+ scope: Only nfvi-node allowed
+ Returns the server group identifier"""
+ self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+ name = affinity_group_data["name"]
+ policy = affinity_group_data["type"]
+ self._reload_connection()
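+ # Nova's strict policies are "affinity" and "anti-affinity"; newer microversions
+ # (>= 2.15) also accept "soft-" variants, which are not exposed here.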
+ new_server_group = self.nova.server_groups.create(name, policy)
+ return new_server_group.id
+
+ @catch_any_exception
+ def get_affinity_group(self, affinity_group_id):
+ """Obtain server group details from the VIM. Returns the server group detais as a dict"""
+ self.logger.debug("Getting flavor '%s'", affinity_group_id)
+ self._reload_connection()
+ server_group = self.nova.server_groups.find(id=affinity_group_id)
+ return server_group.to_dict()
+
+ @catch_any_exception
+ def delete_affinity_group(self, affinity_group_id):
+ """Deletes a server group from the VIM. Returns the old affinity_group_id"""
+ self.logger.debug("Getting server group '%s'", affinity_group_id)
+ self._reload_connection()
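+ # Deleting a server group does not delete its member instances; they merely
+ # lose the placement policy.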
+ self.nova.server_groups.delete(affinity_group_id)
+ return affinity_group_id
+
+ @catch_any_exception
+ def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+ """Getting the state of a VDU.
+ Args:
+ vm_id (str): ID of an instance
+ host_is_required (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
+ and if this is set to True, it raises KeyError.
+ Returns:
+ vdu_data (list): VDU details including state, flavor, host_info, AZ
+ """
+ self.logger.debug("Getting the status of VM")
+ self.logger.debug("VIM VM ID %s", vm_id)
+ self._reload_connection()
+ server_dict = self._find_nova_server(vm_id)
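+ # "OS-EXT-SRV-ATTR:host" is an admin-only field under the default Nova policy,
+ # hence the conditional lookup below.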
+ srv_attr = "OS-EXT-SRV-ATTR:host"
+ host_info = (
+ server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+ )
+ vdu_data = [
+ server_dict["status"],
+ server_dict["flavor"]["id"],
+ host_info,
+ server_dict["OS-EXT-AZ:availability_zone"],
+ ]
+ self.logger.debug("vdu_data %s", vdu_data)
+ return vdu_data
+
+ def check_compute_availability(self, host, server_flavor_details):
+ """Checks whether the given compute host has enough free RAM, disk and vCPUs
+ for the flavor. Returns the host name if it does, otherwise None."""
+ self._reload_connection()
+ hypervisor_search = self.nova.hypervisors.search(
+ hypervisor_match=host, servers=True
+ )
+ for hypervisor in hypervisor_search:
+ hypervisor_id = hypervisor.to_dict()["id"]
+ hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
+ # to_dict() already returns a plain dict; no JSON round-trip is needed
+ hypervisor_json = hypervisor_details.to_dict()
+ resources_available = [
+ hypervisor_json["free_ram_mb"],
+ hypervisor_json["disk_available_least"],
+ hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
+ ]
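+ # Same order as server_flavor_details ([ram, disk, vcpus]), so the
+ # element-wise comparison below lines up.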
+ compute_available = all(
+ x > y for x, y in zip(resources_available, server_flavor_details)
+ )
+ if compute_available:
+ return host
+
+ def check_availability_zone(
+ self, old_az, server_flavor_details, old_host, host=None
+ ):
+ """Checks whether the old availability zone can still host the flavor, either on
+ the given host or on any host other than old_host. Returns a dict with keys
+ "zone_check" (bool) and "compute_availability" (host name or None)."""
+ self._reload_connection()
+ az_check = {"zone_check": False, "compute_availability": None}
+ aggregates_list = self.nova.aggregates.list()
+ for aggregate in aggregates_list:
+ # to_dict() already returns a plain dict; no JSON round-trip is needed
+ aggregate_json = aggregate.to_dict()
+ if aggregate_json["availability_zone"] == old_az:
+ hosts_list = aggregate_json["hosts"]
+ if host is not None:
+ if host in hosts_list:
+ az_check["zone_check"] = True
+ available_compute_id = self.check_compute_availability(
+ host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["compute_availability"] = available_compute_id
+ else:
+ for check_host in hosts_list:
+ if check_host != old_host:
+ available_compute_id = self.check_compute_availability(
+ check_host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["zone_check"] = True
+ az_check["compute_availability"] = available_compute_id
+ break
+ else:
+ az_check["zone_check"] = True
+ return az_check
+
+ @catch_any_exception
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ param:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ self._reload_connection()
+ vm_state = False
+ instance_state = self.get_vdu_state(vm_id, host_is_required=True)
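+ # get_vdu_state() returns [status, flavor id, host, availability zone];
+ # host info is mandatory for migration.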
+ server_flavor_id = instance_state[1]
+ server_hypervisor_name = instance_state[2]
+ server_availability_zone = instance_state[3]
+ server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+ server_flavor_details = [
+ server_flavor["ram"],
+ server_flavor["disk"],
+ server_flavor["vcpus"],
+ ]
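+ # Keep this [ram, disk, vcpus] order: check_compute_availability() compares it
+ # element-wise against the hypervisor's free resources.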
+ if compute_host == server_hypervisor_name:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to the same host '{}'".format(
+ vm_id, compute_host
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ az_status = self.check_availability_zone(
+ server_availability_zone,
+ server_flavor_details,
+ server_hypervisor_name,
+ compute_host,
+ )
+ availability_zone_check = az_status["zone_check"]
+ available_compute_id = az_status.get("compute_availability")
+
+ if availability_zone_check is False:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to a different availability zone".format(
+ vm_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ if available_compute_id is not None:
+ # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
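+ # block_migration=True requests a block live migration, so shared storage
+ # between source and destination hosts is not required.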
+ self.nova.servers.live_migrate(
+ server=vm_id,
+ host=available_compute_id,
+ block_migration=True,
+ )
+ state = "MIGRATING"
+ changed_compute_host = ""
+ if state == "MIGRATING":
+ vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+ changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
+ 2
+ ]
+ if vm_state and changed_compute_host == available_compute_id:
+ self.logger.debug(
+ "Instance '{}' migrated to the new compute host '{}'".format(
+ vm_id, changed_compute_host
+ )
+ )
+ return state, available_compute_id
+ else:
+ raise vimconn.VimConnException(
+ "Migration Failed. Instance '{}' not moved to the new host {}".format(
+ vm_id, available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ else:
+ raise vimconn.VimConnException(
+ "No compute host is available or has enough resources to migrate instance '{}'".format(
+ vm_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+
+ @catch_any_exception
+ def resize_instance(self, vm_id, new_flavor_id):
+ """
+ For resizing the vm based on the given
+ flavor details
+ param:
+ vm_id : ID of an instance
+ new_flavor_id : Flavor id to be resized
+ Return the status of a resized instance
+ """
+ self._reload_connection()
+ self.logger.debug("resize the flavor of an instance")
+ instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
+ old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
+ new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
+ if instance_status in ("ACTIVE", "SHUTOFF"):
+ if old_flavor_disk > new_flavor_disk:
+ raise nvExceptions.BadRequest(
+ 400,
+ message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+ )
+ else:
+ self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+ vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
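+ # A successful resize leaves the server in VERIFY_RESIZE until it is
+ # confirmed or reverted.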
+ if vm_state:
+ instance_resized_status = self.confirm_resize(vm_id)
+ return instance_resized_status
+ else:
+ raise nvExceptions.BadRequest(
+ 409,
+ message="Cannot 'resize' vm_state is in ERROR",
+ )
+ else:
+ self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
+ raise nvExceptions.BadRequest(
+ 409,
+ message="Cannot 'resize' instance while it is not in ACTIVE or SHUTOFF state",
+ )
+
+ def confirm_resize(self, vm_id):
+ """
+ Confirms the resize of an instance and returns the resulting status.
+ param:
+ vm_id: ID of an instance
+ """
+ self._reload_connection()
+ self.nova.servers.confirm_resize(server=vm_id)
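+ # Confirming releases the old allocation on the source host; the server
+ # should then return to ACTIVE.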
+ if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
+ self.__wait_for_vm(vm_id, "ACTIVE")
+ instance_status = self.get_vdu_state(vm_id)[0]
+ return instance_status
+
+ def get_monitoring_data(self):
+ """Gets servers and ports data from the OpenStack VIM.
+ Returns a tuple (all_servers, all_ports)."""
+ try:
+ self.logger.debug("Getting servers and ports data from Openstack VIMs.")
+ self._reload_connection()
+ all_servers = self.nova.servers.list(detailed=True)
+ try:
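+ # With compute API microversion >= 2.47 the embedded flavor carries
+ # "original_name" but no "id", so resolve the id by name.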
+ for server in all_servers:
+ if server.flavor.get("original_name"):
+ server.flavor["id"] = self.nova.flavors.find(
+ name=server.flavor["original_name"]
+ ).id
+ except nClient.exceptions.NotFound as e:
+ self.logger.warning(str(e.message))
+ all_ports = self.neutron.list_ports()
+ return all_servers, all_ports
+ except Exception as e:
+ raise vimconn.VimConnException(
+ f"Exception in monitoring while getting VMs and ports status: {str(e)}"
+ )