+
+ def refresh_sfps_status(self, sfp_list):
+ """Get the status of the service function path
+ Params: the list of sfp identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function path
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sfp_dict = {}
+ self.logger.debug(
+ "refresh_sfps status: Getting tenant SFP information from VIM"
+ )
+
+ for sfp_id in sfp_list:
+ sfp = {}
+
+ try:
+ sfp_vim = self.get_sfp(sfp_id)
+
+ if sfp_vim["spi"]:
+ sfp["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sfp["status"] = "OTHER"
+ sfp["error_msg"] = "VIM status reported " + sfp["status"]
+
+ sfp["vim_info"] = self.serialize(sfp_vim)
+
+ if sfp_vim.get("fault"):
+ sfp["error_msg"] = str(sfp_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sfp status: %s", str(e))
+ sfp["status"] = "DELETED"
+ sfp["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sfp status: %s", str(e))
+ sfp["status"] = "VIM_ERROR"
+ sfp["error_msg"] = str(e)
+
+ sfp_dict[sfp_id] = sfp
+
+ return sfp_dict
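+
+ # Illustrative only, not part of this change: the four refresh_* methods in
+ # this block share the same return shape. Assuming a connector instance
+ # "vim_conn" (hypothetical name), a call like
+ # vim_conn.refresh_sfps_status(["5d3a..."])
+ # would return something like
+ # {"5d3a...": {"status": "ACTIVE", "vim_info": "..."}}
+ # with "error_msg" present only when the VIM reported a fault, an
+ # unrecognized status, or the lookup itself failed.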
+
+ def refresh_sfis_status(self, sfi_list):
+ """Get the status of the service function instances
+ Params: the list of sfi identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function instance
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sfi_dict = {}
+ self.logger.debug(
+ "refresh_sfis status: Getting tenant sfi information from VIM"
+ )
+
+ for sfi_id in sfi_list:
+ sfi = {}
+
+ try:
+ sfi_vim = self.get_sfi(sfi_id)
+
+ if sfi_vim:
+ sfi["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sfi["status"] = "OTHER"
+ sfi["error_msg"] = "VIM status reported " + sfi["status"]
+
+ sfi["vim_info"] = self.serialize(sfi_vim)
+
+ if sfi_vim.get("fault"):
+ sfi["error_msg"] = str(sfi_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sfi status: %s", str(e))
+ sfi["status"] = "DELETED"
+ sfi["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sfi status: %s", str(e))
+ sfi["status"] = "VIM_ERROR"
+ sfi["error_msg"] = str(e)
+
+ sfi_dict[sfi_id] = sfi
+
+ return sfi_dict
+
+ def refresh_sfs_status(self, sf_list):
+ """Get the status of the service functions
+ Params: the list of sf identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sf_dict = {}
+ self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
+
+ for sf_id in sf_list:
+ sf = {}
+
+ try:
+ sf_vim = self.get_sf(sf_id)
+
+ if sf_vim:
+ sf["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sf["status"] = "OTHER"
+ sf["error_msg"] = "VIM status reported " + sf_vim["status"]
+
+ sf["vim_info"] = self.serialize(sf_vim)
+
+ if sf_vim.get("fault"):
+ sf["error_msg"] = str(sf_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sf status: %s", str(e))
+ sf["status"] = "DELETED"
+ sf["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sf status: %s", str(e))
+ sf["status"] = "VIM_ERROR"
+ sf["error_msg"] = str(e)
+
+ sf_dict[sf_id] = sf
+
+ return sf_dict
+
+ def refresh_classifications_status(self, classification_list):
+ """Get the status of the classifications
+ Params: the list of classification identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this classifier
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ classification_dict = {}
+ self.logger.debug(
+ "refresh_classifications status: Getting tenant classification information from VIM"
+ )
+
+ for classification_id in classification_list:
+ classification = {}
+
+ try:
+ classification_vim = self.get_classification(classification_id)
+
+ if classification_vim:
+ classification["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ classification["status"] = "OTHER"
+ classification["error_msg"] = (
+ "VIM status reported " + classification["status"]
+ )
+
+ classification["vim_info"] = self.serialize(classification_vim)
+
+ if classification_vim.get("fault"):
+ classification["error_msg"] = str(classification_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting classification status: %s", str(e))
+ classification["status"] = "DELETED"
+ classification["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting classification status: %s", str(e))
+ classification["status"] = "VIM_ERROR"
+ classification["error_msg"] = str(e)
+
+ classification_dict[classification_id] = classification
+
+ return classification_dict
+
+ def new_affinity_group(self, affinity_group_data):
+ """Adds a server group to VIM
+ affinity_group_data contains a dictionary with information, keys:
+ name: name in VIM for the server group
+ type: affinity or anti-affinity
+ scope: Only nfvi-node allowed
+ Returns the server group identifier"""
+ self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+
+ try:
+ name = affinity_group_data["name"]
+ policy = affinity_group_data["type"]
+
+ self._reload_connection()
+ new_server_group = self.nova.server_groups.create(name, policy)
+
+ return new_server_group.id
+ except (
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ ConnectionError,
+ KeyError,
+ ) as e:
+ self._format_exception(e)
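+
+ # Illustrative input (assumed values): the "type" key maps straight onto a
+ # Nova server-group policy, e.g.
+ # vim_conn.new_affinity_group(
+ # {"name": "vnf-ag-1", "type": "anti-affinity", "scope": "nfvi-node"}
+ # )
+ # returns the id of the created server group.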
+
+ def get_affinity_group(self, affinity_group_id):
+ """Obtain server group details from the VIM. Returns the server group detais as a dict"""
+ self.logger.debug("Getting flavor '%s'", affinity_group_id)
+ try:
+ self._reload_connection()
+ server_group = self.nova.server_groups.find(id=affinity_group_id)
+
+ return server_group.to_dict()
+ except (
+ nvExceptions.NotFound,
+ nvExceptions.ClientException,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_affinity_group(self, affinity_group_id):
+ """Deletes a server group from the VIM. Returns the old affinity_group_id"""
+ self.logger.debug("Getting server group '%s'", affinity_group_id)
+ try:
+ self._reload_connection()
+ self.nova.server_groups.delete(affinity_group_id)
+
+ return affinity_group_id
+ except (
+ nvExceptions.NotFound,
+ ksExceptions.ClientException,
+ nvExceptions.ClientException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def get_vdu_state(self, vm_id):
+ """
+ Get the state of a vdu
+ param:
+ vm_id: ID of an instance
+ """
+ self.logger.debug("Getting the status of VM")
+ self.logger.debug("VIM VM ID %s", vm_id)
+ self._reload_connection()
+ server = self.nova.servers.find(id=vm_id)
+ server_dict = server.to_dict()
+ vdu_data = [
+ server_dict["status"],
+ server_dict["flavor"]["id"],
+ server_dict["OS-EXT-SRV-ATTR:host"],
+ server_dict["OS-EXT-AZ:availability_zone"],
+ ]
+ self.logger.debug("vdu_data %s", vdu_data)
+ return vdu_data
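+
+ # Illustrative return value (assumed ids): the list is fixed-order, e.g.
+ # ["ACTIVE", "<flavor-id>", "compute-0", "nova"], i.e.
+ # [status, flavor id, hypervisor hostname, availability zone].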
+
+ def check_compute_availability(self, host, server_flavor_details):
+ self._reload_connection()
+ hypervisor_search = self.nova.hypervisors.search(
+ hypervisor_match=host, servers=True
+ )
+ for hypervisor in hypervisor_search:
+ hypervisor_id = hypervisor.to_dict()["id"]
+ hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
+ hypervisor_dict = hypervisor_details.to_dict()
+ resources_available = [
+ hypervisor_dict["free_ram_mb"],
+ hypervisor_dict["disk_available_least"],
+ hypervisor_dict["vcpus"] - hypervisor_dict["vcpus_used"],
+ ]
+ compute_available = all(
+ x > y for x, y in zip(resources_available, server_flavor_details)
+ )
+ if compute_available:
+ return host
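+
+ # Sketch of the comparison above (illustrative numbers): with
+ # resources_available = [8192, 40, 6] (free RAM in MB, least available disk
+ # in GB, free vCPUs) and server_flavor_details = [4096, 20, 2] (flavor ram,
+ # disk, vcpus), every element is strictly greater and the host is returned;
+ # when no hypervisor has enough headroom the method implicitly returns None.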
+
+ def check_availability_zone(
+ self, old_az, server_flavor_details, old_host, host=None
+ ):
+ self._reload_connection()
+ az_check = {"zone_check": False, "compute_availability": None}
+ aggregates_list = self.nova.aggregates.list()
+ for aggregate in aggregates_list:
+ aggregate_details = aggregate.to_dict()
+ if aggregate_details["availability_zone"] == old_az:
+ hosts_list = aggregate_details["hosts"]
+ if host is not None:
+ if host in hosts_list:
+ az_check["zone_check"] = True
+ available_compute_id = self.check_compute_availability(
+ host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["compute_availability"] = available_compute_id
+ else:
+ for check_host in hosts_list:
+ if check_host != old_host:
+ available_compute_id = self.check_compute_availability(
+ check_host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["zone_check"] = True
+ az_check["compute_availability"] = available_compute_id
+ break
+ else:
+ az_check["zone_check"] = True
+ return az_check
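+
+ # Illustrative outcomes (assumed host names): with a target host inside
+ # old_az that has spare capacity, az_check becomes
+ # {"zone_check": True, "compute_availability": "compute-2"}; if the zone
+ # matches but no candidate host has capacity, "zone_check" can still end up
+ # True with "compute_availability" left as None (the for-else above); if
+ # old_az matches no aggregate, both defaults are returned unchanged.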
+
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ param:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ self._reload_connection()
+ vm_state = False
+ instance_state = self.get_vdu_state(vm_id)
+ server_flavor_id = instance_state[1]
+ server_hypervisor_name = instance_state[2]
+ server_availability_zone = instance_state[3]
+ try:
+ server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+ server_flavor_details = [
+ server_flavor["ram"],
+ server_flavor["disk"],
+ server_flavor["vcpus"],
+ ]
+ if compute_host == server_hypervisor_name:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to the same host '{}'".format(
+ vm_id, compute_host
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ az_status = self.check_availability_zone(
+ server_availability_zone,
+ server_flavor_details,
+ server_hypervisor_name,
+ compute_host,
+ )
+ availability_zone_check = az_status["zone_check"]
+ available_compute_id = az_status.get("compute_availability")
+
+ if availability_zone_check is False:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to a different availability zone".format(
+ vm_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ if available_compute_id is not None:
+ self.nova.servers.live_migrate(
+ server=vm_id,
+ host=available_compute_id,
+ block_migration=True,
+ disk_over_commit=False,
+ )
+ state = "MIGRATING"
+ changed_compute_host = ""
+ if state == "MIGRATING":
+ vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+ changed_compute_host = self.get_vdu_state(vm_id)[2]
+ if vm_state and changed_compute_host == available_compute_id:
+ self.logger.debug(
+ "Instance '{}' migrated to the new compute host '{}'".format(
+ vm_id, changed_compute_host
+ )
+ )
+ return state, available_compute_id
+ else:
+ raise vimconn.VimConnException(
+ "Migration Failed. Instance '{}' not moved to the new host {}".format(
+ vm_id, available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ else:
+ raise vimconn.VimConnException(
+ "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+ available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ except (
+ nvExceptions.BadRequest,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ) as e:
+ self._format_exception(e)
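+
+ # Usage sketch (hypothetical ids): a successful
+ # vim_conn.migrate_instance(vm_id, compute_host="compute-2")
+ # live-migrates with block migration, waits for the server to return to
+ # ACTIVE and yields ("MIGRATING", "compute-2"); capacity or availability
+ # zone problems surface as a VimConnException with HTTP 400.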
+
+ def resize_instance(self, vm_id, new_flavor_id):
+ """
+ For resizing the vm based on the given
+ flavor details
+ param:
+ vm_id : ID of an instance
+ new_flavor_id : Flavor id to be resized
+ Return the status of a resized instance
+ """
+ self._reload_connection()
+ self.logger.debug("resize the flavor of an instance")
+ instance_status, old_flavor_id, compute_host, az = self.get_vdu_state(vm_id)
+ old_flavor_disk = self.nova.flavors.find(id=old_flavor_id).to_dict()["disk"]
+ new_flavor_disk = self.nova.flavors.find(id=new_flavor_id).to_dict()["disk"]
+ try:
+ if instance_status == "ACTIVE" or instance_status == "SHUTOFF":
+ if old_flavor_disk > new_flavor_disk:
+ raise nvExceptions.BadRequest(
+ 400,
+ message="Server disk resize failed. Resize to lower disk flavor is not allowed",
+ )
+ else:
+ self.nova.servers.resize(server=vm_id, flavor=new_flavor_id)
+ vm_state = self.__wait_for_vm(vm_id, "VERIFY_RESIZE")
+ if vm_state:
+ instance_resized_status = self.confirm_resize(vm_id)
+ return instance_resized_status
+ else:
+ raise nvExceptions.BadRequest(
+ 409,
+ message="Cannot 'resize' vm_state is in ERROR",
+ )
+
+ else:
+ self.logger.debug("ERROR : Instance is not in ACTIVE or SHUTOFF state")
+ raise nvExceptions.BadRequest(
+ 409,
+ message="Cannot 'resize' instance while it is in vm_state resized",
+ )
+ except (
+ nvExceptions.BadRequest,
+ nvExceptions.ClientException,
+ nvExceptions.NotFound,
+ ) as e:
+ self._format_exception(e)
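+
+ # Usage sketch (hypothetical ids): vim_conn.resize_instance(vm_id, flavor_id)
+ # only acts on ACTIVE or SHUTOFF instances, refuses flavors with a smaller
+ # disk, waits for VERIFY_RESIZE, confirms the resize and returns the final
+ # status as reported by get_vdu_state.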
+
+ def confirm_resize(self, vm_id):
+ """
+ Confirm the resize of an instance
+ param:
+ vm_id: ID of an instance
+ """
+ self._reload_connection()
+ self.nova.servers.confirm_resize(server=vm_id)
+ if self.get_vdu_state(vm_id)[0] == "VERIFY_RESIZE":
+ self.__wait_for_vm(vm_id, "ACTIVE")
+ instance_status = self.get_vdu_state(vm_id)[0]
+ return instance_status