+ def new_sf(self, name, sfis, sfc_encap=True):
+ self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
+
+ new_sf = None
+
+ try:
+ self._reload_connection()
+
+ for instance in sfis:
+ sfi = self.get_sfi(instance)
+
+ if sfi.get("sfc_encap") != sfc_encap:
+ raise vimconn.VimConnNotSupportedException(
+ "OpenStack VIM connector requires all SFIs of the "
+ "same SF to share the same SFC Encapsulation"
+ )
+
+ sf_dict = {"name": name, "port_pairs": sfis}
+
+ self.logger.info("Adding a new SF to VIM, {}.".format(sf_dict))
+ new_sf = self.neutron.create_sfc_port_pair_group(
+ {"port_pair_group": sf_dict}
+ )
+
+ return new_sf["port_pair_group"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ if new_sf:
+ try:
+ new_sf_id = new_sf.get("port_pair_group").get("id")
+ self.neutron.delete_sfc_port_pair_group(new_sf_id)
+ except Exception:
+ self.logger.error(
+ "Creation of Service Function failed, with "
+ "subsequent deletion failure as well."
+ )
+
+ self._format_exception(e)
+
+ def get_sf(self, sf_id):
+ self.logger.debug("Getting Service Function %s from VIM", sf_id)
+ filter_dict = {"id": sf_id}
+ sf_list = self.get_sf_list(filter_dict)
+
+ if len(sf_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Service Function '{}' not found".format(sf_id)
+ )
+ elif len(sf_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Service Function with this criteria"
+ )
+
+ sf = sf_list[0]
+
+ return sf
+
+ def get_sf_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Service Function from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ self._reload_connection()
+ filter_dict_os = filter_dict.copy()
+
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
+ sf_list = sf_dict["port_pair_groups"]
+ self.__sf_os2mano(sf_list)
+
+ return sf_list
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_sf(self, sf_id):
+ self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+
+ try:
+ self._reload_connection()
+ self.neutron.delete_sfc_port_pair_group(sf_id)
+
+ return sf_id
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
+ self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
+
+ new_sfp = None
+
+ try:
+ self._reload_connection()
+ # In networking-sfc the MPLS encapsulation is legacy
+ # should be used when no full SFC Encapsulation is intended
+ correlation = "mpls"
+
+ if sfc_encap:
+ correlation = "nsh"
+
+ sfp_dict = {
+ "name": name,
+ "flow_classifiers": classifications,
+ "port_pair_groups": sfs,
+ "chain_parameters": {"correlation": correlation},
+ }
+
+ if spi:
+ sfp_dict["chain_id"] = spi
+
+ self.logger.info("Adding a new SFP to VIM, {}.".format(sfp_dict))
+ new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
+
+ return new_sfp["port_chain"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ if new_sfp:
+ try:
+ new_sfp_id = new_sfp.get("port_chain").get("id")
+ self.neutron.delete_sfc_port_chain(new_sfp_id)
+ except Exception:
+ self.logger.error(
+ "Creation of Service Function Path failed, with "
+ "subsequent deletion failure as well."
+ )
+
+ self._format_exception(e)
+
+ def get_sfp(self, sfp_id):
+ self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
+
+ filter_dict = {"id": sfp_id}
+ sfp_list = self.get_sfp_list(filter_dict)
+
+ if len(sfp_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Service Function Path '{}' not found".format(sfp_id)
+ )
+ elif len(sfp_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Service Function Path with this criteria"
+ )
+
+ sfp = sfp_list[0]
+
+ return sfp
+
+ def get_sfp_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ self._reload_connection()
+ filter_dict_os = filter_dict.copy()
+
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
+ sfp_list = sfp_dict["port_chains"]
+ self.__sfp_os2mano(sfp_list)
+
+ return sfp_list
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_sfp(self, sfp_id):
+ self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
+
+ try:
+ self._reload_connection()
+ self.neutron.delete_sfc_port_chain(sfp_id)
+
+ return sfp_id
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
    def refresh_sfps_status(self, sfp_list):
        """Get the status of the service function paths.

        Params: the list of sfp identifiers
        Returns a dictionary with:
            vm_id: #VIM id of this service function path
            status: #Mandatory. Text with one of:
                # DELETED (not found at vim)
                # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
                # OTHER (Vim reported other status not understood)
                # ERROR (VIM indicates an ERROR status)
                # ACTIVE,
                # CREATING (on building process)
            error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
            vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
        """
        sfp_dict = {}
        self.logger.debug(
            "refresh_sfps status: Getting tenant SFP information from VIM"
        )

        for sfp_id in sfp_list:
            sfp = {}

            try:
                sfp_vim = self.get_sfp(sfp_id)

                # A truthy "spi" (service path identifier) marks the chain as
                # fully created -> ACTIVE; anything else is reported as OTHER.
                # NOTE(review): assumes the VIM answer always carries an "spi"
                # key (otherwise this raises KeyError) — confirm upstream.
                if sfp_vim["spi"]:
                    sfp["status"] = vmStatus2manoFormat["ACTIVE"]
                else:
                    sfp["status"] = "OTHER"
                    sfp["error_msg"] = "VIM status reported " + sfp["status"]

                sfp["vim_info"] = self.serialize(sfp_vim)

                # A VIM-reported fault overrides any earlier error message.
                if sfp_vim.get("fault"):
                    sfp["error_msg"] = str(sfp_vim["fault"])
            except vimconn.VimConnNotFoundException as e:
                self.logger.error("Exception getting sfp status: %s", str(e))
                sfp["status"] = "DELETED"
                sfp["error_msg"] = str(e)
            except vimconn.VimConnException as e:
                self.logger.error("Exception getting sfp status: %s", str(e))
                sfp["status"] = "VIM_ERROR"
                sfp["error_msg"] = str(e)

            sfp_dict[sfp_id] = sfp

        return sfp_dict
+
+ def refresh_sfis_status(self, sfi_list):
+ """Get the status of the service function instances
+ Params: the list of sfi identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function instance
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sfi_dict = {}
+ self.logger.debug(
+ "refresh_sfis status: Getting tenant sfi information from VIM"
+ )
+
+ for sfi_id in sfi_list:
+ sfi = {}
+
+ try:
+ sfi_vim = self.get_sfi(sfi_id)
+
+ if sfi_vim:
+ sfi["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sfi["status"] = "OTHER"
+ sfi["error_msg"] = "VIM status reported " + sfi["status"]
+
+ sfi["vim_info"] = self.serialize(sfi_vim)
+
+ if sfi_vim.get("fault"):
+ sfi["error_msg"] = str(sfi_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sfi status: %s", str(e))
+ sfi["status"] = "DELETED"
+ sfi["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sfi status: %s", str(e))
+ sfi["status"] = "VIM_ERROR"
+ sfi["error_msg"] = str(e)
+
+ sfi_dict[sfi_id] = sfi
+
+ return sfi_dict
+
+ def refresh_sfs_status(self, sf_list):
+ """Get the status of the service functions
+ Params: the list of sf identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sf_dict = {}
+ self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
+
+ for sf_id in sf_list:
+ sf = {}
+
+ try:
+ sf_vim = self.get_sf(sf_id)
+
+ if sf_vim:
+ sf["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sf["status"] = "OTHER"
+ sf["error_msg"] = "VIM status reported " + sf_vim["status"]
+
+ sf["vim_info"] = self.serialize(sf_vim)
+
+ if sf_vim.get("fault"):
+ sf["error_msg"] = str(sf_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sf status: %s", str(e))
+ sf["status"] = "DELETED"
+ sf["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sf status: %s", str(e))
+ sf["status"] = "VIM_ERROR"
+ sf["error_msg"] = str(e)
+
+ sf_dict[sf_id] = sf
+
+ return sf_dict
+
+ def refresh_classifications_status(self, classification_list):
+ """Get the status of the classifications
+ Params: the list of classification identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this classifier
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ classification_dict = {}
+ self.logger.debug(
+ "refresh_classifications status: Getting tenant classification information from VIM"
+ )
+
+ for classification_id in classification_list:
+ classification = {}
+
+ try:
+ classification_vim = self.get_classification(classification_id)
+
+ if classification_vim:
+ classification["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ classification["status"] = "OTHER"
+ classification["error_msg"] = (
+ "VIM status reported " + classification["status"]
+ )
+
+ classification["vim_info"] = self.serialize(classification_vim)
+
+ if classification_vim.get("fault"):
+ classification["error_msg"] = str(classification_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting classification status: %s", str(e))
+ classification["status"] = "DELETED"
+ classification["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting classification status: %s", str(e))
+ classification["status"] = "VIM_ERROR"
+ classification["error_msg"] = str(e)
+
+ classification_dict[classification_id] = classification
+
+ return classification_dict
+
+ @catch_any_exception
+ def new_affinity_group(self, affinity_group_data):
+ """Adds a server group to VIM
+ affinity_group_data contains a dictionary with information, keys:
+ name: name in VIM for the server group
+ type: affinity or anti-affinity
+ scope: Only nfvi-node allowed
+ Returns the server group identifier"""
+ self.logger.debug("Adding Server Group '%s'", str(affinity_group_data))
+ name = affinity_group_data["name"]
+ policy = affinity_group_data["type"]
+ self._reload_connection()
+ new_server_group = self.nova.server_groups.create(name, policy)
+ return new_server_group.id
+
+ @catch_any_exception
+ def get_affinity_group(self, affinity_group_id):
+ """Obtain server group details from the VIM. Returns the server group detais as a dict"""
+ self.logger.debug("Getting flavor '%s'", affinity_group_id)
+ self._reload_connection()
+ server_group = self.nova.server_groups.find(id=affinity_group_id)
+ return server_group.to_dict()
+
+ @catch_any_exception
+ def delete_affinity_group(self, affinity_group_id):
+ """Deletes a server group from the VIM. Returns the old affinity_group_id"""
+ self.logger.debug("Getting server group '%s'", affinity_group_id)
+ self._reload_connection()
+ self.nova.server_groups.delete(affinity_group_id)
+ return affinity_group_id
+
+ @catch_any_exception
+ def get_vdu_state(self, vm_id, host_is_required=False) -> list:
+ """Getting the state of a VDU.
+ Args:
+ vm_id (str): ID of an instance
+ host_is_required (Boolean): If the VIM account is non-admin, host info does not appear in server_dict
+ and if this is set to True, it raises KeyError.
+ Returns:
+ vdu_data (list): VDU details including state, flavor, host_info, AZ
+ """
+ self.logger.debug("Getting the status of VM")
+ self.logger.debug("VIM VM ID %s", vm_id)
+ self._reload_connection()
+ server_dict = self._find_nova_server(vm_id)
+ srv_attr = "OS-EXT-SRV-ATTR:host"
+ host_info = (
+ server_dict[srv_attr] if host_is_required else server_dict.get(srv_attr)
+ )
+ vdu_data = [
+ server_dict["status"],
+ server_dict["flavor"]["id"],
+ host_info,
+ server_dict["OS-EXT-AZ:availability_zone"],
+ ]
+ self.logger.debug("vdu_data %s", vdu_data)
+ return vdu_data
+
+ def check_compute_availability(self, host, server_flavor_details):
+ self._reload_connection()
+ hypervisor_search = self.nova.hypervisors.search(
+ hypervisor_match=host, servers=True
+ )
+ for hypervisor in hypervisor_search:
+ hypervisor_id = hypervisor.to_dict()["id"]
+ hypervisor_details = self.nova.hypervisors.get(hypervisor=hypervisor_id)
+ hypervisor_dict = hypervisor_details.to_dict()
+ hypervisor_temp = json.dumps(hypervisor_dict)
+ hypervisor_json = json.loads(hypervisor_temp)
+ resources_available = [
+ hypervisor_json["free_ram_mb"],
+ hypervisor_json["disk_available_least"],
+ hypervisor_json["vcpus"] - hypervisor_json["vcpus_used"],
+ ]
+ compute_available = all(
+ x > y for x, y in zip(resources_available, server_flavor_details)
+ )
+ if compute_available:
+ return host
+
+ def check_availability_zone(
+ self, old_az, server_flavor_details, old_host, host=None
+ ):
+ self._reload_connection()
+ az_check = {"zone_check": False, "compute_availability": None}
+ aggregates_list = self.nova.aggregates.list()
+ for aggregate in aggregates_list:
+ aggregate_details = aggregate.to_dict()
+ aggregate_temp = json.dumps(aggregate_details)
+ aggregate_json = json.loads(aggregate_temp)
+ if aggregate_json["availability_zone"] == old_az:
+ hosts_list = aggregate_json["hosts"]
+ if host is not None:
+ if host in hosts_list:
+ az_check["zone_check"] = True
+ available_compute_id = self.check_compute_availability(
+ host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["compute_availability"] = available_compute_id
+ else:
+ for check_host in hosts_list:
+ if check_host != old_host:
+ available_compute_id = self.check_compute_availability(
+ check_host, server_flavor_details
+ )
+ if available_compute_id is not None:
+ az_check["zone_check"] = True
+ az_check["compute_availability"] = available_compute_id
+ break
+ else:
+ az_check["zone_check"] = True
+ return az_check
+
+ @catch_any_exception
+ def migrate_instance(self, vm_id, compute_host=None):
+ """
+ Migrate a vdu
+ param:
+ vm_id: ID of an instance
+ compute_host: Host to migrate the vdu to
+ """
+ self._reload_connection()
+ vm_state = False
+ instance_state = self.get_vdu_state(vm_id, host_is_required=True)
+ server_flavor_id = instance_state[1]
+ server_hypervisor_name = instance_state[2]
+ server_availability_zone = instance_state[3]
+ server_flavor = self.nova.flavors.find(id=server_flavor_id).to_dict()
+ server_flavor_details = [
+ server_flavor["ram"],
+ server_flavor["disk"],
+ server_flavor["vcpus"],
+ ]
+ if compute_host == server_hypervisor_name:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to the same host '{}'".format(
+ vm_id, compute_host
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ az_status = self.check_availability_zone(
+ server_availability_zone,
+ server_flavor_details,
+ server_hypervisor_name,
+ compute_host,
+ )
+ availability_zone_check = az_status["zone_check"]
+ available_compute_id = az_status.get("compute_availability")
+
+ if availability_zone_check is False:
+ raise vimconn.VimConnException(
+ "Unable to migrate instance '{}' to a different availability zone".format(
+ vm_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ if available_compute_id is not None:
+ # disk_over_commit parameter for live_migrate method is not valid for Nova API version >= 2.25
+ self.nova.servers.live_migrate(
+ server=vm_id,
+ host=available_compute_id,
+ block_migration=True,
+ )
+ state = "MIGRATING"
+ changed_compute_host = ""
+ if state == "MIGRATING":
+ vm_state = self.__wait_for_vm(vm_id, "ACTIVE")
+ changed_compute_host = self.get_vdu_state(vm_id, host_is_required=True)[
+ 2
+ ]
+ if vm_state and changed_compute_host == available_compute_id:
+ self.logger.debug(
+ "Instance '{}' migrated to the new compute host '{}'".format(
+ vm_id, changed_compute_host
+ )
+ )
+ return state, available_compute_id
+ else:
+ raise vimconn.VimConnException(
+ "Migration Failed. Instance '{}' not moved to the new host {}".format(
+ vm_id, available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+ else:
+ raise vimconn.VimConnException(
+ "Compute '{}' not available or does not have enough resources to migrate the instance".format(
+ available_compute_id
+ ),
+ http_code=vimconn.HTTP_Bad_Request,
+ )
+
+ @catch_any_exception