return None
+ def _get_db_all_tasks(self):
+ """
+ Read the full content of the ro_tasks table and log it
+ :return: the list of ro_tasks, or None on error
+ """
+ try:
+ # Check the content of the DB:
+
+ # read and return
+ ro_task = self.db.get_list("ro_tasks")
+ for rt in ro_task:
+ self._log_ro_task(rt, None, None, "TASK_WF", "GET_ALL_TASKS")
+ return ro_task
+
+ except DbException as e:
+ self.logger.error("Database exception at _get_db_all_tasks: {}".format(e))
+ except Exception as e:
+ self.logger.critical(
+ "Unexpected exception at _get_db_all_tasks: {}".format(e), exc_info=True
+ )
+
+ return None
+
+ def _log_ro_task(self, ro_task, db_ro_task_update, db_ro_task_delete, mark, event):
+ """
+ Generate a log with the following format:
+
+ Mark;Event;ro_task_id;locked_at;modified_at;created_at;to_check_at;locked_by;
+ target_id;vim_info.refresh_at;vim_info;no_of_tasks;task_status;action_id;
+ task_array_index;task_id;task_action;task_item;task_args
+
+ Example:
+
+ TASK_WF;GET_TASK;888f1864-749a-4fc2-bc1a-97c0fffd6a6f:2;1642158724.8210013;
+ 1642158640.7986135;1642158640.7986135;1642158640.7986135;b134c9494e75:0a
+ ;vim:b7ff9e24-8868-4d68-8a57-a59dc11d0327;None;{'created': False,
+ 'created_items': None, 'vim_id': None, 'vim_name': None, 'vim_status': None,
+ 'vim_details': None, 'vim_message': None, 'refresh_at': None};1;SCHEDULED;
+ 888f1864-749a-4fc2-bc1a-97c0fffd6a6f;0;888f1864-749a-4fc2-bc1a-97c0fffd6a6f:2;
+ CREATE;image;{'filter_dict': {'name': 'ubuntu-os-cloud:image-family:ubuntu-1804-lts'}}
+ """
+ try:
+ line = []
+ i = 0
+ if ro_task is not None and isinstance(ro_task, dict):
+ for t in ro_task["tasks"]:
+ line.clear()
+ line.append(mark)
+ line.append(event)
+ line.append(ro_task.get("_id", ""))
+ line.append(str(ro_task.get("locked_at", "")))
+ line.append(str(ro_task.get("modified_at", "")))
+ line.append(str(ro_task.get("created_at", "")))
+ line.append(str(ro_task.get("to_check_at", "")))
+ line.append(str(ro_task.get("locked_by", "")))
+ line.append(str(ro_task.get("target_id", "")))
+ line.append(str(ro_task.get("vim_info", {}).get("refresh_at", "")))
+ line.append(str(ro_task.get("vim_info", "")))
+ line.append(str(ro_task.get("tasks", "")))
+ if isinstance(t, dict):
+ line.append(str(t.get("status", "")))
+ line.append(str(t.get("action_id", "")))
+ line.append(str(i))
+ line.append(str(t.get("task_id", "")))
+ line.append(str(t.get("action", "")))
+ line.append(str(t.get("item", "")))
+ line.append(str(t.get("find_params", "")))
+ line.append(str(t.get("params", "")))
+ else:
+ line.extend([""] * 2)
+ line.append(str(i))
+ line.extend([""] * 5)
+
+ i += 1
+ self.logger.debug(";".join(line))
+ elif db_ro_task_update is not None and isinstance(db_ro_task_update, dict):
+ i = 0
+ while True:
+ st = "tasks.{}.status".format(i)
+ if st not in db_ro_task_update:
+ break
+ line.clear()
+ line.append(mark)
+ line.append(event)
+ line.append(db_ro_task_update.get("_id", ""))
+ line.append(str(db_ro_task_update.get("locked_at", "")))
+ line.append(str(db_ro_task_update.get("modified_at", "")))
+ line.append("")
+ line.append(str(db_ro_task_update.get("to_check_at", "")))
+ line.append(str(db_ro_task_update.get("locked_by", "")))
+ line.append("")
+ line.append(str(db_ro_task_update.get("vim_info.refresh_at", "")))
+ line.append("")
+ line.append(str(db_ro_task_update.get("vim_info", "")))
+ line.append(str(str(db_ro_task_update).count(".status")))
+ line.append(str(db_ro_task_update.get(st, "")))
+ line.append("")
+ line.append(str(i))
+ line.extend([""] * 3)
+ i += 1
+ self.logger.debug(";".join(line))
+
+ elif db_ro_task_delete is not None and isinstance(db_ro_task_delete, dict):
+ line.clear()
+ line.append(mark)
+ line.append(event)
+ line.append(db_ro_task_delete.get("_id", ""))
+ line.append("")
+ line.append(str(db_ro_task_delete.get("modified_at", "")))
+ line.extend([""] * 13)
+ self.logger.debug(";".join(line))
+
+ else:
+ line.clear()
+ line.append(mark)
+ line.append(event)
+ line.extend([""] * 16)
+ self.logger.debug(";".join(line))
+
+ except Exception as e:
+ self.logger.error("Error logging ro_task: {}".format(e))
+
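+ # A minimal consumer-side sketch (illustrative, not called by the engine):
+ # each line logged above can be split on ";" back into the fields listed in
+ # the _log_ro_task docstring.
+ @staticmethod
+ def _parse_ro_task_log_line(line):
+ """Hypothetical helper: map one semicolon-separated log line to a dict."""
+ fields = [
+ "mark", "event", "ro_task_id", "locked_at", "modified_at",
+ "created_at", "to_check_at", "locked_by", "target_id",
+ "vim_info.refresh_at", "vim_info", "no_of_tasks", "task_status",
+ "action_id", "task_array_index", "task_id", "task_action",
+ "task_item", "task_args",
+ ]
+ return dict(zip(fields, line.split(";")))
+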
def _delete_task(self, ro_task, task_index, task_depends, db_update):
"""
Determine if this task needs to be done or superseded

return new_data

+ def get_pending_tasks(self):
+ return self.cvpClientApi.get_tasks_by_status("Pending")
+
+ def get_pending_tasks_old(self):
+ taskList = []
+ tasksField = {
+ "workOrderId": "workOrderId",
+ "workOrderState": "workOrderState",
+ "currentTaskName": "currentTaskName",
+ "description": "description",
+ "workOrderUserDefinedStatus": "workOrderUserDefinedStatus",
+ "note": "note",
+ "taskStatus": "taskStatus",
+ "workOrderDetails": "workOrderDetails",
+ }
+ tasks = self.cvpClientApi.get_tasks_by_status("Pending")
+
+ # Reduce task data to required fields
+ for task in tasks:
+ taskFacts = {}
+ for field in task.keys():
+ if field in tasksField:
+ taskFacts[tasksField[field]] = task[field]
+
+ taskList.append(taskFacts)
+
+ return taskList
+
def task_action(self, tasks, wait, state):
changed = False
data = dict()
import copy
import json
+import time
import requests
from requests.exceptions import ConnectionError
return resp.json()
+ def post_headers_cmd(self, url, headers, post_fields_dict=None):
+ self._logger.debug("")
+
+ # obfuscate the password before logging the dict
+ if post_fields_dict and (
+ post_fields_dict.get("auth", {})
+ .get("identity", {})
+ .get("password", {})
+ .get("user", {})
+ .get("password")
+ ):
+ post_fields_dict_copy = copy.deepcopy(post_fields_dict)
+ post_fields_dict_copy["auth"]["identity"]["password"]["user"][
+ "password"
+ ] = "******"
+ json_data_log = post_fields_dict_copy
+ else:
+ json_data_log = post_fields_dict
+
+ self._logger.debug("Request POSTFIELDS: {}".format(json.dumps(json_data_log)))
+ resp = self._request("POST_HEADERS", url, headers, data=post_fields_dict)
+
+ return resp.text
+
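+ # Illustrative payload shape (assumed Keystone-v3 style) that the obfuscation
+ # above targets:
+ # {"auth": {"identity": {"password": {"user": {"name": "admin",
+ # "password": "s3cret"}}}}}
+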
def post_cmd(self, url, headers, post_fields_dict=None):
self._logger.debug("")
return resp.text
+ def _get_token(self, headers):
+ if self.auth_url:
+ self._logger.debug("Current Token: {}".format(self.token))
+ auth_url = self.auth_url + "auth/tokens"
+
+ if self.token is None or self._token_expired():
+
+ resp = self._request_noauth(
+ url=auth_url, op="POST", headers=headers, data=self.auth_dict
+ )
+ self.token = resp.headers.get("x-subject-token")
+ self.last_token_time = time.time()
+ self._logger.debug("Obtained token: {}".format(self.token))
+
+ return self.token
+
+ def _token_expired(self):
+ current_time = time.time()
+
+ if self.last_token_time and (
+ current_time - self.last_token_time < self.token_timeout
+ ):
+ return False
+ else:
+ return True
+
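+ # Behavior sketch, assuming token_timeout holds a number of seconds: a token
+ # obtained at t0 is reused while time.time() - t0 < token_timeout; after that
+ # _token_expired() returns True and the next _get_token() re-authenticates.
+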
def _request(self, op, url, http_headers, data=None, retry_auth_error=True):
headers = http_headers.copy()
"virtual-port-group"
)
+ def get_vpgs(self):
+ return self.get_all_by_type(self.controller_url, "virtual-port-groups")
+
def get_vpg_by_name(self, vpg_name):
fq_name = ["default-global-system-config", self.fabric, vpg_name]
def get_url(self):
return self.url
+ def get_overlay_url(self):
+ return self.overlay_url
+
def _create_port(self, switch_id, switch_port, network, vlan):
"""
1 - Look for virtual port groups for provided switch_id, switch_port using name
raise vimconn.VimConnConnectionException(type(e).__name__ + ": " + str(e))
+ def get_availability_zones_list(self):
+ """Obtain AvailabilityZones from AWS"""
+ try:
+ self._reload_connection()
+ az_list = []
+
+ for az in self.conn.get_all_zones():
+ az_list.append(az.name)
+
+ return az_list
+ except Exception as e:
+ self.format_vimconn_exception(e)
+
def get_tenant_list(self, filter_dict={}):
"""Obtain tenants of VIM
filter_dict dictionary that can contain the following keys:
from azure.core.exceptions import ResourceNotFoundError
from azure.identity import ClientSecretCredential
from azure.mgmt.compute import ComputeManagementClient
+from azure.mgmt.compute.models import DiskCreateOption
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.profiles import ProfileDefinition
virtual_machine = creation_result.result()
self.logger.debug("created vm name: %s", vm_name)
+ """ Por ahora no hacer polling para ver si tarda menos
+ # Add disks if they are provided
+ if disk_list:
+ for disk_index, disk in enumerate(disk_list):
+ self.logger.debug(
+ "add disk size: %s, image: %s",
+ disk.get("size"),
+ disk.get("image"),
+ )
+ self._add_newvm_disk(
+ virtual_machine, vm_name, disk_index, disk, created_items
+ )
+
+ if start:
+ self.conn_compute.virtual_machines.start(self.resource_group, vm_name)
+ # start_result.wait()
+ """
+
return virtual_machine.id, created_items
+ # run_command_parameters = {
+ # "command_id": "RunShellScript", # For linux, don't change it
+ # "script": [
+ # "date > /tmp/test.txt"
+ # ]
+ # }
except Exception as e:
# Rollback vm creacion
vm_id = None
def _get_azure_availability_zones(self):
return self.AZURE_ZONES
+ def _add_newvm_disk(
+ self, virtual_machine, vm_name, disk_index, disk, created_items={}
+ ):
+ disk_name = None
+ data_disk = None
+
+ # Check if must create empty disk or from image
+ if disk.get("vim_id"):
+ # disk already exists, just get
+ parsed_id = azure_tools.parse_resource_id(disk.get("vim_id"))
+ disk_name = parsed_id.get("name")
+ data_disk = self.conn_compute.disks.get(self.resource_group, disk_name)
+ else:
+ disk_name = vm_name + "_DataDisk_" + str(disk_index)
+ if not disk.get("image_id"):
+ self.logger.debug("create new data disk name: %s", disk_name)
+ async_disk_creation = self.conn_compute.disks.begin_create_or_update(
+ self.resource_group,
+ disk_name,
+ {
+ "location": self.region,
+ "disk_size_gb": disk.get("size"),
+ "creation_data": {"create_option": DiskCreateOption.empty},
+ },
+ )
+ data_disk = async_disk_creation.result()
+ created_items[data_disk.id] = True
+ else:
+ image_id = disk.get("image_id")
+
+ if azure_tools.is_valid_resource_id(image_id):
+ parsed_id = azure_tools.parse_resource_id(image_id)
+
+ # Check if image is snapshot or disk
+ image_name = parsed_id.get("name")
+ resource_type = parsed_id.get("resource_type")
+
+ if resource_type == "snapshots" or resource_type == "disks":
+ self.logger.debug("create disk from copy name: %s", image_name)
+ # Should we check that the snapshot exists?
+ async_disk_creation = (
+ self.conn_compute.disks.begin_create_or_update(
+ self.resource_group,
+ disk_name,
+ {
+ "location": self.region,
+ "creation_data": {
+ "create_option": "Copy",
+ "source_uri": image_id,
+ },
+ },
+ )
+ )
+ data_disk = async_disk_creation.result()
+ created_items[data_disk.id] = True
+ else:
+ raise vimconn.VimConnNotFoundException(
+ "Invalid image_id: {}".format(image_id)
+ )
+ else:
+ raise vimconn.VimConnNotFoundException(
+ "Invalid image_id: {}".format(image_id)
+ )
+
+ # Attach the disk created
+ virtual_machine.storage_profile.data_disks.append(
+ {
+ "lun": disk_index,
+ "name": disk_name,
+ "create_option": DiskCreateOption.attach,
+ "managed_disk": {"id": data_disk.id},
+ "disk_size_gb": disk.get("size"),
+ }
+ )
+ self.logger.debug("attach disk name: %s", disk_name)
+ self.conn_compute.virtual_machines.begin_create_or_update(
+ self.resource_group, virtual_machine.name, virtual_machine
+ )
+
+ # It is necessary to extract data from image_id to create the VM with this format
+ # "image_reference": {
+ # "publisher": vm_reference["publisher"],
+ # "offer": vm_reference["offer"],
+ # "sku": vm_reference["sku"],
+ # "version": vm_reference["version"]
+ # },
def _get_image_reference(self, image_id):
try:
# The data input format example:
log_level=None,
config=config,
)
+
+ """
+ logger.debug("List images")
+ image = azure.get_image_list({"name": "Canonical:UbuntuServer:18.04-LTS:18.04.201809110"})
+ logger.debug("image: {}".format(image))
+
+ logger.debug("List networks")
+ network_list = azure.get_network_list({"name": "internal"})
+ logger.debug("Network_list: {}".format(network_list))
+
+ logger.debug("List flavors")
+ flavors = azure.get_flavor_id_from_data({"vcpus": 2})
+ logger.debug("flavors: {}".format(flavors))
+ """
+
+ """
+ # Create network and test machine
+ #new_network_id, _ = azure.new_network("testnet1", "data")
+ new_network_id = ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers")
+ "/Microsoft.Network/virtualNetworks/osm_vnet/subnets/testnet1"
+ ).format(test_params["resource_group"])
+ logger.debug("new_network_id: {}".format(new_network_id))
+
+ logger.debug("Delete network")
+ new_network_id = azure.delete_network(new_network_id)
+ logger.debug("deleted network_id: {}".format(new_network_id))
+ """
+
+ """
+ logger.debug("List networks")
+ network_list = azure.get_network_list({"name": "internal"})
+ logger.debug("Network_list: {}".format(network_list))
+
+ logger.debug("Show machine isabelvm")
+ vmachine = azure.get_vminstance( ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}"
+ "/providers/Microsoft.Compute/virtualMachines/isabelVM"
+ ).format(test_params["resource_group"])
+ )
+ logger.debug("Vmachine: {}".format(vmachine))
+ """
+
+ """
+ logger.debug("List images")
+ image = azure.get_image_list({"name": "Canonical:UbuntuServer:16.04"})
+ # image = azure.get_image_list({"name": "Canonical:UbuntuServer:18.04-LTS"})
+ logger.debug("image: {}".format(image))
+ """
+
+ """
+ # Create network and test machine
+ new_network_id, _ = azure.new_network("testnet1", "data")
+ image_id = ("/Subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/Providers/Microsoft.Compute"
+ "/Locations/northeurope/Publishers/Canonical/ArtifactTypes/VMImage/Offers/UbuntuServer"
+ "/Skus/18.04-LTS/Versions/18.04.201809110")
+ """
+ """
+
+ network_id = ("subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}
+ "/providers/Microsoft.Network/virtualNetworks/osm_vnet/subnets/internal"
+ ).format(test_params["resource_group"])
+ """
+
+ """
+ logger.debug("Create machine")
+ image_id = ("/Subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/Providers/Microsoft.Compute/Locations"
+ "/northeurope/Publishers/Canonical/ArtifactTypes/VMImage/Offers/UbuntuServer/Skus/18.04-LTS"
+ "/Versions/18.04.202103151")
+ cloud_config = {"user-data": (
+ "#cloud-config\n"
+ "password: osm4u\n"
+ "chpasswd: { expire: False }\n"
+ "ssh_pwauth: True\n\n"
+ "write_files:\n"
+ "- content: |\n"
+ " # My new helloworld file\n\n"
+ " owner: root:root\n"
+ " permissions: '0644'\n"
+ " path: /root/helloworld.txt",
+ "key-pairs": [
+ ("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/p7fuw/W0+6uhx9XNPY4dN/K2cXZweDfjJN8W/sQ1AhKvn"
+ "j0MF+dbBdsd2tfq6XUhx5LiKoGTunRpRonOw249ivH7pSyNN7FYpdLaij7Krn3K+QRNEOahMI4eoqdglVftA3"
+ "vlw4Oe/aZOU9BXPdRLxfr9hRKzg5zkK91/LBkEViAijpCwK6ODPZLDDUwY4iihYK9R5eZ3fmM4+3k3Jd0hPRk"
+ "B5YbtDQOu8ASWRZ9iTAWqr1OwQmvNc6ohSVg1tbq3wSxj/5bbz0J24A7TTpY0giWctne8Qkl/F2e0ZSErvbBB"
+ "GXKxfnq7sc23OK1hPxMAuS+ufzyXsnL1+fB4t2iF azureuser@osm-test-client\n"
+ )]
+ }
+ network_id = ("subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers"
+ "/Microsoft.Network/virtualNetworks/osm_vnet/subnets/internal"
+ ).format(test_params["resource_group"])
+ vm = azure.new_vminstance(name="isabelvm",
+ description="testvm",
+ start=True,
+ image_id=image_id,
+ flavor_id="Standard_B1ls",
+ net_list = [{"net_id": network_id, "name": "internal", "use": "mgmt", "floating_ip":True}],
+ cloud_config = cloud_config)
+ logger.debug("vm: {}".format(vm))
+ """
+
+ """
+ # Delete nonexistent vm
+ try:
+ logger.debug("Delete machine")
+ vm_id = ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Compute/"
+ "virtualMachines/isabelvm"
+ ).format(test_params["resource_group"])
+ created_items = {
+ ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Network"
+ "/networkInterfaces/isabelvm-nic-0"
+ ).format(test_params["resource_group"]): True,
+ ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Network"
+ "/publicIPAddresses/isabelvm-nic-0-public-ip"
+ ).format(test_params["resource_group"]): True
+ }
+ azure.delete_vminstance(vm_id, created_items)
+ except vimconn.VimConnNotFoundException as e:
+ print("Ok: excepcion no encontrada")
+ """
+
+ """
+ network_id = ("/subscriptions/5c1a2458-dfde-4adf-a4e3-08fa0e21d171/resourceGroups/{}/providers/Microsoft.Network"
+ "/virtualNetworks/osm_vnet/subnets/hfcloudinit-internal-1"
+ ).format(test_params["resource_group"])
+ azure.delete_network(network_id)
+ """
from random import choice as random_choice
import time
+from cryptography.hazmat.backends import default_backend as crypto_default_backend
+from cryptography.hazmat.primitives import serialization as crypto_serialization
+from cryptography.hazmat.primitives.asymmetric import rsa
from google.oauth2 import service_account
import googleapiclient.discovery
from osm_ro_plugin import vimconn
except Exception as e:
self._format_vimconn_exception(e)
+ def delete_inuse_nic(self, nic_name):
+ raise vimconn.VimConnNotImplemented("Not necessary")
+
def delete_image(self, image_id):
raise vimconn.VimConnNotImplemented("Not implemented")
# either password or ssh-keys are required
# we will always use ssh-keys; in case they are not available we will generate them
+ """
+ if cloud_config and cloud_config.get("key-pairs"):
+ key_data = ""
+ key_pairs = {}
+ if cloud_config.get("key-pairs"):
+ if isinstance(cloud_config["key-pairs"], list):
+ # Concatenate all provided public keys, one per line
+ key_data = ""
+ for key in cloud_config.get("key-pairs"):
+ key_data = key_data + key + "\n"
+ key_pairs = {
+ "key": "ssh-keys",
+ "value": key_data
+ }
+ else:
+ # If there is no ssh key in cloud config, a new key is generated:
+ _, key_data = self._generate_keys()
+ key_pairs = {
+ "key": "ssh-keys",
+ "value": "" + key_data
+ }
+ self.logger.debug("generated keys: %s", key_data)
+
+ metadata["items"].append(key_pairs)
+ """
self.logger.debug("metadata: %s", metadata)
return metadata
+ def _generate_keys(self):
+ """Method used to generate a pair of private/public keys.
+ This method is used because to create a vm in Azure we always need a key or a password
+ In some cases we may have a password in a cloud-init file but it may not be available
+ """
+ key = rsa.generate_private_key(
+ backend=crypto_default_backend(), public_exponent=65537, key_size=2048
+ )
+ private_key = key.private_bytes(
+ crypto_serialization.Encoding.PEM,
+ crypto_serialization.PrivateFormat.PKCS8,
+ crypto_serialization.NoEncryption(),
+ )
+ public_key = key.public_key().public_bytes(
+ crypto_serialization.Encoding.OpenSSH,
+ crypto_serialization.PublicFormat.OpenSSH,
+ )
+ private_key = private_key.decode("utf8")
+ # Change the first line because Paramiko needs an explicit start with 'BEGIN RSA PRIVATE KEY'
+ i = private_key.find("\n")
+ private_key = "-----BEGIN RSA PRIVATE KEY-----" + private_key[i:]
+ public_key = public_key.decode("utf8")
+
+ return private_key, public_key
+
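+ # Usage sketch (illustrative, assuming paramiko is available): thanks to the
+ # header rewrite above, the generated key can be loaded directly, e.g.
+ # import io, paramiko
+ # pkey = paramiko.RSAKey.from_private_key(io.StringIO(private_key))
+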
+ def _get_unused_vm_name(self, vm_name):
+ """
+ Checks the vm name and in case it is used adds a suffix to the name to allow creation
+ :return:
+ """
+ all_vms = (
+ self.conn_compute.instances()
+ .list(project=self.project, zone=self.zone)
+ .execute()
+ )
+ # Filter to the vms whose name starts with the indicated name; the GCE
+ # instances().list() response is a dict whose "items" key holds the vms
+ vms = [
+ vm for vm in all_vms.get("items", []) if vm["name"].startswith(vm_name)
+ ]
+ vm_names = [str(vm["name"]) for vm in vms]
+
+ # get the name with the first not used suffix
+ name_suffix = 0
+ name = vm_name # the first vm created will have no suffix
+
+ while name in vm_names:
+ name_suffix += 1
+ name = vm_name + "-" + str(name_suffix)
+
+ return name
+
def get_vminstance(self, vm_id):
"""
Obtaining the vm instance data from vm_id
else:
self._format_vimconn_exception(e)
+ def _get_net_name_from_resource_id(self, resource_id):
+ try:
+ net_name = str(resource_id.split("/")[-1])
+
+ return net_name
+ except Exception:
+ raise vimconn.VimConnException(
+ "Unable to get google cloud net_name from invalid resource_id format '{}'".format(
+ resource_id
+ )
+ )
+
def _get_resource_name_from_resource_id(self, resource_id):
"""
Obtains resource_name from the google cloud complete identifier: resource_name will always be the last item
)
self._format_vimconn_exception(e)
+ def _get_default_admin_user(self, image_id):
+ if "ubuntu" in image_id.lower():
+ return "ubuntu"
+ else:
+ return self._default_admin_user
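+ # e.g. an illustrative image_id "projects/ubuntu-os-cloud/global/images/family/ubuntu-2004-lts"
+ # yields "ubuntu"; any other image falls back to self._default_admin_user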
+
def _create_firewall_rules(self, network):
"""
Creates the necessary firewall rules to allow the traffic in the network
This module contains unit tests for the OpenStack VIM connector
Run this directly with python2 or python3.
"""
+import copy
from copy import deepcopy
import logging
import unittest
+import mock
from mock import MagicMock, patch
+from neutronclient.v2_0.client import Client
from novaclient import exceptions as nvExceptions
from novaclient.exceptions import ClientException, Conflict
+from osm_ro_plugin import vimconn
from osm_ro_plugin.vimconn import (
VimConnConnectionException,
VimConnException,
}
+class TestSfcOperations(unittest.TestCase):
+ @mock.patch("logging.getLogger", autospec=True)
+ def setUp(self, mock_logger):
+ # Instantiate a dummy VIM connector so we can test it.
+ # It throws exceptions because of the dummy parameters, so we disable
+ # exception logging to keep them off the console.
+ mock_logger = logging.getLogger()
+ mock_logger.disabled = True
+ self.vimconn = vimconnector(
+ "123",
+ "openstackvim",
+ "456",
+ "789",
+ "http://dummy.url",
+ None,
+ "user",
+ "pass",
+ )
+
+ def _test_new_sfi(
+ self,
+ create_sfc_port_pair,
+ sfc_encap,
+ ingress_ports=["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ egress_ports=["230cdf1b-de37-4891-bc07-f9010cf1f967"],
+ ):
+ # input to VIM connector
+ name = "osm_sfi"
+ # + ingress_ports
+ # + egress_ports
+ # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
+ correlation = "nsh"
+ if sfc_encap is not None:
+ if not sfc_encap:
+ correlation = None
+
+ # what OpenStack is assumed to respond with (patch OpenStack's return value)
+ dict_from_neutron = {
+ "port_pair": {
+ "id": "3d7ddc13-923c-4332-971e-708ed82902ce",
+ "name": name,
+ "description": "",
+ "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "ingress": ingress_ports[0] if len(ingress_ports) else None,
+ "egress": egress_ports[0] if len(egress_ports) else None,
+ "service_function_parameters": {"correlation": correlation},
+ }
+ }
+ create_sfc_port_pair.return_value = dict_from_neutron
+
+ # what the VIM connector is expected to
+ # send to OpenStack based on the input
+ dict_to_neutron = {
+ "port_pair": {
+ "name": name,
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "egress": "230cdf1b-de37-4891-bc07-f9010cf1f967",
+ "service_function_parameters": {"correlation": correlation},
+ }
+ }
+
+ # call the VIM connector
+ if sfc_encap is None:
+ result = self.vimconn.new_sfi(name, ingress_ports, egress_ports)
+ else:
+ result = self.vimconn.new_sfi(name, ingress_ports, egress_ports, sfc_encap)
+
+ # assert that the VIM connector made the expected call to OpenStack
+ create_sfc_port_pair.assert_called_with(dict_to_neutron)
+ # assert that the VIM connector had the expected result / return value
+ self.assertEqual(result, dict_from_neutron["port_pair"]["id"])
+
+ def _test_new_sf(self, create_sfc_port_pair_group):
+ # input to VIM connector
+ name = "osm_sf"
+ instances = [
+ "bbd01220-cf72-41f2-9e70-0669c2e5c4cd",
+ "12ba215e-3987-4892-bd3a-d0fd91eecf98",
+ "e25a7c79-14c8-469a-9ae1-f601c9371ffd",
+ ]
+
+ # what OpenStack is assumed to respond with (patch OpenStack's return value)
+ dict_from_neutron = {
+ "port_pair_group": {
+ "id": "3d7ddc13-923c-4332-971e-708ed82902ce",
+ "name": name,
+ "description": "",
+ "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "port_pairs": instances,
+ "group_id": 1,
+ "port_pair_group_parameters": {
+ "lb_fields": [],
+ "ppg_n_tuple_mapping": {
+ "ingress_n_tuple": {},
+ "egress_n_tuple": {},
+ },
+ },
+ }
+ }
+ create_sfc_port_pair_group.return_value = dict_from_neutron
+
+ # what the VIM connector is expected to
+ # send to OpenStack based on the input
+ dict_to_neutron = {
+ "port_pair_group": {
+ "name": name,
+ "port_pairs": [
+ "bbd01220-cf72-41f2-9e70-0669c2e5c4cd",
+ "12ba215e-3987-4892-bd3a-d0fd91eecf98",
+ "e25a7c79-14c8-469a-9ae1-f601c9371ffd",
+ ],
+ }
+ }
+
+ # call the VIM connector
+ result = self.vimconn.new_sf(name, instances)
+
+ # assert that the VIM connector made the expected call to OpenStack
+ create_sfc_port_pair_group.assert_called_with(dict_to_neutron)
+ # assert that the VIM connector had the expected result / return value
+ self.assertEqual(result, dict_from_neutron["port_pair_group"]["id"])
+
+ def _test_new_sfp(self, create_sfc_port_chain, sfc_encap, spi):
+ # input to VIM connector
+ name = "osm_sfp"
+ classifications = [
+ "2bd2a2e5-c5fd-4eac-a297-d5e255c35c19",
+ "00f23389-bdfa-43c2-8b16-5815f2582fa8",
+ ]
+ sfs = [
+ "2314daec-c262-414a-86e3-69bb6fa5bc16",
+ "d8bfdb5d-195e-4f34-81aa-6135705317df",
+ ]
+
+ # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround)
+ correlation = "nsh"
+ chain_id = 33
+ if spi:
+ chain_id = spi
+
+ # what OpenStack is assumed to respond with (patch OpenStack's return value)
+ dict_from_neutron = {
+ "port_chain": {
+ "id": "5bc05721-079b-4b6e-a235-47cac331cbb6",
+ "name": name,
+ "description": "",
+ "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c",
+ "chain_id": chain_id,
+ "flow_classifiers": classifications,
+ "port_pair_groups": sfs,
+ "chain_parameters": {"correlation": correlation},
+ }
+ }
+ create_sfc_port_chain.return_value = dict_from_neutron
+
+ # what the VIM connector is expected to
+ # send to OpenStack based on the input
+ dict_to_neutron = {
+ "port_chain": {
+ "name": name,
+ "flow_classifiers": [
+ "2bd2a2e5-c5fd-4eac-a297-d5e255c35c19",
+ "00f23389-bdfa-43c2-8b16-5815f2582fa8",
+ ],
+ "port_pair_groups": [
+ "2314daec-c262-414a-86e3-69bb6fa5bc16",
+ "d8bfdb5d-195e-4f34-81aa-6135705317df",
+ ],
+ "chain_parameters": {"correlation": correlation},
+ }
+ }
+ if spi:
+ dict_to_neutron["port_chain"]["chain_id"] = spi
+
+ # call the VIM connector
+ if sfc_encap is None:
+ dict_to_neutron["port_chain"]["chain_parameters"] = {"correlation": "mpls"}
+ if spi is None:
+ result = self.vimconn.new_sfp(
+ name, classifications, sfs, sfc_encap=False
+ )
+ else:
+ result = self.vimconn.new_sfp(
+ name, classifications, sfs, sfc_encap=False, spi=spi
+ )
+ else:
+ if spi is None:
+ result = self.vimconn.new_sfp(name, classifications, sfs, sfc_encap)
+ else:
+ result = self.vimconn.new_sfp(
+ name, classifications, sfs, sfc_encap, spi
+ )
+
+ # assert that the VIM connector made the expected call to OpenStack
+ create_sfc_port_chain.assert_called_with(dict_to_neutron)
+ # assert that the VIM connector had the expected result / return value
+ self.assertEqual(result, dict_from_neutron["port_chain"]["id"])
+
+ def _test_new_classification(self, create_sfc_flow_classifier, ctype):
+ # input to VIM connector
+ name = "osm_classification"
+ definition = {
+ "ethertype": "IPv4",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "protocol": "tcp",
+ "source_ip_prefix": "192.168.2.0/24",
+ "source_port_range_max": 99,
+ "source_port_range_min": 50,
+ }
+
+ # what OpenStack is assumed to respond with (patch OpenStack's return value)
+ dict_from_neutron = {"flow_classifier": copy.copy(definition)}
+ dict_from_neutron["flow_classifier"][
+ "id"
+ ] = "7735ec2c-fddf-4130-9712-32ed2ab6a372"
+ dict_from_neutron["flow_classifier"]["name"] = name
+ dict_from_neutron["flow_classifier"]["description"] = ""
+ dict_from_neutron["flow_classifier"][
+ "tenant_id"
+ ] = "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c"
+ dict_from_neutron["flow_classifier"][
+ "project_id"
+ ] = "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c"
+ create_sfc_flow_classifier.return_value = dict_from_neutron
+
+ # what the VIM connector is expected to
+ # send to OpenStack based on the input
+ dict_to_neutron = {"flow_classifier": copy.copy(definition)}
+ dict_to_neutron["flow_classifier"]["name"] = "osm_classification"
+
+ # call the VIM connector
+ result = self.vimconn.new_classification(name, ctype, definition)
+
+ # assert that the VIM connector made the expected call to OpenStack
+ create_sfc_flow_classifier.assert_called_with(dict_to_neutron)
+ # assert that the VIM connector had the expected result / return value
+ self.assertEqual(result, dict_from_neutron["flow_classifier"]["id"])
+
+ @mock.patch.object(Client, "create_sfc_flow_classifier")
+ def test_new_classification(self, create_sfc_flow_classifier):
+ self._test_new_classification(
+ create_sfc_flow_classifier, "legacy_flow_classifier"
+ )
+
+ @mock.patch.object(Client, "create_sfc_flow_classifier")
+ def test_new_classification_unsupported_type(self, create_sfc_flow_classifier):
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_classification,
+ create_sfc_flow_classifier,
+ "h265",
+ )
+
+ @mock.patch.object(Client, "create_sfc_port_pair")
+ def test_new_sfi_with_sfc_encap(self, create_sfc_port_pair):
+ self._test_new_sfi(create_sfc_port_pair, True)
+
+ @mock.patch.object(Client, "create_sfc_port_pair")
+ def test_new_sfi_without_sfc_encap(self, create_sfc_port_pair):
+ self._test_new_sfi(create_sfc_port_pair, False)
+
+ @mock.patch.object(Client, "create_sfc_port_pair")
+ def test_new_sfi_default_sfc_encap(self, create_sfc_port_pair):
+ self._test_new_sfi(create_sfc_port_pair, None)
+
+ @mock.patch.object(Client, "create_sfc_port_pair")
+ def test_new_sfi_bad_ingress_ports(self, create_sfc_port_pair):
+ ingress_ports = [
+ "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "a0273f64-82c9-11e7-b08f-6328e53f0fa7",
+ ]
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sfi,
+ create_sfc_port_pair,
+ True,
+ ingress_ports=ingress_ports,
+ )
+ ingress_ports = []
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sfi,
+ create_sfc_port_pair,
+ True,
+ ingress_ports=ingress_ports,
+ )
+
+ @mock.patch.object(Client, "create_sfc_port_pair")
+ def test_new_sfi_bad_egress_ports(self, create_sfc_port_pair):
+ egress_ports = [
+ "230cdf1b-de37-4891-bc07-f9010cf1f967",
+ "b41228fe-82c9-11e7-9b44-17504174320b",
+ ]
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sfi,
+ create_sfc_port_pair,
+ True,
+ egress_ports=egress_ports,
+ )
+ egress_ports = []
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sfi,
+ create_sfc_port_pair,
+ True,
+ egress_ports=egress_ports,
+ )
+
+ @mock.patch.object(vimconnector, "get_sfi")
+ @mock.patch.object(Client, "create_sfc_port_pair_group")
+ def test_new_sf(self, create_sfc_port_pair_group, get_sfi):
+ get_sfi.return_value = {"sfc_encap": True}
+ self._test_new_sf(create_sfc_port_pair_group)
+
+ @mock.patch.object(vimconnector, "get_sfi")
+ @mock.patch.object(Client, "create_sfc_port_pair_group")
+ def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group, get_sfi):
+ get_sfi.return_value = {"sfc_encap": "nsh"}
+ self.assertRaises(
+ vimconn.VimConnNotSupportedException,
+ self._test_new_sf,
+ create_sfc_port_pair_group,
+ )
+
+ @mock.patch.object(Client, "create_sfc_port_chain")
+ def test_new_sfp_with_sfc_encap(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, True, None)
+
+ @mock.patch.object(Client, "create_sfc_port_chain")
+ def test_new_sfp_without_sfc_encap(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, None, None)
+ self._test_new_sfp(create_sfc_port_chain, None, 25)
+
+ @mock.patch.object(Client, "create_sfc_port_chain")
+ def test_new_sfp_default_sfc_encap(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, None, None)
+
+ @mock.patch.object(Client, "create_sfc_port_chain")
+ def test_new_sfp_with_sfc_encap_spi(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, True, 25)
+
+ @mock.patch.object(Client, "create_sfc_port_chain")
+ def test_new_sfp_default_sfc_encap_spi(self, create_sfc_port_chain):
+ self._test_new_sfp(create_sfc_port_chain, None, 25)
+
+ @mock.patch.object(Client, "list_sfc_flow_classifiers")
+ def test_get_classification_list(self, list_sfc_flow_classifiers):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_flow_classifiers.return_value = {
+ "flow_classifiers": [
+ {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "description": "",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "destination_port_range_max": None,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ }
+ ]
+ }
+
+ # call the VIM connector
+ filter_dict = {"protocol": "tcp", "ethertype": "IPv4"}
+ result = self.vimconn.get_classification_list(filter_dict.copy())
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_flow_classifiers.assert_called_with(**filter_dict)
+ # assert that the VIM connector successfully
+ # translated and returned the OpenStack result
+ self.assertEqual(
+ result,
+ [
+ {
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ "description": "",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "ctype": "legacy_flow_classifier",
+ "definition": {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "destination_port_range_max": None,
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ },
+ }
+ ],
+ )
+
+ def _test_get_sfi_list(self, list_port_pair, correlation, sfc_encap):
+ # what OpenStack is assumed to return to the VIM connector
+ list_port_pair.return_value = {
+ "port_pairs": [
+ {
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "service_function_parameters": {"correlation": correlation},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi",
+ }
+ ]
+ }
+
+ # call the VIM connector
+ filter_dict = {"name": "osm_sfi", "description": ""}
+ result = self.vimconn.get_sfi_list(filter_dict.copy())
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_port_pair.assert_called_with(**filter_dict)
+ # assert that the VIM connector successfully
+ # translated and returned the OpenStack result
+ self.assertEqual(
+ result,
+ [
+ {
+ "ingress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ "sfc_encap": sfc_encap,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi",
+ }
+ ],
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_pairs")
+ def test_get_sfi_list_with_sfc_encap(self, list_sfc_port_pairs):
+ self._test_get_sfi_list(list_sfc_port_pairs, "nsh", True)
+
+ @mock.patch.object(Client, "list_sfc_port_pairs")
+ def test_get_sfi_list_without_sfc_encap(self, list_sfc_port_pairs):
+ self._test_get_sfi_list(list_sfc_port_pairs, None, False)
+
+ @mock.patch.object(Client, "list_sfc_port_pair_groups")
+ def test_get_sf_list(self, list_sfc_port_pair_groups):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_pair_groups.return_value = {
+ "port_pair_groups": [
+ {
+ "port_pairs": [
+ "08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2",
+ "0d63799c-82d6-11e7-8deb-a746bb3ae9f5",
+ ],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "port_pair_group_parameters": {},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "f4a0bde8-82d5-11e7-90e1-a72b762fa27f",
+ "name": "osm_sf",
+ }
+ ]
+ }
+
+ # call the VIM connector
+ filter_dict = {"name": "osm_sf", "description": ""}
+ result = self.vimconn.get_sf_list(filter_dict.copy())
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_pair_groups.assert_called_with(**filter_dict)
+ # assert that the VIM connector successfully
+ # translated and returned the OpenStack result
+ self.assertEqual(
+ result,
+ [
+ {
+ "sfis": [
+ "08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2",
+ "0d63799c-82d6-11e7-8deb-a746bb3ae9f5",
+ ],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "f4a0bde8-82d5-11e7-90e1-a72b762fa27f",
+ "name": "osm_sf",
+ }
+ ],
+ )
+
+ def _test_get_sfp_list(self, list_sfc_port_chains, correlation, sfc_encap):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_chains.return_value = {
+ "port_chains": [
+ {
+ "port_pair_groups": [
+ "7d8e3bf8-82d6-11e7-a032-8ff028839d25",
+ "7dc9013e-82d6-11e7-a5a6-a3a8d78a5518",
+ ],
+ "flow_classifiers": [
+ "1333c2f4-82d7-11e7-a5df-9327f33d104e",
+ "1387ab44-82d7-11e7-9bb0-476337183905",
+ ],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "chain_parameters": {"correlation": correlation},
+ "chain_id": 40,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp",
+ }
+ ]
+ }
+
+ # call the VIM connector
+ filter_dict = {"name": "osm_sfp", "description": ""}
+ result = self.vimconn.get_sfp_list(filter_dict.copy())
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_chains.assert_called_with(**filter_dict)
+ # assert that the VIM connector successfully
+ # translated and returned the OpenStack result
+ self.assertEqual(
+ result,
+ [
+ {
+ "service_functions": [
+ "7d8e3bf8-82d6-11e7-a032-8ff028839d25",
+ "7dc9013e-82d6-11e7-a5a6-a3a8d78a5518",
+ ],
+ "classifications": [
+ "1333c2f4-82d7-11e7-a5df-9327f33d104e",
+ "1387ab44-82d7-11e7-9bb0-476337183905",
+ ],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "sfc_encap": sfc_encap,
+ "spi": 40,
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp",
+ }
+ ],
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_chains")
+ def test_get_sfp_list_with_sfc_encap(self, list_sfc_port_chains):
+ self._test_get_sfp_list(list_sfc_port_chains, "nsh", True)
+
+ @mock.patch.object(Client, "list_sfc_port_chains")
+ def test_get_sfp_list_without_sfc_encap(self, list_sfc_port_chains):
+ self._test_get_sfp_list(list_sfc_port_chains, None, False)
+
+ @mock.patch.object(Client, "list_sfc_flow_classifiers")
+ def test_get_classification(self, list_sfc_flow_classifiers):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_flow_classifiers.return_value = {
+ "flow_classifiers": [
+ {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "description": "",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "destination_port_range_max": None,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ }
+ ]
+ }
+
+ # call the VIM connector
+ result = self.vimconn.get_classification("22198366-d4e8-4d6b-b4d2-637d5d6cbb7d")
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_flow_classifiers.assert_called_with(
+ id="22198366-d4e8-4d6b-b4d2-637d5d6cbb7d"
+ )
+ # assert that VIM connector successfully returned the OpenStack result
+ self.assertEqual(
+ result,
+ {
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ "description": "",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "ctype": "legacy_flow_classifier",
+ "definition": {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "destination_port_range_max": None,
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ },
+ },
+ )
+
+ @mock.patch.object(Client, "list_sfc_flow_classifiers")
+ def test_get_classification_many_results(self, list_sfc_flow_classifiers):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_flow_classifiers.return_value = {
+ "flow_classifiers": [
+ {
+ "source_port_range_min": 2000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "description": "",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 2000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "destination_port_range_max": None,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d",
+ "name": "fc1",
+ },
+ {
+ "source_port_range_min": 1000,
+ "destination_ip_prefix": "192.168.3.0/24",
+ "protocol": "udp",
+ "description": "",
+ "ethertype": "IPv4",
+ "l7_parameters": {},
+ "source_port_range_max": 1000,
+ "destination_port_range_min": 3000,
+ "source_ip_prefix": "192.168.2.0/24",
+ "logical_destination_port": None,
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "destination_port_range_max": None,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b",
+ "id": "3196bafc-82dd-11e7-a205-9bf6c14b0721",
+ "name": "fc2",
+ },
+ ]
+ }
+
+ # call the VIM connector
+ self.assertRaises(
+ vimconn.VimConnConflictException,
+ self.vimconn.get_classification,
+ "3196bafc-82dd-11e7-a205-9bf6c14b0721",
+ )
+
+ # assert the VIM connector called OpenStack with the expected filter
+ list_sfc_flow_classifiers.assert_called_with(
+ id="3196bafc-82dd-11e7-a205-9bf6c14b0721"
+ )
+
+ @mock.patch.object(Client, "list_sfc_flow_classifiers")
+ def test_get_classification_no_results(self, list_sfc_flow_classifiers):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_flow_classifiers.return_value = {"flow_classifiers": []}
+
+ # call the VIM connector
+ self.assertRaises(
+ vimconn.VimConnNotFoundException,
+ self.vimconn.get_classification,
+ "3196bafc-82dd-11e7-a205-9bf6c14b0721",
+ )
+
+ # assert the VIM connector called OpenStack with the expected filter
+ list_sfc_flow_classifiers.assert_called_with(
+ id="3196bafc-82dd-11e7-a205-9bf6c14b0721"
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_pairs")
+ def test_get_sfi(self, list_sfc_port_pairs):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_pairs.return_value = {
+ "port_pairs": [
+ {
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "service_function_parameters": {"correlation": "nsh"},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi1",
+ },
+ ]
+ }
+
+ # call the VIM connector
+ result = self.vimconn.get_sfi("c121ebdd-7f2d-4213-b933-3325298a6966")
+
+ # assert the VIM connector called OpenStack with the expected filter
+ list_sfc_port_pairs.assert_called_with(
+ id="c121ebdd-7f2d-4213-b933-3325298a6966"
+ )
+ # assert the VIM connector successfully returned the OpenStack result
+ self.assertEqual(
+ result,
+ {
+ "ingress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ "egress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"],
+ "sfc_encap": True,
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi1",
+ },
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_pairs")
+ def test_get_sfi_many_results(self, list_sfc_port_pairs):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_pairs.return_value = {
+ "port_pairs": [
+ {
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "service_function_parameters": {"correlation": "nsh"},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c121ebdd-7f2d-4213-b933-3325298a6966",
+ "name": "osm_sfi1",
+ },
+ {
+ "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc",
+ "service_function_parameters": {"correlation": "nsh"},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "c0436d92-82db-11e7-8f9c-5fa535f1261f",
+ "name": "osm_sfi2",
+ },
+ ]
+ }
+
+ # call the VIM connector
+ self.assertRaises(
+ vimconn.VimConnConflictException,
+ self.vimconn.get_sfi,
+ "c0436d92-82db-11e7-8f9c-5fa535f1261f",
+ )
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_pairs.assert_called_with(
+ id="c0436d92-82db-11e7-8f9c-5fa535f1261f"
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_pairs")
+ def test_get_sfi_no_results(self, list_sfc_port_pairs):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_pairs.return_value = {"port_pairs": []}
+
+ # call the VIM connector
+ self.assertRaises(
+ vimconn.VimConnNotFoundException,
+ self.vimconn.get_sfi,
+ "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
+ )
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_pairs.assert_called_with(
+ id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_pair_groups")
+ def test_get_sf(self, list_sfc_port_pair_groups):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_pair_groups.return_value = {
+ "port_pair_groups": [
+ {
+ "port_pairs": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "port_pair_group_parameters": {},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d",
+ "name": "osm_sf1",
+ }
+ ]
+ }
+
+ # call the VIM connector
+ result = self.vimconn.get_sf("b22892fc-82d9-11e7-ae85-0fea6a3b3757")
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_pair_groups.assert_called_with(
+ id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
+ )
+ # assert that VIM connector successfully returned the OpenStack result
+ self.assertEqual(
+ result,
+ {
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "sfis": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"],
+ "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d",
+ "name": "osm_sf1",
+ },
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_pair_groups")
+ def test_get_sf_many_results(self, list_sfc_port_pair_groups):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_pair_groups.return_value = {
+ "port_pair_groups": [
+ {
+ "port_pairs": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "port_pair_group_parameters": {},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d",
+ "name": "osm_sf1",
+ },
+ {
+ "port_pairs": ["0d63799c-82d6-11e7-8deb-a746bb3ae9f5"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "port_pair_group_parameters": {},
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
+ "name": "osm_sf2",
+ },
+ ]
+ }
+
+ # call the VIM connector
+ self.assertRaises(
+ vimconn.VimConnConflictException,
+ self.vimconn.get_sf,
+ "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
+ )
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_pair_groups.assert_called_with(
+ id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_pair_groups")
+ def test_get_sf_no_results(self, list_sfc_port_pair_groups):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_pair_groups.return_value = {"port_pair_groups": []}
+
+ # call the VIM connector
+ self.assertRaises(
+ vimconn.VimConnNotFoundException,
+ self.vimconn.get_sf,
+ "b22892fc-82d9-11e7-ae85-0fea6a3b3757",
+ )
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_pair_groups.assert_called_with(
+ id="b22892fc-82d9-11e7-ae85-0fea6a3b3757"
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_chains")
+ def test_get_sfp(self, list_sfc_port_chains):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_chains.return_value = {
+ "port_chains": [
+ {
+ "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
+ "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "chain_parameters": {"correlation": "nsh"},
+ "chain_id": 40,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp1",
+ }
+ ]
+ }
+
+ # call the VIM connector
+ result = self.vimconn.get_sfp("821bc9be-82d7-11e7-8ce3-23a08a27ab47")
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_chains.assert_called_with(
+ id="821bc9be-82d7-11e7-8ce3-23a08a27ab47"
+ )
+ # assert that VIM connector successfully returned the OpenStack result
+ self.assertEqual(
+ result,
+ {
+ "service_functions": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
+ "classifications": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "sfc_encap": True,
+ "spi": 40,
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp1",
+ },
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_chains")
+ def test_get_sfp_many_results(self, list_sfc_port_chains):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_chains.return_value = {
+ "port_chains": [
+ {
+ "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
+ "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "chain_parameters": {"correlation": "nsh"},
+ "chain_id": 40,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47",
+ "name": "osm_sfp1",
+ },
+ {
+ "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"],
+ "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"],
+ "description": "",
+ "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "chain_parameters": {"correlation": "nsh"},
+ "chain_id": 50,
+ "project_id": "8f3019ef06374fa880a0144ad4bc1d7b",
+ "id": "5d002f38-82de-11e7-a770-f303f11ce66a",
+ "name": "osm_sfp2",
+ },
+ ]
+ }
+
+ # call the VIM connector
+ self.assertRaises(
+ vimconn.VimConnConflictException,
+ self.vimconn.get_sfp,
+ "5d002f38-82de-11e7-a770-f303f11ce66a",
+ )
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_chains.assert_called_with(
+ id="5d002f38-82de-11e7-a770-f303f11ce66a"
+ )
+
+ @mock.patch.object(Client, "list_sfc_port_chains")
+ def test_get_sfp_no_results(self, list_sfc_port_chains):
+ # what OpenStack is assumed to return to the VIM connector
+ list_sfc_port_chains.return_value = {"port_chains": []}
+
+ # call the VIM connector
+ self.assertRaises(
+ vimconn.VimConnNotFoundException,
+ self.vimconn.get_sfp,
+ "5d002f38-82de-11e7-a770-f303f11ce66a",
+ )
+
+ # assert that VIM connector called OpenStack with the expected filter
+ list_sfc_port_chains.assert_called_with(
+ id="5d002f38-82de-11e7-a770-f303f11ce66a"
+ )
+
+ @mock.patch.object(Client, "delete_sfc_flow_classifier")
+ def test_delete_classification(self, delete_sfc_flow_classifier):
+ result = self.vimconn.delete_classification(
+ "638f957c-82df-11e7-b7c8-132706021464"
+ )
+ delete_sfc_flow_classifier.assert_called_with(
+ "638f957c-82df-11e7-b7c8-132706021464"
+ )
+ self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
+
+ @mock.patch.object(Client, "delete_sfc_port_pair")
+ def test_delete_sfi(self, delete_sfc_port_pair):
+ result = self.vimconn.delete_sfi("638f957c-82df-11e7-b7c8-132706021464")
+ delete_sfc_port_pair.assert_called_with("638f957c-82df-11e7-b7c8-132706021464")
+ self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
+
+ @mock.patch.object(Client, "delete_sfc_port_pair_group")
+ def test_delete_sf(self, delete_sfc_port_pair_group):
+ result = self.vimconn.delete_sf("638f957c-82df-11e7-b7c8-132706021464")
+ delete_sfc_port_pair_group.assert_called_with(
+ "638f957c-82df-11e7-b7c8-132706021464"
+ )
+ self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
+
+ @mock.patch.object(Client, "delete_sfc_port_chain")
+ def test_delete_sfp(self, delete_sfc_port_chain):
+ result = self.vimconn.delete_sfp("638f957c-82df-11e7-b7c8-132706021464")
+ delete_sfc_port_chain.assert_called_with("638f957c-82df-11e7-b7c8-132706021464")
+ self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464")
+
+
class Status:
def __init__(self, s):
self.status = s
)
)
+ def delete_user(self, user_id):
+ """Delete a user from openstack VIM
+ Returns the user identifier"""
+ if self.debug:
+ print("osconnector: Deleting a user from VIM")
+
+ try:
+ self._reload_connection()
+ self.keystone.users.delete(user_id)
+
+ return 1, user_id
+ except ksExceptions.ConnectionError as e:
+ error_value = -vimconn.HTTP_Bad_Request
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
+ except ksExceptions.NotFound as e:
+ error_value = -vimconn.HTTP_Not_Found
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
+ except ksExceptions.ClientException as e: # TODO remove
+ error_value = -vimconn.HTTP_Bad_Request
+ error_text = (
+ type(e).__name__
+ + ": "
+ + (str(e) if len(e.args) == 0 else str(e.args[0]))
+ )
+
+ # TODO insert exception vimconn.HTTP_Unauthorized
+ # if we reach this point, it is because an exception was raised
+ self.logger.debug("delete_user " + error_text)
+
+ return error_value, error_text
+
def get_hosts_info(self):
"""Get the information of deployed hosts
Returns the hosts content"""
return error_value, error_text
+ def new_classification(self, name, ctype, definition):
+ self.logger.debug(
+ "Adding a new (Traffic) Classification to VIM, named %s", name
+ )
+
+ try:
+ new_class = None
+ self._reload_connection()
+
+ if ctype not in supportedClassificationTypes:
+ raise vimconn.VimConnNotSupportedException(
+ "OpenStack VIM connector does not support provided "
+ "Classification Type {}, supported ones are: {}".format(
+ ctype, supportedClassificationTypes
+ )
+ )
+
+ if not self._validate_classification(ctype, definition):
+ raise vimconn.VimConnException(
+ "Incorrect Classification definition for the type specified."
+ )
+
+ classification_dict = definition
+ classification_dict["name"] = name
+ new_class = self.neutron.create_sfc_flow_classifier(
+ {"flow_classifier": classification_dict}
+ )
+
+ return new_class["flow_classifier"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self.logger.error("Creation of Classification failed.")
+ self._format_exception(e)
+
+ def get_classification(self, class_id):
+ self.logger.debug(" Getting Classification %s from VIM", class_id)
+ filter_dict = {"id": class_id}
+ class_list = self.get_classification_list(filter_dict)
+
+ if len(class_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Classification '{}' not found".format(class_id)
+ )
+ elif len(class_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Classification with this criteria"
+ )
+
+ classification = class_list[0]
+
+ return classification
+
+ def get_classification_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Classifications from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ filter_dict_os = filter_dict.copy()
+ self._reload_connection()
+
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ classification_dict = self.neutron.list_sfc_flow_classifiers(
+ **filter_dict_os
+ )
+ classification_list = classification_dict["flow_classifiers"]
+ self.__classification_os2mano(classification_list)
+
+ return classification_list
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_classification(self, class_id):
+ self.logger.debug("Deleting Classification '%s' from VIM", class_id)
+
+ try:
+ self._reload_connection()
+ self.neutron.delete_sfc_flow_classifier(class_id)
+
+ return class_id
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
+ self.logger.debug(
+ "Adding a new Service Function Instance to VIM, named '%s'", name
+ )
+
+ try:
+ new_sfi = None
+ self._reload_connection()
+ correlation = None
+
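+ # networking-sfc port pairs signal SFC encapsulation support via
+ # service_function_parameters: "nsh" enables NSH, None means no
+ # encapsulation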
+ if sfc_encap:
+ correlation = "nsh"
+
+ if len(ingress_ports) != 1:
+ raise vimconn.VimConnNotSupportedException(
+ "OpenStack VIM connector can only have 1 ingress port per SFI"
+ )
+
+ if len(egress_ports) != 1:
+ raise vimconn.VimConnNotSupportedException(
+ "OpenStack VIM connector can only have 1 egress port per SFI"
+ )
+
+ sfi_dict = {
+ "name": name,
+ "ingress": ingress_ports[0],
+ "egress": egress_ports[0],
+ "service_function_parameters": {"correlation": correlation},
+ }
+ new_sfi = self.neutron.create_sfc_port_pair({"port_pair": sfi_dict})
+
+ return new_sfi["port_pair"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ if new_sfi:
+ try:
+ self.neutron.delete_sfc_port_pair(new_sfi["port_pair"]["id"])
+ except Exception:
+ self.logger.error(
+ "Creation of Service Function Instance failed, with "
+ "subsequent deletion failure as well."
+ )
+
+ self._format_exception(e)
+
+ def get_sfi(self, sfi_id):
+ self.logger.debug("Getting Service Function Instance %s from VIM", sfi_id)
+ filter_dict = {"id": sfi_id}
+ sfi_list = self.get_sfi_list(filter_dict)
+
+ if len(sfi_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Service Function Instance '{}' not found".format(sfi_id)
+ )
+ elif len(sfi_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Service Function Instance with this criteria"
+ )
+
+ sfi = sfi_list[0]
+
+ return sfi
+
+ def get_sfi_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Service Function Instances from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ self._reload_connection()
+ filter_dict_os = filter_dict.copy()
+
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
+ sfi_list = sfi_dict["port_pairs"]
+ self.__sfi_os2mano(sfi_list)
+
+ return sfi_list
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_sfi(self, sfi_id):
+ self.logger.debug("Deleting Service Function Instance '%s' from VIM", sfi_id)
+
+ try:
+ self._reload_connection()
+ self.neutron.delete_sfc_port_pair(sfi_id)
+
+ return sfi_id
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def new_sf(self, name, sfis, sfc_encap=True):
+ self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
+
+ try:
+ new_sf = None
+ self._reload_connection()
+ # correlation = None
+ # if sfc_encap:
+ # correlation = "nsh"
+
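+ # all port pairs grouped into a single port pair group must agree on
+ # SFC encapsulation, so validate each SFI against the requested
+ # sfc_encap flag before grouping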
+ for instance in sfis:
+ sfi = self.get_sfi(instance)
+
+ if sfi.get("sfc_encap") != sfc_encap:
+ raise vimconn.VimConnNotSupportedException(
+ "OpenStack VIM connector requires all SFIs of the "
+ "same SF to share the same SFC Encapsulation"
+ )
+
+ sf_dict = {"name": name, "port_pairs": sfis}
+ new_sf = self.neutron.create_sfc_port_pair_group(
+ {"port_pair_group": sf_dict}
+ )
+
+ return new_sf["port_pair_group"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ if new_sf:
+ try:
+ self.neutron.delete_sfc_port_pair_group(
+ new_sf["port_pair_group"]["id"]
+ )
+ except Exception:
+ self.logger.error(
+ "Creation of Service Function failed, with "
+ "subsequent deletion failure as well."
+ )
+
+ self._format_exception(e)
+
+ def get_sf(self, sf_id):
+ self.logger.debug("Getting Service Function %s from VIM", sf_id)
+ filter_dict = {"id": sf_id}
+ sf_list = self.get_sf_list(filter_dict)
+
+ if len(sf_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Service Function '{}' not found".format(sf_id)
+ )
+ elif len(sf_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Service Function with this criteria"
+ )
+
+ sf = sf_list[0]
+
+ return sf
+
+ def get_sf_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Service Function from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ self._reload_connection()
+ filter_dict_os = filter_dict.copy()
+
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
+ sf_list = sf_dict["port_pair_groups"]
+ self.__sf_os2mano(sf_list)
+
+ return sf_list
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_sf(self, sf_id):
+ self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+
+ try:
+ self._reload_connection()
+ self.neutron.delete_sfc_port_pair_group(sf_id)
+
+ return sf_id
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
+ self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
+
+ try:
+ new_sfp = None
+ self._reload_connection()
+ # In networking-sfc the MPLS encapsulation is legacy
+ # should be used when no full SFC Encapsulation is intended
+ correlation = "mpls"
+
+ if sfc_encap:
+ correlation = "nsh"
+
+ sfp_dict = {
+ "name": name,
+ "flow_classifiers": classifications,
+ "port_pair_groups": sfs,
+ "chain_parameters": {"correlation": correlation},
+ }
+
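+ # networking-sfc exposes the data-plane path identifier of a port
+ # chain as chain_id; passing spi pins it instead of letting Neutron
+ # assign one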
+ if spi:
+ sfp_dict["chain_id"] = spi
+
+ new_sfp = self.neutron.create_sfc_port_chain({"port_chain": sfp_dict})
+
+ return new_sfp["port_chain"]["id"]
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ if new_sfp:
+ try:
+ self.neutron.delete_sfc_port_chain(new_sfp["port_chain"]["id"])
+ except Exception:
+ self.logger.error(
+ "Creation of Service Function Path failed, with "
+ "subsequent deletion failure as well."
+ )
+
+ self._format_exception(e)
+
+ def get_sfp(self, sfp_id):
+ self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
+
+ filter_dict = {"id": sfp_id}
+ sfp_list = self.get_sfp_list(filter_dict)
+
+ if len(sfp_list) == 0:
+ raise vimconn.VimConnNotFoundException(
+ "Service Function Path '{}' not found".format(sfp_id)
+ )
+ elif len(sfp_list) > 1:
+ raise vimconn.VimConnConflictException(
+ "Found more than one Service Function Path with this criteria"
+ )
+
+ sfp = sfp_list[0]
+
+ return sfp
+
+ def get_sfp_list(self, filter_dict={}):
+ self.logger.debug(
+ "Getting Service Function Paths from VIM filter: '%s'", str(filter_dict)
+ )
+
+ try:
+ self._reload_connection()
+ filter_dict_os = filter_dict.copy()
+
+ if self.api_version3 and "tenant_id" in filter_dict_os:
+ filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id")
+
+ sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
+ sfp_list = sfp_dict["port_chains"]
+ self.__sfp_os2mano(sfp_list)
+
+ return sfp_list
+ except (
+ neExceptions.ConnectionFailed,
+ ksExceptions.ClientException,
+ neExceptions.NeutronException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def delete_sfp(self, sfp_id):
+ self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
+
+ try:
+ self._reload_connection()
+ self.neutron.delete_sfc_port_chain(sfp_id)
+
+ return sfp_id
+ except (
+ neExceptions.ConnectionFailed,
+ neExceptions.NeutronException,
+ ksExceptions.ClientException,
+ ConnectionError,
+ ) as e:
+ self._format_exception(e)
+
+ def refresh_sfps_status(self, sfp_list):
+ """Get the status of the service function path
+ Params: the list of sfp identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function path
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sfp_dict = {}
+ self.logger.debug(
+ "refresh_sfps status: Getting tenant SFP information from VIM"
+ )
+
+ for sfp_id in sfp_list:
+ sfp = {}
+
+ try:
+ sfp_vim = self.get_sfp(sfp_id)
+
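+ # networking-sfc port chains have no lifecycle status field; a chain
+ # that reports an SPI (the chain_id, renamed "spi" by __sfp_os2mano)
+ # is treated as ACTIVE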
+ if sfp_vim["spi"]:
+ sfp["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sfp["status"] = "OTHER"
+ sfp["error_msg"] = "VIM status reported " + sfp["status"]
+
+ sfp["vim_info"] = self.serialize(sfp_vim)
+
+ if sfp_vim.get("fault"):
+ sfp["error_msg"] = str(sfp_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sfp status: %s", str(e))
+ sfp["status"] = "DELETED"
+ sfp["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sfp status: %s", str(e))
+ sfp["status"] = "VIM_ERROR"
+ sfp["error_msg"] = str(e)
+
+ sfp_dict[sfp_id] = sfp
+
+ return sfp_dict
+
+ def refresh_sfis_status(self, sfi_list):
+ """Get the status of the service function instances
+ Params: the list of sfi identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function instance
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sfi_dict = {}
+ self.logger.debug(
+ "refresh_sfis status: Getting tenant sfi information from VIM"
+ )
+
+ for sfi_id in sfi_list:
+ sfi = {}
+
+ try:
+ sfi_vim = self.get_sfi(sfi_id)
+
+ if sfi_vim:
+ sfi["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sfi["status"] = "OTHER"
+ sfi["error_msg"] = "VIM status reported " + sfi["status"]
+
+ sfi["vim_info"] = self.serialize(sfi_vim)
+
+ if sfi_vim.get("fault"):
+ sfi["error_msg"] = str(sfi_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sfi status: %s", str(e))
+ sfi["status"] = "DELETED"
+ sfi["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sfi status: %s", str(e))
+ sfi["status"] = "VIM_ERROR"
+ sfi["error_msg"] = str(e)
+
+ sfi_dict[sfi_id] = sfi
+
+ return sfi_dict
+
+ def refresh_sfs_status(self, sf_list):
+ """Get the status of the service functions
+ Params: the list of sf identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ sf_dict = {}
+ self.logger.debug("refresh_sfs status: Getting tenant sf information from VIM")
+
+ for sf_id in sf_list:
+ sf = {}
+
+ try:
+ sf_vim = self.get_sf(sf_id)
+
+ if sf_vim:
+ sf["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ sf["status"] = "OTHER"
+ sf["error_msg"] = "VIM status reported " + sf_vim["status"]
+
+ sf["vim_info"] = self.serialize(sf_vim)
+
+ if sf_vim.get("fault"):
+ sf["error_msg"] = str(sf_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting sf status: %s", str(e))
+ sf["status"] = "DELETED"
+ sf["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting sf status: %s", str(e))
+ sf["status"] = "VIM_ERROR"
+ sf["error_msg"] = str(e)
+
+ sf_dict[sf_id] = sf
+
+ return sf_dict
+
+ def refresh_classifications_status(self, classification_list):
+ """Get the status of the classifications
+ Params: the list of classification identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this classifier
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ classification_dict = {}
+ self.logger.debug(
+ "refresh_classifications status: Getting tenant classification information from VIM"
+ )
+
+ for classification_id in classification_list:
+ classification = {}
+
+ try:
+ classification_vim = self.get_classification(classification_id)
+
+ if classification_vim:
+ classification["status"] = vmStatus2manoFormat["ACTIVE"]
+ else:
+ classification["status"] = "OTHER"
+ classification["error_msg"] = (
+ "VIM status reported " + classification["status"]
+ )
+
+ classification["vim_info"] = self.serialize(classification_vim)
+
+ if classification_vim.get("fault"):
+ classification["error_msg"] = str(classification_vim["fault"])
+ except vimconn.VimConnNotFoundException as e:
+ self.logger.error("Exception getting classification status: %s", str(e))
+ classification["status"] = "DELETED"
+ classification["error_msg"] = str(e)
+ except vimconn.VimConnException as e:
+ self.logger.error("Exception getting classification status: %s", str(e))
+ classification["status"] = "VIM_ERROR"
+ classification["error_msg"] = str(e)
+
+ classification_dict[classification_id] = classification
+
+ return classification_dict
+
def new_affinity_group(self, affinity_group_data):
"""Adds a server group to VIM
affinity_group_data contains a dictionary with information, keys:
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
+ def new_vminstancefromJSON(self, vm_data):
+ """Adds a VM instance to VIM"""
+ """Returns the instance identifier"""
+ try:
+ self._get_my_tenant()
+ except Exception as e:
+ return -vimconn.HTTP_Not_Found, str(e)
+ print("VIMConnector: Adding a new VM instance from JSON to VIM")
+ payload_req = vm_data
+ try:
+ vim_response = requests.post(
+ self.url + "/" + self.tenant + "/servers",
+ headers=self.headers_req,
+ data=payload_req,
+ )
+ except requests.exceptions.RequestException as e:
+ print("new_vminstancefromJSON Exception: ", e.args)
+ return -vimconn.HTTP_Not_Found, str(e.args[0])
+ # print vim_response
+ # print vim_response.status_code
+ if vim_response.status_code == 200:
+ # print vim_response.json()
+ # print json.dumps(vim_response.json(), indent=4)
+ res, http_content = self._format_in(vim_response, new_image_response_schema)
+ # print http_content
+ if res:
+ r = self._remove_extra_items(http_content, new_image_response_schema)
+ if r is not None:
+ print("Warning: remove extra items ", r)
+ # print http_content
+ vminstance_id = http_content["server"]["id"]
+ print("Tenant image id: ", vminstance_id)
+ return vim_response.status_code, vminstance_id
+ else:
+ return -vimconn.HTTP_Bad_Request, http_content
+ else:
+ # print vim_response.text
+ jsonerror = self._format_jsonerror(vim_response)
+ text = 'Error in VIM "{}": not possible to add new vm instance. HTTP Response: {}. Error: {}'.format(
+ self.url, vim_response.status_code, jsonerror
+ )
+ # print text
+ return -vim_response.status_code, text
+
def new_vminstance(
self,
name,
except (requests.exceptions.RequestException, js_e.ValidationError) as e:
self._format_request_exception(e)
+ # NOT USED METHODS in current version
+
+ def host_vim2gui(self, host, server_dict):
+ """Transform host dictionary from VIM format to GUI format,
+ and append to the server_dict
+ """
+ if type(server_dict) is not dict:
+ print(
+ "vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary"
+ )
+ return
+ RAD = {}
+ occupation = {}
+ for numa in host["host"]["numas"]:
+ RAD_item = {}
+ occupation_item = {}
+ # memory
+ RAD_item["memory"] = {
+ "size": str(numa["memory"]) + "GB",
+ "eligible": str(numa["hugepages"]) + "GB",
+ }
+ occupation_item["memory"] = str(numa["hugepages_consumed"]) + "GB"
+ # cpus
+ RAD_item["cpus"] = {}
+ RAD_item["cpus"]["cores"] = []
+ RAD_item["cpus"]["eligible_cores"] = []
+ occupation_item["cores"] = []
+ for _ in range(0, len(numa["cores"]) // 2):
+ RAD_item["cpus"]["cores"].append([])
+ for core in numa["cores"]:
+ RAD_item["cpus"]["cores"][core["core_id"]].append(core["thread_id"])
+ if "status" not in core:
+ RAD_item["cpus"]["eligible_cores"].append(core["thread_id"])
+ if "instance_id" in core:
+ occupation_item["cores"].append(core["thread_id"])
+ # ports
+ RAD_item["ports"] = {}
+ occupation_item["ports"] = {}
+ for iface in numa["interfaces"]:
+ RAD_item["ports"][iface["pci"]] = "speed:" + str(iface["Mbps"]) + "M"
+ occupation_item["ports"][iface["pci"]] = {
+ "occupied": str(100 * iface["Mbps_consumed"] // iface["Mbps"]) + "%"
+ }
+
+ RAD[numa["numa_socket"]] = RAD_item
+ occupation[numa["numa_socket"]] = occupation_item
+ server_dict[host["host"]["name"]] = {"RAD": RAD, "occupation": occupation}
+
+ def get_hosts_info(self):
+ """Get the information of deployed hosts
+ Returns the hosts content"""
+ # obtain hosts list
+ url = self.url + "/hosts"
+ try:
+ vim_response = requests.get(url)
+ except requests.exceptions.RequestException as e:
+ print("get_hosts_info Exception: ", e.args)
+ return -vimconn.HTTP_Not_Found, str(e.args[0])
+ print(
+ "vim get", url, "response:", vim_response.status_code, vim_response.json()
+ )
+ # print vim_response.status_code
+ # print json.dumps(vim_response.json(), indent=4)
+ if vim_response.status_code != 200:
+ # TODO: get error
+ print(
+ "vimconnector.get_hosts_info error getting host list {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
+ return -vim_response.status_code, "Error getting host list"
+
+ res, hosts = self._format_in(vim_response, get_hosts_response_schema)
+
+ if not res:
+ print(
+ "vimconnector.get_hosts_info error parsing GET HOSTS vim response",
+ hosts,
+ )
+ return vimconn.HTTP_Internal_Server_Error, hosts
+ # obtain hosts details
+ hosts_dict = {}
+ for host in hosts["hosts"]:
+ url = self.url + "/hosts/" + host["id"]
+ try:
+ vim_response = requests.get(url)
+ except requests.exceptions.RequestException as e:
+ print("get_hosts_info Exception: ", e.args)
+ return -vimconn.HTTP_Not_Found, str(e.args[0])
+ print(
+ "vim get",
+ url,
+ "response:",
+ vim_response.status_code,
+ vim_response.json(),
+ )
+ if vim_response.status_code != 200:
+ print(
+ "vimconnector.get_hosts_info error getting detailed host {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
+ continue
+ res, host_detail = self._format_in(
+ vim_response, get_host_detail_response_schema
+ )
+ if not res:
+ print(
+ "vimconnector.get_hosts_info error parsing GET HOSTS/{} vim response {}".format(
+ host["id"], host_detail
+ ),
+ )
+ continue
+ # print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+ self.host_vim2gui(host_detail, hosts_dict)
+ return 200, hosts_dict
+
+ def get_hosts(self, vim_tenant):
+ """Get the hosts and deployed instances
+ Returns the hosts content"""
+ # obtain hosts list
+ url = self.url + "/hosts"
+ try:
+ vim_response = requests.get(url)
+ except requests.exceptions.RequestException as e:
+ print("get_hosts Exception: ", e.args)
+ return -vimconn.HTTP_Not_Found, str(e.args[0])
+ print(
+ "vim get", url, "response:", vim_response.status_code, vim_response.json()
+ )
+ # print vim_response.status_code
+ # print json.dumps(vim_response.json(), indent=4)
+ if vim_response.status_code != 200:
+ # TODO: get error
+ print(
+ "vimconnector.get_hosts error getting host list {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
+ return -vim_response.status_code, "Error getting host list"
+
+ res, hosts = self._format_in(vim_response, get_hosts_response_schema)
+
+ if not res:
+ print("vimconnector.get_host error parsing GET HOSTS vim response", hosts)
+ return vimconn.HTTP_Internal_Server_Error, hosts
+ # obtain instances from hosts
+ for host in hosts["hosts"]:
+ url = self.url + "/" + vim_tenant + "/servers?hostId=" + host["id"]
+ try:
+ vim_response = requests.get(url)
+ except requests.exceptions.RequestException as e:
+ print("get_hosts Exception: ", e.args)
+ return -vimconn.HTTP_Not_Found, str(e.args[0])
+ print(
+ "vim get",
+ url,
+ "response:",
+ vim_response.status_code,
+ vim_response.json(),
+ )
+ if vim_response.status_code != 200:
+ print(
+ "vimconnector.get_hosts error getting instances at host {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
+ continue
+ res, servers = self._format_in(vim_response, get_server_response_schema)
+ if not res:
+ print(
+ "vimconnector.get_host error parsing GET SERVERS/{} vim response {}".format(
+ host["id"], servers
+ ),
+ )
+ continue
+ # print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+ host["instances"] = servers["servers"]
+ return 200, hosts["hosts"]
+
+ def get_processor_rankings(self):
+ """Get the processor rankings in the VIM database"""
+ url = self.url + "/processor_ranking"
+ try:
+ vim_response = requests.get(url)
+ except requests.exceptions.RequestException as e:
+ print("get_processor_rankings Exception: ", e.args)
+ return -vimconn.HTTP_Not_Found, str(e.args[0])
+ print(
+ "vim get", url, "response:", vim_response.status_code, vim_response.json()
+ )
+ # print vim_response.status_code
+ # print json.dumps(vim_response.json(), indent=4)
+ if vim_response.status_code != 200:
+ # TODO: get error
+ print(
+ "vimconnector.get_processor_rankings error getting processor rankings {} {}".format(
+ vim_response.status_code, vim_response.json()
+ )
+ )
+ return -vim_response.status_code, "Error getting processor rankings"
+
+ res, rankings = self._format_in(
+ vim_response, get_processor_rankings_response_schema
+ )
+ return res, rankings["rankings"]
+
+ def new_host(self, host_data):
+ """Adds a new host to VIM"""
+ """Returns status code of the VIM response"""
+ payload_req = host_data
+ try:
+ url = self.url_admin + "/hosts"
+ self.logger.info("Adding a new host POST %s", url)
+ vim_response = requests.post(
+ url, headers=self.headers_req, data=payload_req
+ )
+ self._check_http_request_response(vim_response)
+ self.logger.debug(vim_response.text)
+ # print json.dumps(vim_response.json(), indent=4)
+ response = vim_response.json()
+ js_v(response, new_host_response_schema)
+ r = self._remove_extra_items(response, new_host_response_schema)
+ if r is not None:
+ self.logger.warn("Warning: remove extra items %s", str(r))
+ host_id = response["host"]["id"]
+ return host_id
+ except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+ self._format_request_exception(e)
+
+ def new_external_port(self, port_data):
+ """Adds a external port to VIM"""
+ """Returns the port identifier"""
+ # TODO change to logging exception code policies
+ print("VIMConnector: Adding a new external port")
+ payload_req = port_data
+ try:
+ vim_response = requests.post(
+ self.url_admin + "/ports", headers=self.headers_req, data=payload_req
+ )
+ except requests.exceptions.RequestException as e:
+ self.logger.error("new_external_port Exception: ", str(e))
+ return -vimconn.HTTP_Not_Found, str(e.args[0])
+ print(vim_response)
+ # print vim_response.status_code
+ if vim_response.status_code == 200:
+ # print vim_response.json()
+ # print json.dumps(vim_response.json(), indent=4)
+ res, http_content = self._format_in(vim_response, new_port_response_schema)
+ # print http_content
+ if res:
+ r = self._remove_extra_items(http_content, new_port_response_schema)
+ if r is not None:
+ print("Warning: remove extra items ", r)
+ # print http_content
+ port_id = http_content["port"]["id"]
+ print("Port id: ", port_id)
+ return vim_response.status_code, port_id
+ else:
+ return -vimconn.HTTP_Bad_Request, http_content
+ else:
+ # print vim_response.text
+ jsonerror = self._format_jsonerror(vim_response)
+ text = 'Error in VIM "{}": not possible to add new external port. HTTP Response: {}. Error: {}'.format(
+ self.url_admin, vim_response.status_code, jsonerror
+ )
+ # print text
+ return -vim_response.status_code, text
+
+ def new_external_network(self, net_name, net_type):
+ """Adds a external network to VIM (shared)"""
+ """Returns the network identifier"""
+ # TODO change to logging exception code policies
+ print(
+ "VIMConnector: Adding external shared network to VIM (type "
+ + net_type
+ + "): "
+ + net_name
+ )
+
+ payload_req = (
+ '{"network":{"name": "'
+ + net_name
+ + '","shared":true,"type": "'
+ + net_type
+ + '"}}'
+ )
+ try:
+ vim_response = requests.post(
+ self.url + "/networks", headers=self.headers_req, data=payload_req
+ )
+ except requests.exceptions.RequestException as e:
+ self.logger.error("new_external_network Exception: ", e.args)
+ return -vimconn.HTTP_Not_Found, str(e.args[0])
+ print(vim_response)
+ # print vim_response.status_code
+ if vim_response.status_code == 200:
+ # print vim_response.json()
+ # print json.dumps(vim_response.json(), indent=4)
+ res, http_content = self._format_in(
+ vim_response, new_network_response_schema
+ )
+ # print http_content
+ if res:
+ r = self._remove_extra_items(http_content, new_network_response_schema)
+ if r is not None:
+ print("Warning: remove extra items ", r)
+ # print http_content
+ network_id = http_content["network"]["id"]
+ print("Network id: ", network_id)
+ return vim_response.status_code, network_id
+ else:
+ return -vimconn.HTTP_Bad_Request, http_content
+ else:
+ # print vim_response.text
+ jsonerror = self._format_jsonerror(vim_response)
+ text = 'Error in VIM "{}": not possible to add new external network. HTTP Response: {}. Error: {}'.format(
+ self.url, vim_response.status_code, jsonerror
+ )
+ # print text
+ return -vim_response.status_code, text
+
+ def connect_port_network(self, port_id, network_id, admin=False):
+ """Connects a external port to a network"""
+ """Returns status code of the VIM response"""
+ # TODO change to logging exception code policies
+ print("VIMConnector: Connecting external port to network")
+
+ payload_req = '{"port":{"network_id":"' + network_id + '"}}'
+ if admin:
+ if self.url_admin is None:
+ return (
+ -vimconn.HTTP_Unauthorized,
+ "datacenter cannot contain admin URL",
+ )
+ url = self.url_admin
+ else:
+ url = self.url
+ try:
+ vim_response = requests.put(
+ url + "/ports/" + port_id, headers=self.headers_req, data=payload_req
+ )
+ except requests.exceptions.RequestException as e:
+ print("connect_port_network Exception: ", e.args)
+ return -vimconn.HTTP_Not_Found, str(e.args[0])
+ print(vim_response)
+ # print vim_response.status_code
+ if vim_response.status_code == 200:
+ # print vim_response.json()
+ # print json.dumps(vim_response.json(), indent=4)
+ res, http_content = self._format_in(vim_response, new_port_response_schema)
+ # print http_content
+ if res:
+ r = self._remove_extra_items(http_content, new_port_response_schema)
+ if r is not None:
+ print("Warning: remove extra items ", r)
+ # print http_content
+ port_id = http_content["port"]["id"]
+ print("Port id: ", port_id)
+ return vim_response.status_code, port_id
+ else:
+ return -vimconn.HTTP_Bad_Request, http_content
+ else:
+ print(vim_response.text)
+ jsonerror = self._format_jsonerror(vim_response)
+ text = (
+ 'Error in VIM "{}": not possible to connect external port to network. HTTP Response: {}.'
+ " Error: {}".format(self.url_admin, vim_response.status_code, jsonerror)
+ )
+ print(text)
+ return -vim_response.status_code, text
+
def migrate_instance(self, vm_id, compute_host=None):
"""
Migrate a vdu
"Failed create a new network {}".format(net_name)
)
+ def get_vcd_network_list(self):
+ """Method available organization for a logged in tenant
+
+ Returns:
+ The return vca object that letter can be used to connect to vcloud direct as admin
+ """
+
+ self.logger.debug(
+ "get_vcd_network_list(): retrieving network list for vcd {}".format(
+ self.tenant_name
+ )
+ )
+
+ if not self.tenant_name:
+ raise vimconn.VimConnConnectionException("Tenant name is empty.")
+
+ _, vdc = self.get_vdc_details()
+ if vdc is None:
+ raise vimconn.VimConnConnectionException(
+ "Can't retrieve information for a VDC {}".format(self.tenant_name)
+ )
+
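+ # vCD ids have the form "urn:vcloud:vdc:<uuid>"; the bare UUID is the
+ # fourth colon-separated field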
+ vdc_uuid = vdc.get("id").split(":")[3]
+ if self.client._session:
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=vdc.get("href"), headers=headers
+ )
+
+ if response.status_code != 200:
+ self.logger.error("Failed to get vdc content")
+ raise vimconn.VimConnNotFoundException("Failed to get vdc content")
+ else:
+ content = XmlElementTree.fromstring(response.text)
+
+ network_list = []
+ try:
+ for item in content:
+ if item.tag.split("}")[-1] == "AvailableNetworks":
+ for net in item:
+ response = self.perform_request(
+ req_type="GET", url=net.get("href"), headers=headers
+ )
+
+ if response.status_code != 200:
+ self.logger.error("Failed to get network content")
+ raise vimconn.VimConnNotFoundException(
+ "Failed to get network content"
+ )
+ else:
+ net_details = XmlElementTree.fromstring(response.text)
+
+ filter_dict = {}
+ net_uuid = net_details.get("id").split(":")
+
+ if len(net_uuid) != 4:
+ continue
+ else:
+ net_uuid = net_uuid[3]
+ # create dict entry
+ self.logger.debug(
+ "get_vcd_network_list(): Adding network {} "
+ "to a list vcd id {} network {}".format(
+ net_uuid, vdc_uuid, net_details.get("name")
+ )
+ )
+ filter_dict["name"] = net_details.get("name")
+ filter_dict["id"] = net_uuid
+
+ if [
+ i.text
+ for i in net_details
+ if i.tag.split("}")[-1] == "IsShared"
+ ][0] == "true":
+ shared = True
+ else:
+ shared = False
+
+ filter_dict["shared"] = shared
+ filter_dict["tenant_id"] = vdc_uuid
+
+ if int(net_details.get("status")) == 1:
+ filter_dict["admin_state_up"] = True
+ else:
+ filter_dict["admin_state_up"] = False
+
+ filter_dict["status"] = "ACTIVE"
+ filter_dict["type"] = "bridge"
+ network_list.append(filter_dict)
+ self.logger.debug(
+ "get_vcd_network_list adding entry {}".format(
+ filter_dict
+ )
+ )
+ except Exception:
+ self.logger.debug("Error in get_vcd_network_list", exc_info=True)
+
+ self.logger.debug("get_vcd_network_list returning {}".format(network_list))
+
+ return network_list
+
def get_network_list(self, filter_dict={}):
"""Obtain tenant networks of VIM
Filter_dict can be:
:param created_items: dictionary with extra items to be deleted. provided by method new_network
Returns the network identifier or raises an exception upon error or when network is not found
"""
+
+ # ############# Stub code for SRIOV #################
+ # dvport_group = self.get_dvport_group(net_id)
+ # if dvport_group:
+ # #delete portgroup
+ # status = self.destroy_dvport_group(net_id)
+ # if status:
+ # # Remove vlanID from persistent info
+ # if net_id in self.persistent_info["used_vlanIDs"]:
+ # del self.persistent_info["used_vlanIDs"][net_id]
+ #
+ # return net_id
+
vcd_network = self.get_vcd_network(network_uuid=net_id)
if vcd_network is not None and vcd_network:
if self.delete_network_action(network_uuid=net_id):
"Exception occured while retriving catalog items {}".format(exp)
)
+ def get_vappid(self, vdc=None, vapp_name=None):
+ """Method takes vdc object and vApp name and returns vapp uuid or None
+
+ Args:
+ vdc: The VDC object.
+ vapp_name: the vApp name identifier
+
+ Returns:
+ The vApp UUID if found, otherwise None (False on error)
+ """
+ if vdc is None or vapp_name is None:
+ return None
+
+ # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
+ try:
+ refs = [
+ ref
+ for ref in vdc.ResourceEntities.ResourceEntity
+ if ref.name == vapp_name
+ and ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
+ ]
+
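+ # the href ends in ".../vApp/vapp-<uuid>"; splitting on "vapp" and
+ # dropping the leading "-" yields the bare UUID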
+ if len(refs) == 1:
+ return refs[0].href.split("vapp")[1][1:]
+ except Exception as e:
+ self.logger.exception(e)
+ return False
+
+ return None
+
+ def check_vapp(self, vdc=None, vapp_uuid=None):
+ """Method Method returns True or False if vapp deployed in vCloud director
+
+ Args:
+ vca: Connector to VCA
+ vdc: The VDC object.
+ vappid: vappid is application identifier
+
+ Returns:
+ The return True if vApp deployed
+ :param vdc:
+ :param vapp_uuid:
+ """
+ try:
+ refs = [
+ ref
+ for ref in vdc.ResourceEntities.ResourceEntity
+ if ref.type_ == "application/vnd.vmware.vcloud.vApp+xml"
+ ]
+
+ for ref in refs:
+ vappid = ref.href.split("vapp")[1][1:]
+ # find vapp with respected vapp uuid
+
+ if vappid == vapp_uuid:
+ return True
+ except Exception as e:
+ self.logger.exception(e)
+
+ return False
+
+ return False
+
def get_namebyvappid(self, vapp_uuid=None):
"""Method returns vApp name from vCD and lookup done by vapp_id.
"The upload iso task failed with status {}".format(result.get("status"))
)
+ def get_vcd_availibility_zones(self, respool_href, headers):
+ """Method to find presence of av zone is VIM resource pool
+
+ Args:
+ respool_href - resource pool href
+ headers - header information
+
+ Returns:
+ vcd_az - list of availability zones present in vCD
+ """
+ vcd_az = []
+ url = respool_href
+ resp = self.perform_request(req_type="GET", url=respool_href, headers=headers)
+
+ if resp.status_code != requests.codes.ok:
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ url, resp.status_code
+ )
+ )
+ else:
+ # Get the href to hostGroups and find provided hostGroup is present in it
+ resp_xml = XmlElementTree.fromstring(resp.content)
+ for child in resp_xml:
+ if "VMWProviderVdcResourcePool" in child.tag:
+ for schild in child:
+ if "Link" in schild.tag:
+ if (
+ schild.attrib.get("type")
+ == "application/vnd.vmware.admin.vmwHostGroupsType+xml"
+ ):
+ hostGroup = schild.attrib.get("href")
+ hg_resp = self.perform_request(
+ req_type="GET", url=hostGroup, headers=headers
+ )
+
+ if hg_resp.status_code != requests.codes.ok:
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ hostGroup, hg_resp.status_code
+ )
+ )
+ else:
+ hg_resp_xml = XmlElementTree.fromstring(
+ hg_resp.content
+ )
+ for hostGroup in hg_resp_xml:
+ if "HostGroup" in hostGroup.tag:
+ # append host group name to the list
+ vcd_az.append(hostGroup.attrib.get("name"))
+
+ return vcd_az
+
def set_availability_zones(self):
"""
Set vim availability zone
raise vimconn.VimConnException(msg)
+ # Example server dict, based on current discussion:
+ #
+ # server:
+ # created: '2016-09-08T11:51:58'
+ # description: simple-instance.linux1.1
+ # flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
+ # hostId: e836c036-74e7-11e6-b249-0800273e724c
+ # image: dde30fe6-75a9-11e6-ad5f-0800273e724c
+ # status: ACTIVE
+ # error_msg:
+ # interfaces: …
+ #
def get_vminstance(self, vim_vm_uuid=None):
"""Returns the VM instance information from VIM"""
self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
return console_dict
+ # NOT USED METHODS in current version
+
+ def host_vim2gui(self, host, server_dict):
+ """Transform host dictionary from VIM format to GUI format,
+ and append to the server_dict
+ """
+ raise vimconn.VimConnNotImplemented("Should have implemented this")
+
def get_hosts_info(self):
"""Get the information of deployed hosts
Returns the hosts content"""
Returns the hosts content"""
raise vimconn.VimConnNotImplemented("Should have implemented this")
+ def get_processor_rankings(self):
+ """Get the processor rankings in the VIM database"""
+ raise vimconn.VimConnNotImplemented("Should have implemented this")
+
+ def new_host(self, host_data):
+ """Adds a new host to VIM"""
+ """Returns status code of the VIM response"""
+ raise vimconn.VimConnNotImplemented("Should have implemented this")
+
+ def new_external_port(self, port_data):
+ """Adds a external port to VIM"""
+ """Returns the port identifier"""
+ raise vimconn.VimConnNotImplemented("Should have implemented this")
+
+ def new_external_network(self, net_name, net_type):
+ """Adds a external network to VIM (shared)"""
+ """Returns the network identifier"""
+ raise vimconn.VimConnNotImplemented("Should have implemented this")
+
+ def connect_port_network(self, port_id, network_id, admin=False):
+ """Connects a external port to a network"""
+ """Returns status code of the VIM response"""
+ raise vimconn.VimConnNotImplemented("Should have implemented this")
+
+ def new_vminstancefromJSON(self, vm_data):
+ """Adds a VM instance to VIM"""
+ """Returns the instance identifier"""
+ raise vimconn.VimConnNotImplemented("Should have implemented this")
+
def get_network_name_by_id(self, network_uuid=None):
"""Method gets vcloud director network named based on supplied uuid.
return None
+ def get_vapp_list(self, vdc_name=None):
+ """
+ Method retrieves the list of vApps deployed in vCloud director and returns
+ a dictionary with one entry per vApp deployed in the queried VDC,
+ keyed by vApp UUID.
+
+ Args:
+ vdc_name - the VDC name used to query the vms view
+
+ Returns:
+ A dictionary keyed by vApp UUID
+ """
+ vapp_dict = {}
+
+ if vdc_name is None:
+ return vapp_dict
+
+ content = self.vms_view_action(vdc_name=vdc_name)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == "VMRecord":
+ if vm_xml.attrib["isVAppTemplate"] == "true":
+ rawuuid = vm_xml.attrib["container"].split("/")[-1:]
+ if "vappTemplate-" in rawuuid[0]:
+ # container is in the format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
+ # strip the "vappTemplate-" prefix and use the raw UUID as key
+ vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
+ except Exception:
+ pass
+
+ return vapp_dict
+
+ def get_vm_list(self, vdc_name=None):
+ """
+ Method retrieves the list of VMs deployed in vCloud director and returns
+ a dictionary with one entry per VM deployed in the queried VDC,
+ keyed by VM UUID.
+
+ Args:
+ vdc_name - the VDC name used to query the vms view
+
+ Returns:
+ A dictionary keyed by VM UUID
+ """
+ vm_dict = {}
+
+ if vdc_name is None:
+ return vm_dict
+
+ content = self.vms_view_action(vdc_name=vdc_name)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(content)
+ for vm_xml in vm_list_xmlroot:
+ if vm_xml.tag.split("}")[1] == "VMRecord":
+ if vm_xml.attrib["isVAppTemplate"] == "false":
+ rawuuid = vm_xml.attrib["href"].split("/")[-1:]
+ if "vm-" in rawuuid[0]:
+ # href ends in vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5; strip the
+ # "vm-" prefix and use the raw UUID as key
+ vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+ except Exception:
+ pass
+
+ return vm_dict
+
def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
"""
Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
return None
- def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
+ def create_vdc_rest(self, vdc_name=None):
"""
- Method retrieve vapp detail from vCloud director
+ Method creates a new VDC in vCloud director
Args:
- vapp_uuid - is vapp identifier.
-
+ vdc_name - vdc name to be created
Returns:
- The return network uuid or return None
+ The response text on success (HTTP 201), otherwise None
"""
- parsed_respond = {}
- vca = None
-
- if need_admin_access:
- vca = self.connect_as_admin()
- else:
- vca = self.client
+ self.logger.info("Creating new vdc {}".format(vdc_name))
+ vca = self.connect_as_admin()
if not vca:
raise vimconn.VimConnConnectionException("Failed to connect vCD")
- if vapp_uuid is None:
+
+ if vdc_name is None:
return None
- url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
- get_vapp_restcall = "".join(url_list)
+ url_list = [self.url, "/api/admin/org/", self.org_uuid]
+ vm_list_rest_call = "".join(url_list)
if vca._session:
headers = {
"Accept": "application/*+xml;version=" + API_VERSION,
- "x-vcloud-authorization": vca._session.headers[
+ "x-vcloud-authorization": self.client._session.headers[
"x-vcloud-authorization"
],
}
response = self.perform_request(
- req_type="GET", url=get_vapp_restcall, headers=headers
+ req_type="GET", url=vm_list_rest_call, headers=headers
+ )
+ provider_vdc_ref = None
+ add_vdc_rest_url = None
+ # available_networks = None
+
+ if response.status_code != requests.codes.ok:
+ self.logger.debug(
+ "REST API call {} failed. Return status code {}".format(
+ vm_list_rest_call, response.status_code
+ )
+ )
+
+ return None
+ else:
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(response.text)
+ for child in vm_list_xmlroot:
+ # application/vnd.vmware.admin.providervdc+xml
+ if child.tag.split("}")[1] == "Link":
+ if (
+ child.attrib.get("type")
+ == "application/vnd.vmware.admin.createVdcParams+xml"
+ and child.attrib.get("rel") == "add"
+ ):
+ add_vdc_rest_url = child.attrib.get("href")
+ except Exception:
+ self.logger.debug(
+ "Failed parse respond for rest api call {}".format(
+ vm_list_rest_call
+ )
+ )
+ self.logger.debug("Respond body {}".format(response.text))
+
+ return None
+
+ response = self.get_provider_rest(vca=vca)
+ try:
+ vm_list_xmlroot = XmlElementTree.fromstring(response)
+ for child in vm_list_xmlroot:
+ if child.tag.split("}")[1] == "ProviderVdcReferences":
+ for sub_child in child:
+ provider_vdc_ref = sub_child.attrib.get("href")
+ except Exception:
+ self.logger.debug(
+ "Failed parse respond for rest api call {}".format(
+ vm_list_rest_call
+ )
+ )
+ self.logger.debug("Respond body {}".format(response))
+
+ return None
+
+ if add_vdc_rest_url is not None and provider_vdc_ref is not None:
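+ # minimal CreateVdcParams payload: a ReservationPool VDC with fixed
+ # CPU/memory/storage limits and the "Main Provider" reference found
+ # above; the values are hardcoded defaults, not derived from the request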
+ data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
+ <AllocationModel>ReservationPool</AllocationModel>
+ <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
+ <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
+ </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
+ <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
+ <ProviderVdcReference
+ name="Main Provider"
+ href="{2:s}" />
+ <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(
+ escape(vdc_name), escape(vdc_name), provider_vdc_ref
+ )
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.admin.createVdcParams+xml"
+ response = self.perform_request(
+ req_type="POST",
+ url=add_vdc_rest_url,
+ headers=headers,
+ data=data,
+ )
+
+ # if we all ok we respond with content otherwise by default None
+ if response.status_code == 201:
+ return response.text
+
+ return None
+
+ def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
+ """
+ Method retrieves vApp details from vCloud director
+
+ Args:
+ vapp_uuid - is vapp identifier.
+
+ Returns:
+ A dictionary with the parsed vApp details, empty on failure
+ """
+ parsed_respond = {}
+ vca = None
+
+ if need_admin_access:
+ vca = self.connect_as_admin()
+ else:
+ vca = self.client
+
+ if not vca:
+ raise vimconn.VimConnConnectionException("Failed to connect vCD")
+ if vapp_uuid is None:
+ return None
+
+ url_list = [self.url, "/api/vApp/vapp-", vapp_uuid]
+ get_vapp_restcall = "".join(url_list)
+
+ if vca._session:
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": vca._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ response = self.perform_request(
+ req_type="GET", url=get_vapp_restcall, headers=headers
)
if response.status_code == 403:
return parsed_respond
+ def acquire_console(self, vm_uuid=None):
+ if vm_uuid is None:
+ return None
+
+ if self.client._session:
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+ vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
+ console_dict = vm_dict["acquireTicket"]
+ console_rest_call = console_dict["href"]
+
+ response = self.perform_request(
+ req_type="POST", url=console_rest_call, headers=headers
+ )
+
+ if response.status_code == 403:
+ response = self.retry_rest("POST", console_rest_call)
+
+ if response.status_code == requests.codes.ok:
+ return response.text
+
+ return None
+
def modify_vm_disk(self, vapp_uuid, flavor_disk):
"""
Method retrieve vm disk details
"affinity".format(exp)
)
+ def cloud_init(self, vapp, cloud_config):
+ """
+ Method to inject ssh-key
+ vapp - vapp object
+ cloud_config a dictionary with:
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+ 'users': (optional) list of users to be inserted, each item is a dict with:
+ 'name': (mandatory) user name,
+ 'key-pairs': (optional) list of strings with the public key to be inserted to the user
+ 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+ or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+ 'config-files': (optional). List of files to be transferred. Each item is a dict with:
+ 'dest': (mandatory) string with the destination absolute path
+ 'encoding': (optional, by default text). Can be one of:
+ 'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+ 'content' (mandatory): string with the content of the file
+ 'permissions': (optional) string with file permissions, typically octal notation '0644'
+ 'owner': (optional) file owner, string with the format 'owner:group'
+ 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+ """
+ try:
+ if not isinstance(cloud_config, dict):
+ raise Exception(
+ "cloud_init : parameter cloud_config is not a dictionary"
+ )
+ else:
+ key_pairs = []
+ userdata = []
+
+ if "key-pairs" in cloud_config:
+ key_pairs = cloud_config["key-pairs"]
+
+ if "users" in cloud_config:
+ userdata = cloud_config["users"]
+
+ self.logger.debug("cloud_init : Guest os customization started..")
+ customize_script = self.format_script(
+ key_pairs=key_pairs, users_list=userdata
+ )
+ # XML-escape ampersands so the script survives embedding in the
+ # GuestCustomizationSection XML document
+ customize_script = customize_script.replace("&", "&amp;")
+ self.guest_customization(vapp, customize_script)
+ except Exception as exp:
+ self.logger.error(
+ "cloud_init : exception occurred while injecting " "ssh-key"
+ )
+
+ raise vimconn.VimConnException(
+ "cloud_init : Error {} failed to inject " "ssh-key".format(exp)
+ )
+
+ def format_script(self, key_pairs=[], users_list=[]):
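+ # Build the guest customization shell script. vCloud Director invokes it
+ # with "precustomization" before first boot and "postcustomization"
+ # afterwards; the keys below are only injected during the
+ # precustomization phase (see the test on $1)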
+ bash_script = """#!/bin/sh
+echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"`>> /root/customization.log
+if [ "$1" = "precustomization" ];then
+ echo performing precustomization tasks on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+"""
+
+ keys = "\n".join(key_pairs)
+ if keys:
+ keys_data = """
+ if [ ! -d /root/.ssh ];then
+ mkdir /root/.ssh
+ chown root:root /root/.ssh
+ chmod 700 /root/.ssh
+ touch /root/.ssh/authorized_keys
+ chown root:root /root/.ssh/authorized_keys
+ chmod 600 /root/.ssh/authorized_keys
+ # make centos with selinux happy
+ which restorecon && restorecon -Rv /root/.ssh
+ else
+ touch /root/.ssh/authorized_keys
+ chown root:root /root/.ssh/authorized_keys
+ chmod 600 /root/.ssh/authorized_keys
+ fi
+ echo '{key}' >> /root/.ssh/authorized_keys
+ """.format(
+ key=keys
+ )
+
+ bash_script += keys_data
+
+ for user in users_list:
+ if "name" in user:
+ user_name = user["name"]
+
+ if "key-pairs" in user:
+ user_keys = "\n".join(user["key-pairs"])
+ else:
+ user_keys = None
+
+ add_user_name = """
+ useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
+ """.format(
+ user_name=user_name
+ )
+
+ bash_script += add_user_name
+
+ if user_keys:
+ user_keys_data = """
+ mkdir /home/{user_name}/.ssh
+ chown {user_name}:{user_name} /home/{user_name}/.ssh
+ chmod 700 /home/{user_name}/.ssh
+ touch /home/{user_name}/.ssh/authorized_keys
+ chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+ chmod 600 /home/{user_name}/.ssh/authorized_keys
+ # make centos with selinux happy
+ which restorecon && restorecon -Rv /home/{user_name}/.ssh
+ echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+ """.format(
+ user_name=user_name, user_key=user_keys
+ )
+ bash_script += user_keys_data
+
+ return bash_script + "\n\tfi"
+
+ def guest_customization(self, vapp, customize_script):
+ """
+ Method to customize guest os
+ vapp - Vapp object
+ customize_script - Customize script to be run at first boot of VM.
+ """
+ for vm in vapp.get_all_vms():
+ vm_id = vm.get("id").split(":")[-1]
+ vm_name = vm.get("name")
+ vm_name = vm_name.replace("_", "-")
+
+ vm_customization_url = (
+ "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
+ )
+ headers = {
+ "Accept": "application/*+xml;version=" + API_VERSION,
+ "x-vcloud-authorization": self.client._session.headers[
+ "x-vcloud-authorization"
+ ],
+ }
+
+ headers[
+ "Content-Type"
+ ] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
+
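+ # PUT an updated GuestCustomizationSection: enable customization and
+ # install the script to run at first power-on; ComputerName uses the
+ # underscore-free vm_name computed above, since computer names must be
+ # hostname-safe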
+ data = """<GuestCustomizationSection
+ xmlns="http://www.vmware.com/vcloud/v1.5"
+ xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+ ovf:required="false" href="{}"
+ type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
+ <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
+ <Enabled>true</Enabled>
+ <ChangeSid>false</ChangeSid>
+ <VirtualMachineId>{}</VirtualMachineId>
+ <JoinDomainEnabled>false</JoinDomainEnabled>
+ <UseOrgSettings>false</UseOrgSettings>
+ <AdminPasswordEnabled>false</AdminPasswordEnabled>
+ <AdminPasswordAuto>true</AdminPasswordAuto>
+ <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
+ <AdminAutoLogonCount>0</AdminAutoLogonCount>
+ <ResetPasswordRequired>false</ResetPasswordRequired>
+ <CustomizationScript>{}</CustomizationScript>
+ <ComputerName>{}</ComputerName>
+ <Link href="{}"
+ type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
+ </GuestCustomizationSection>
+ """.format(
+ vm_customization_url,
+ vm_id,
+ customize_script,
+ vm_name,
+ vm_customization_url,
+ )
+
+ response = self.perform_request(
+ req_type="PUT", url=vm_customization_url, headers=headers, data=data
+ )
+ if response.status_code == 202:
+ guest_task = self.get_task_from_response(response.text)
+ self.client.get_task_monitor().wait_for_success(task=guest_task)
+ self.logger.info(
+ "guest_customization : customized guest os task "
+ "completed for VM {}".format(vm_name)
+ )
+ else:
+ self.logger.error(
+ "guest_customization : task for customized guest os"
+ "failed for VM {}".format(vm_name)
+ )
+
+ raise vimconn.VimConnException(
+ "guest_customization : failed to perform"
+ "guest os customization on VM {}".format(vm_name)
+ )
+
def add_new_disk(self, vapp_uuid, disk_size):
"""
Method to create an empty vm disk
elif exp_type == "NotFound":
raise vimconn.VimConnNotFoundException(message=msg)
+ def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
+ """
+ Method to attach SRIOV adapters to VM
+
+ Args:
+ vapp_uuid - uuid of vApp/VM
+ sriov_nets - SRIOV devices information as specified in VNFD (flavor)
+ vmname_andid - vmname
+
+ Returns:
+ The status of the add SRIOV adapter task, vm object and
+ vcenter_conect object
+ """
+ vm_obj = None
+ vcenter_conect, content = self.get_vcenter_content()
+ vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+
+ if vm_moref_id:
+ try:
+ no_of_sriov_devices = len(sriov_nets)
+ if no_of_sriov_devices > 0:
+ # Get VM and its host
+ host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+ self.logger.info(
+ "VM {} is currently on host {}".format(vm_obj, host_obj)
+ )
+
+ if host_obj and vm_obj:
+ # get SRIOV devices from the host on which the vApp is currently installed
+ avilable_sriov_devices = self.get_sriov_devices(
+ host_obj,
+ no_of_sriov_devices,
+ )
+
+ if len(avilable_sriov_devices) == 0:
+ # find other hosts with active pci devices
+ (
+ new_host_obj,
+ avilable_sriov_devices,
+ ) = self.get_host_and_sriov_devices(
+ content,
+ no_of_sriov_devices,
+ )
+
+ if (
+ new_host_obj is not None
+ and len(avilable_sriov_devices) > 0
+ ):
+ # Migrate vm to the host where SRIOV devices are available
+ self.logger.info(
+ "Relocate VM {} on new host {}".format(
+ vm_obj, new_host_obj
+ )
+ )
+ task = self.relocate_vm(new_host_obj, vm_obj)
+
+ if task is not None:
+ result = self.wait_for_vcenter_task(
+ task, vcenter_conect
+ )
+ self.logger.info(
+ "Migrate VM status: {}".format(result)
+ )
+ host_obj = new_host_obj
+ else:
+ self.logger.info(
+ "Fail to migrate VM : {}".format(result)
+ )
+
+ raise vimconn.VimConnNotFoundException(
+ "Fail to migrate VM : {} to host {}".format(
+ vmname_andid, new_host_obj
+ )
+ )
+
+ if (
+ host_obj is not None
+ and avilable_sriov_devices is not None
+ and len(avilable_sriov_devices) > 0
+ ):
+ # Add SRIOV devices one by one
+ for sriov_net in sriov_nets:
+ network_name = sriov_net.get("net_id")
+ self.create_dvPort_group(network_name)
+
+ if (
+ sriov_net.get("type") == "VF"
+ or sriov_net.get("type") == "SR-IOV"
+ ):
+ # add vlan ID ,Modify portgroup for vlan ID
+ self.configure_vlanID(
+ content, vcenter_conect, network_name
+ )
+
+ task = self.add_sriov_to_vm(
+ content,
+ vm_obj,
+ host_obj,
+ network_name,
+ avilable_sriov_devices[0],
+ )
+
+ if task:
+ status = self.wait_for_vcenter_task(
+ task, vcenter_conect
+ )
+
+ if status:
+ self.logger.info(
+ "Added SRIOV {} to VM {}".format(
+ no_of_sriov_devices, str(vm_obj)
+ )
+ )
+ else:
+ self.logger.error(
+ "Fail to add SRIOV {} to VM {}".format(
+ no_of_sriov_devices, str(vm_obj)
+ )
+ )
+
+ raise vimconn.VimConnUnexpectedResponse(
+ "Fail to add SRIOV adapter in VM {}".format(
+ str(vm_obj)
+ )
+ )
+
+ return True, vm_obj, vcenter_conect
+ else:
+ self.logger.error(
+ "Currently there is no host with"
+ " {} number of avaialble SRIOV "
+ "VFs required for VM {}".format(
+ no_of_sriov_devices, vmname_andid
+ )
+ )
+
+ raise vimconn.VimConnNotFoundException(
+ "Currently there is no host with {} "
+ "number of avaialble SRIOV devices required for VM {}".format(
+ no_of_sriov_devices, vmname_andid
+ )
+ )
+ else:
+ self.logger.debug(
+ "No infromation about SRIOV devices {} ", sriov_nets
+ )
+ except vmodl.MethodFault as error:
+ self.logger.error("Error occurred while adding SRIOV {} ", error)
+
+ return None, vm_obj, vcenter_conect
+
def get_sriov_devices(self, host, no_of_vfs):
"""
Method to get the details of SRIOV devices on given host
return sriovInfo
+ def get_host_and_sriov_devices(self, content, no_of_vfs):
+ """
+ Method to get the details of SRIOV devices on all hosts
+
+ Args:
+ content - vSphere host object
+ no_of_vfs - number of pci VFs needed on host
+
+ Returns:
+ array of SRIOV devices and host object
+ """
+ host_obj = None
+ sriov_device_objs = None
+
+ try:
+ if content:
+ container = content.viewManager.CreateContainerView(
+ content.rootFolder, [vim.HostSystem], True
+ )
+
+ for host in container.view:
+ devices = self.get_sriov_devices(host, no_of_vfs)
+
+ if devices:
+ host_obj = host
+ sriov_device_objs = devices
+ break
+ except Exception as exp:
+ self.logger.error(
+ "Error {} occurred while finding SRIOV devices on host: {}".format(
+ exp, host_obj
+ )
+ )
+
+ return host_obj, sriov_device_objs
+
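+ # Illustrative sketch (not in the original code): how the host search above
+ # feeds add_sriov_to_vm() below. vm_obj, network_name and vcenter_conect are
+ # assumed to come from the caller, as in the SRIOV flow earlier in this file:
+ #
+ #     host_obj, devices = self.get_host_and_sriov_devices(content, no_of_vfs)
+ #     if host_obj and devices:
+ #         task = self.add_sriov_to_vm(
+ #             content, vm_obj, host_obj, network_name, devices[0]
+ #         )
+ #         if task:
+ #             self.wait_for_vcenter_task(task, vcenter_conect)
+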
+ def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
+ """
+ Method to add SRIOV adapter to vm
+
+ Args:
+ host_obj - vSphere host object
+ vm_obj - vSphere vm object
+ content - vCenter content object
+ network_name - name of distributed virtual portgroup
+ sriov_device - SRIOV device info
+
+ Returns:
+ task object
+ """
+ devices = []
+ vnic_label = "sriov nic"
+
+ try:
+ dvs_portgr = self.get_dvport_group(network_name)
+ network_name = dvs_portgr.name
+ nic = vim.vm.device.VirtualDeviceSpec()
+ # VM device
+ nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+ nic.device = vim.vm.device.VirtualSriovEthernetCard()
+ nic.device.addressType = "assigned"
+ # nic.device.key = 13016
+ nic.device.deviceInfo = vim.Description()
+ nic.device.deviceInfo.label = vnic_label
+ nic.device.deviceInfo.summary = network_name
+ nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+
+ nic.device.backing.network = self.get_obj(
+ content, [vim.Network], network_name
+ )
+ nic.device.backing.deviceName = network_name
+ nic.device.backing.useAutoDetect = False
+ nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+ nic.device.connectable.startConnected = True
+ nic.device.connectable.allowGuestControl = True
+
+ nic.device.sriovBacking = (
+ vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
+ )
+ nic.device.sriovBacking.physicalFunctionBacking = (
+ vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+ )
+ nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
+
+ devices.append(nic)
+ vmconf = vim.vm.ConfigSpec(deviceChange=devices)
+ task = vm_obj.ReconfigVM_Task(vmconf)
+
+ return task
+ except Exception as exp:
+ self.logger.error(
+ "Error {} occurred while adding SRIOV adapter in VM: {}".format(
+ exp, vm_obj
+ )
+ )
+
+ return None
+
+ def create_dvPort_group(self, network_name):
+ """
+ Method to create distributed virtual portgroup
+
+ Args:
+ network_name - name of network/portgroup
+
+ Returns:
+ portgroup key
+ """
+ try:
+ new_network_name = [network_name, "-", str(uuid.uuid4())]
+ network_name = "".join(new_network_name)
+ vcenter_conect, content = self.get_vcenter_content()
+
+ dv_switch = self.get_obj(
+ content, [vim.DistributedVirtualSwitch], self.dvs_name
+ )
+
+ if dv_switch:
+ dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+ dv_pg_spec.name = network_name
+
+ dv_pg_spec.type = (
+ vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
+ )
+ dv_pg_spec.defaultPortConfig = (
+ vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+ )
+ dv_pg_spec.defaultPortConfig.securityPolicy = (
+ vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
+ )
+ dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = (
+ vim.BoolPolicy(value=False)
+ )
+ dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = (
+ vim.BoolPolicy(value=False)
+ )
+ dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(
+ value=False
+ )
+
+ task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
+ self.wait_for_vcenter_task(task, vcenter_conect)
+
+ dvPort_group = self.get_obj(
+ content, [vim.dvs.DistributedVirtualPortgroup], network_name
+ )
+
+ if dvPort_group:
+ self.logger.info(
+ "Created disributed virtaul port group: {}".format(dvPort_group)
+ )
+ return dvPort_group.key
+ else:
+ self.logger.debug(
+ "No disributed virtual switch found with name {}".format(
+ network_name
+ )
+ )
+
+ except Exception as exp:
+ self.logger.error(
+ "Error occurred while creating disributed virtaul port group {}"
+ " : {}".format(network_name, exp)
+ )
+
+ return None
+
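+ # Usage note (sketch): create_dvPort_group() creates the portgroup under a
+ # UUID-suffixed name ("<network_name>-<uuid4>") and returns the portgroup
+ # key, for example:
+ #
+ #     pg_key = self.create_dvPort_group("sriov-net")
+ #     # creates e.g. "sriov-net-1c9b2a64-..." and returns its key
+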
def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
"""
Method to reconfigure disributed virtual portgroup
return None
+ def destroy_dvport_group(self, dvPort_group_name):
+ """
+ Method to destroy distributed virtual portgroup
+
+ Args:
+ dvPort_group_name - name of the portgroup to destroy
+
+ Returns:
+ True if the portgroup was successfully deleted, else False
+ """
+ vcenter_conect, _ = self.get_vcenter_content()
+
+ try:
+ status = None
+ dvPort_group = self.get_dvport_group(dvPort_group_name)
+
+ if dvPort_group:
+ task = dvPort_group.Destroy_Task()
+ status = self.wait_for_vcenter_task(task, vcenter_conect)
+
+ return status
+ except vmodl.MethodFault as exp:
+ self.logger.error(
+ "Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
+ exp, dvPort_group_name
+ )
+ )
+
+ return None
+
def get_dvport_group(self, dvPort_group_name):
"""
Method to get disributed virtual portgroup
return vlanId
+ def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
+ """
+ Method to configure vlanID in distributed virtual portgroup
+
+ Args:
+ content - vCenter content object
+ vcenter_conect - vCenter connection object
+ dvPort_group_name - name of distributed virtual portgroup
+
+ Returns:
+ None
+ """
+ vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
+
+ if vlanID == 0:
+ # configure vlanID
+ vlanID = self.genrate_vlanID(dvPort_group_name)
+ config = {"vlanID": vlanID}
+ task = self.reconfig_portgroup(
+ content, dvPort_group_name, config_info=config
+ )
+
+ if task:
+ status = self.wait_for_vcenter_task(task, vcenter_conect)
+
+ if status:
+ self.logger.info(
+ "Reconfigured Port group {} for vlan ID {}".format(
+ dvPort_group_name, vlanID
+ )
+ )
+ else:
+ self.logger.error(
+ "Fail reconfigure portgroup {} for vlanID{}".format(
+ dvPort_group_name, vlanID
+ )
+ )
+
+ def genrate_vlanID(self, network_name):
+ """
+ Method to get unused vlanID
+ Args:
+ network_name - name of network/portgroup
+ Returns:
+ vlanID
+ """
+ vlan_id = None
+ used_ids = []
+
+ if self.config.get("vlanID_range") is None:
+ raise vimconn.VimConnConflictException(
+ "You must provide a 'vlanID_range' "
+ "at config value before creating sriov network with vlan tag"
+ )
+
+ if "used_vlanIDs" not in self.persistent_info:
+ self.persistent_info["used_vlanIDs"] = {}
+ else:
+ used_ids = list(self.persistent_info["used_vlanIDs"].values())
+
+ for vlanID_range in self.config.get("vlanID_range"):
+ start_vlanid, end_vlanid = vlanID_range.split("-")
+
+ if int(start_vlanid) > int(end_vlanid):
+ raise vimconn.VimConnConflictException(
+ "Invalid vlan ID range {}".format(vlanID_range)
+ )
+
+ for vid in range(int(start_vlanid), int(end_vlanid) + 1):
+ if vid not in used_ids:
+ vlan_id = vid
+ self.persistent_info["used_vlanIDs"][network_name] = vlan_id
+ return vlan_id
+
+ if vlan_id is None:
+ raise vimconn.VimConnConflictException("All Vlan IDs are in use")
+
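+ # Illustrative config sketch (values assumed): 'vlanID_range' is a list of
+ # "start-end" strings parsed by genrate_vlanID() above, e.g.:
+ #
+ #     config = {"vlanID_range": ["3000-3100", "3150-3200"]}
+ #
+ # The first ID, scanning the ranges in order, that is not already recorded
+ # in persistent_info["used_vlanIDs"] is reserved for the network and returned.
+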
+ def get_obj(self, content, vimtype, name):
+ """
+ Get the vsphere object associated with a given text name
+ """
+ obj = None
+ container = content.viewManager.CreateContainerView(
+ content.rootFolder, vimtype, True
+ )
+
+ for item in container.view:
+ if item.name == name:
+ obj = item
+ break
+
+ return obj
+
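+ # Usage note (sketch): get_obj() linearly scans a container view rooted at
+ # rootFolder; for example, to find a host by a hypothetical name:
+ #
+ #     host = self.get_obj(content, [vim.HostSystem], "esxi-01.example.com")
+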
def insert_media_to_vm(self, vapp, image_id):
"""
Method to insert media CD-ROM (ISO image) from catalog to vm.
"""
raise VimConnNotImplemented("Should have implemented this")
+ def new_classification(self, name, ctype, definition):
+ """Creates a traffic classification in the VIM
+ Params:
+ 'name': name of this classification
+ 'ctype': type of this classification
+ 'definition': definition of this classification (type-dependent free-form text)
+ Returns the VIM's classification ID on success or raises an exception on failure
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def get_classification(self, classification_id):
+ """Obtain classification details of the VIM's classification with ID='classification_id'
+ Return a dict that contains:
+ 'id': VIM's classification ID (same as classification_id)
+ 'name': VIM's classification name
+ 'type': type of this classification
+ 'definition': definition of the classification
+ 'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+ 'error_msg': (optional) text that explains the ERROR status
+ other VIM specific fields: (optional) whenever possible
+ Raises an exception upon error or when classification is not found
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def get_classification_list(self, filter_dict={}):
+ """Obtain classifications from the VIM
+ Params:
+ 'filter_dict' (optional): contains the entries to filter the classifications on and only return those that
+ match ALL:
+ id: string => returns classifications with this VIM's classification ID, which implies a return of one
+ classification at most
+ name: string => returns only classifications with this name
+ type: string => returns classifications of this type
+ definition: string => returns classifications that have this definition
+ tenant_id: string => returns only classifications that belong to this tenant/project
+ Returns a list of classification dictionaries, each dictionary contains:
+ 'id': (mandatory) VIM's classification ID
+ 'name': (mandatory) VIM's classification name
+ 'type': type of this classification
+ 'definition': definition of the classification
+ other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+ List can be empty if no classification matches the filter_dict. Raise an exception only upon VIM connectivity,
+ authorization, or some other unspecific error
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
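+ # Illustrative call (sketch; the connector instance and filter values are
+ # hypothetical). Filter entries are ANDed, so this returns at most the
+ # classifications matching both name and tenant:
+ #
+ #     classifications = vim.get_classification_list(
+ #         filter_dict={"name": "http-5-tuple", "tenant_id": tenant_id}
+ #     )
+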
+ def refresh_classifications_status(self, classification_list):
+ """Get the status of the classifications
+ Params: the list of classification identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this classifier
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ raise VimConnNotImplemented("Should have implemented this")
+
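+ # Illustrative return shape (sketch) for the refresh_*_status contract
+ # documented above, keyed by VIM id:
+ #
+ #     {
+ #         "6a3f9d2c-...": {
+ #             "status": "ACTIVE",
+ #             "error_msg": "",
+ #             "vim_info": "...",  # yaml.safe_dump of the raw VIM data
+ #         }
+ #     }
+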
+ def delete_classification(self, classification_id):
+ """Deletes a classification from the VIM
+ Returns the classification ID (classification_id) or raises an exception upon error or when classification is
+ not found
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
+ """Creates a service function instance in the VIM
+ Params:
+ 'name': name of this service function instance
+ 'ingress_ports': set of ingress ports (VIM's port IDs)
+ 'egress_ports': set of egress ports (VIM's port IDs)
+ 'sfc_encap': boolean stating whether this specific instance supports IETF SFC Encapsulation
+ Returns the VIM's service function instance ID on success or raises an exception on failure
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def get_sfi(self, sfi_id):
+ """Obtain service function instance details of the VIM's service function instance with ID='sfi_id'
+ Return a dict that contains:
+ 'id': VIM's sfi ID (same as sfi_id)
+ 'name': VIM's sfi name
+ 'ingress_ports': set of ingress ports (VIM's port IDs)
+ 'egress_ports': set of egress ports (VIM's port IDs)
+ 'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+ 'error_msg': (optional) text that explains the ERROR status
+ other VIM specific fields: (optional) whenever possible
+ Raises an exception upon error or when service function instance is not found
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def get_sfi_list(self, filter_dict={}):
+ """Obtain service function instances from the VIM
+ Params:
+ 'filter_dict' (optional): contains the entries to filter the sfis on and only return those that match ALL:
+ id: string => returns sfis with this VIM's sfi ID, which implies a return of one sfi at most
+ name: string => returns only service function instances with this name
+ tenant_id: string => returns only service function instances that belong to this tenant/project
+ Returns a list of service function instance dictionaries, each dictionary contains:
+ 'id': (mandatory) VIM's sfi ID
+ 'name': (mandatory) VIM's sfi name
+ 'ingress_ports': set of ingress ports (VIM's port IDs)
+ 'egress_ports': set of egress ports (VIM's port IDs)
+ other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+ List can be empty if no sfi matches the filter_dict. Raise an exception only upon VIM connectivity,
+ authorization, or some other unspecific error
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def delete_sfi(self, sfi_id):
+ """Deletes a service function instance from the VIM
+ Returns the service function instance ID (sfi_id) or raises an exception upon error or when sfi is not found
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def refresh_sfis_status(self, sfi_list):
+ """Get the status of the service function instances
+ Params: the list of sfi identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function instance
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ raise VimConnNotImplemented("Should have implemented this")
+
+ def new_sf(self, name, sfis, sfc_encap=True):
+ """Creates (an abstract) service function in the VIM
+ Params:
+ 'name': name of this service function
+ 'sfis': set of service function instances of this (abstract) service function
+ 'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
+ Returns the VIM's service function ID on success or raises an exception on failure
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def get_sf(self, sf_id):
+ """Obtain service function details of the VIM's service function with ID='sf_id'
+ Return a dict that contains:
+ 'id': VIM's sf ID (same as sf_id)
+ 'name': VIM's sf name
+ 'sfis': VIM's sf's set of VIM's service function instance IDs
+ 'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
+ 'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+ 'error_msg': (optional) text that explains the ERROR status
+ other VIM specific fields: (optional) whenever possible
+ Raises an exception upon error or when sf is not found
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def get_sf_list(self, filter_dict={}):
+ """Obtain service functions from the VIM
+ Params:
+ 'filter_dict' (optional): contains the entries to filter the sfs on and only return those that match ALL:
+ id: string => returns sfs with this VIM's sf ID, which implies a return of one sf at most
+ name: string => returns only service functions with this name
+ tenant_id: string => returns only service functions that belong to this tenant/project
+ Returns a list of service function dictionaries, each dictionary contains:
+ 'id': (mandatory) VIM's sf ID
+ 'name': (mandatory) VIM's sf name
+ 'sfis': VIM's sf's set of VIM's service function instance IDs
+ 'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
+ other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+ List can be empty if no sf matches the filter_dict. Raise an exception only upon VIM connectivity,
+ authorization, or some other unspecific error
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def delete_sf(self, sf_id):
+ """Deletes (an abstract) service function from the VIM
+ Returns the service function ID (sf_id) or raises an exception upon error or when sf is not found
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def refresh_sfs_status(self, sf_list):
+ """Get the status of the service functions
+ Params: the list of sf identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ raise VimConnNotImplemented("Should have implemented this")
+
+ def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
+ """Creates a service function path
+ Params:
+ 'name': name of this service function path
+ 'classifications': set of traffic classifications that should be matched on to get into this sfp
+ 'sfs': list of every service function that constitutes this path, from first to last
+ 'sfc_encap': whether this is an SFC-Encapsulated chain (i.e using NSH), True by default
+ 'spi': (optional) the Service Function Path identifier (SPI: Service Path Identifier) for this path
+ Returns the VIM's sfp ID on success or raises an exception on failure
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
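+ # Illustrative chain creation (sketch; all ids are hypothetical): an sfp
+ # ties matched classifications to an ordered list of service functions:
+ #
+ #     sfp_id = vim.new_sfp(
+ #         "web-chain",
+ #         [classification_id],
+ #         [firewall_sf_id, dpi_sf_id],
+ #         sfc_encap=True,
+ #     )
+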
+ def get_sfp(self, sfp_id):
+ """Obtain service function path details of the VIM's sfp with ID='sfp_id'
+ Return a dict that contains:
+ 'id': VIM's sfp ID (same as sfp_id)
+ 'name': VIM's sfp name
+ 'classifications': VIM's sfp's list of VIM's classification IDs
+ 'sfs': VIM's sfp's list of VIM's service function IDs
+ 'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+ 'error_msg': (optional) text that explains the ERROR status
+ other VIM specific fields: (optional) whenever possible
+ Raises an exception upon error or when sfp is not found
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def get_sfp_list(self, filter_dict={}):
+ """Obtain service function paths from VIM
+ Params:
+ 'filter_dict' (optional): contains the entries to filter the sfps on, and only return those that match ALL:
+ id: string => returns sfps with this VIM's sfp ID, which implies a return of one sfp at most
+ name: string => returns only sfps with this name
+ tenant_id: string => returns only sfps that belong to this tenant/project
+ Returns a list of service function path dictionaries, each dictionary contains:
+ 'id': (mandatory) VIM's sfp ID
+ 'name': (mandatory) VIM's sfp name
+ 'classifications': VIM's sfp's list of VIM's classification IDs
+ 'sfs': VIM's sfp's list of VIM's service function IDs
+ other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+ List can be empty if no sfp matches the filter_dict. Raise an exception only upon VIM connectivity,
+ authorization, or some other unspecific error
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
+ def refresh_sfps_status(self, sfp_list):
+ """Get the status of the service function path
+ Params: the list of sfp identifiers
+ Returns a dictionary with:
+ vm_id: #VIM id of this service function path
+ status: #Mandatory. Text with one of:
+ # DELETED (not found at vim)
+ # VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+ # OTHER (Vim reported other status not understood)
+ # ERROR (VIM indicates an ERROR status)
+ # ACTIVE,
+ # CREATING (on building process)
+ error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR
+ vim_info: #Text with plain information obtained from vim (yaml.safe_dump)
+ """
+ raise VimConnNotImplemented("Should have implemented this")
+
+ def delete_sfp(self, sfp_id):
+ """Deletes a service function path from the VIM
+ Returns the sfp ID (sfp_id) or raises an exception upon error or when sfp is not found
+ """
+ raise VimConnNotImplemented("SFC support not implemented")
+
def migrate_instance(self, vm_id, compute_host=None):
"""Migrate a vdu
Params:
vm_id: ID of the vdu to migrate
compute_host: host to migrate the vdu to
"""
raise VimConnNotImplemented("Should have implemented this")
+
+ # NOT USED METHODS in current version. Deprecated
+ @deprecated
+ def host_vim2gui(self, host, server_dict):
+ """Transform host dictionary from VIM format to GUI format,
+ and append to the server_dict
+ """
+ raise VimConnNotImplemented("Should have implemented this")
+
+ @deprecated
+ def get_hosts_info(self):
+ """Get the information of deployed hosts
+ Returns the hosts content"""
+ raise VimConnNotImplemented("Should have implemented this")
+
+ @deprecated
+ def get_hosts(self, vim_tenant):
+ """Get the hosts and deployed instances
+ Returns the hosts content"""
+ raise VimConnNotImplemented("Should have implemented this")
+
+ @deprecated
+ def get_processor_rankings(self):
+ """Get the processor rankings in the VIM database"""
+ raise VimConnNotImplemented("Should have implemented this")
+
+ @deprecated
+ def new_host(self, host_data):
+ """Adds a new host to VIM"""
+ """Returns status code of the VIM response"""
+ raise VimConnNotImplemented("Should have implemented this")
+
+ @deprecated
+ def new_external_port(self, port_data):
+ """Adds a external port to VIM"""
+ """Returns the port identifier"""
+ raise VimConnNotImplemented("Should have implemented this")
+
+ @deprecated
+ def new_external_network(self, net_name, net_type):
+ """Adds a external network to VIM (shared)"""
+ """Returns the network identifier"""
+ raise VimConnNotImplemented("Should have implemented this")
+
+ @deprecated
+ def connect_port_network(self, port_id, network_id, admin=False):
+ """Connects a external port to a network"""
+ """Returns status code of the VIM response"""
+ raise VimConnNotImplemented("Should have implemented this")
+
+ @deprecated
+ def new_vminstancefromJSON(self, vm_data):
+ """Adds a VM instance to VIM"""
+ """Returns the instance identifier"""
+ raise VimConnNotImplemented("Should have implemented this")
---
other:
- |
- Removing unused methods from RO module to get rid of unmaintained code.
+ Revert change 12910 "Removing unused methods from RO module to get rid of unmaintained code."
--- /dev/null
+#######################################################################################
+# Copyright ETSI Contributors and Others.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#######################################################################################
+---
+prelude: >
+ Replace this text with content to appear at the top of the section for this
+ release. All of the prelude content is merged together and then rendered
+ separately from the items listed in other parts of the file, so the text
+ needs to be worded so that both the prelude and the other items make sense
+ when read independently. This may mean repeating some details. Not every
+ release note requires a prelude. Usually only notes describing major
+ features or adding release theme details should have a prelude.
+features:
+ - |
+ List new features here, or remove this section. All of the list items in
+ this section are combined when the release notes are rendered, so the text
+ needs to be worded so that it does not depend on any information only
+ available in another section, such as the prelude. This may mean repeating
+ some details.
+issues:
+ - |
+ List known issues here, or remove this section. All of the list items in
+ this section are combined when the release notes are rendered, so the text
+ needs to be worded so that it does not depend on any information only
+ available in another section, such as the prelude. This may mean repeating
+ some details.
+upgrade:
+ - |
+ List upgrade notes here, or remove this section. All of the list items in
+ this section are combined when the release notes are rendered, so the text
+ needs to be worded so that it does not depend on any information only
+ available in another section, such as the prelude. This may mean repeating
+ some details.
+deprecations:
+ - |
+ List deprecations notes here, or remove this section. All of the list
+ items in this section are combined when the release notes are rendered, so
+ the text needs to be worded so that it does not depend on any information
+ only available in another section, such as the prelude. This may mean
+ repeating some details.
+critical:
+ - |
+ Add critical notes here, or remove this section. All of the list items in
+ this section are combined when the release notes are rendered, so the text
+ needs to be worded so that it does not depend on any information only
+ available in another section, such as the prelude. This may mean repeating
+ some details.
+security:
+ - |
+ Add security notes here, or remove this section. All of the list items in
+ this section are combined when the release notes are rendered, so the text
+ needs to be worded so that it does not depend on any information only
+ available in another section, such as the prelude. This may mean repeating
+ some details.
+fixes:
+ - |
+ Add normal bug fixes here, or remove this section. All of the list items
+ in this section are combined when the release notes are rendered, so the
+ text needs to be worded so that it does not depend on any information only
+ available in another section, such as the prelude. This may mean repeating
+ some details.
+other:
+ - |
+ Add other notes here, or remove this section. All of the list items in
+ this section are combined when the release notes are rendered, so the text
+ needs to be worded so that it does not depend on any information only
+ available in another section, such as the prelude. This may mean repeating
+ some details.