From: sousaedu Date: Wed, 17 Feb 2021 14:05:18 +0000 (+0100) Subject: Reformatting RO X-Git-Tag: branch-sol006v331-start~14 X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2FRO.git;a=commitdiff_plain;h=80135b928ab442c38898750b4751480205b4affc Reformatting RO Change-Id: I86e6a102b5bf2e0221b29096bbb132ca656844c5 Signed-off-by: sousaedu --- diff --git a/NG-RO/osm_ng_ro/__init__.py b/NG-RO/osm_ng_ro/__init__.py index d2ac4c4b..3f156297 100644 --- a/NG-RO/osm_ng_ro/__init__.py +++ b/NG-RO/osm_ng_ro/__init__.py @@ -12,12 +12,13 @@ # under the License. ## -version = '8.0.1.post0' -version_date = '2020-06-29' +version = "8.0.1.post0" +version_date = "2020-06-29" # Obtain installed package version. Ignore if error, e.g. pkg_resources not installed try: from pkg_resources import get_distribution + version = get_distribution("osm_ng_ro").version except Exception: pass diff --git a/NG-RO/osm_ng_ro/html_out.py b/NG-RO/osm_ng_ro/html_out.py index 40594006..132bf68a 100644 --- a/NG-RO/osm_ng_ro/html_out.py +++ b/NG-RO/osm_ng_ro/html_out.py @@ -118,18 +118,37 @@ def format(data, request, response, toke_info): :param response: cherrypy response :return: string with teh html response """ - response.headers["Content-Type"] = 'text/html' + response.headers["Content-Type"] = "text/html" + if response.status == HTTPStatus.UNAUTHORIZED.value: - if response.headers.get("WWW-Authenticate") and request.config.get("auth.allow_basic_authentication"): - response.headers["WWW-Authenticate"] = "Basic" + response.headers["WWW-Authenticate"][6:] + if response.headers.get("WWW-Authenticate") and request.config.get( + "auth.allow_basic_authentication" + ): + response.headers["WWW-Authenticate"] = ( + "Basic" + response.headers["WWW-Authenticate"][6:] + ) + return else: return html_auth2.format(error=data) + if request.path_info in ("/version", "/system"): - return "
" + yaml.safe_dump(data, explicit_start=False, indent=4, default_flow_style=False) + "
" + return ( + "
"
+            + yaml.safe_dump(
+                data, explicit_start=False, indent=4, default_flow_style=False
+            )
+            + "
" + ) + body = html_body.format(item=request.path_info) + if response.status and response.status > 202: - body += html_body_error.format(yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False)) + body += html_body_error.format( + yaml.safe_dump( + data, explicit_start=True, indent=4, default_flow_style=False + ) + ) elif isinstance(data, (list, tuple)): # if request.path_info == "/ns/v1/deploy": # body += html_upload_body.format(request.path_info + "_content", "VNFD") @@ -142,40 +161,61 @@ def format(data, request, response, toke_info): data_id = k.pop("_id", None) elif isinstance(k, str): data_id = k + if request.path_info == "/ns/v1/deploy": - body += '

{id}: {t}

' \ - .format(url=request.path_info, id=data_id, t=html_escape(str(k))) + body += ( + '

{id}: {t}

'.format( + url=request.path_info, id=data_id, t=html_escape(str(k)) + ) + ) else: - body += '

{id}: {t}

'.format(url=request.path_info, id=data_id, - t=html_escape(str(k))) + body += '

{id}: {t}

'.format( + url=request.path_info, id=data_id, t=html_escape(str(k)) + ) elif isinstance(data, dict): if "Location" in response.headers: body += ' show '.format(response.headers["Location"]) else: - body += ' '\ - .format(request.path_info[:request.path_info.rfind("/")]) - if request.path_info.startswith("/nslcm/v1/ns_instances_content/") or \ - request.path_info.startswith("/nslcm/v1/ns_instances/"): - _id = request.path_info[request.path_info.rfind("/")+1:] + body += ( + ' ' + ).format(request.path_info[: request.path_info.rfind("/")]) + + if request.path_info.startswith( + "/nslcm/v1/ns_instances_content/" + ) or request.path_info.startswith("/nslcm/v1/ns_instances/"): + _id = request.path_info[request.path_info.rfind("/") + 1 :] body += html_nslcmop_body.format(id=_id) - elif request.path_info.startswith("/nsilcm/v1/netslice_instances_content/") or \ - request.path_info.startswith("/nsilcm/v1/netslice_instances/"): - _id = request.path_info[request.path_info.rfind("/")+1:] + elif request.path_info.startswith( + "/nsilcm/v1/netslice_instances_content/" + ) or request.path_info.startswith("/nsilcm/v1/netslice_instances/"): + _id = request.path_info[request.path_info.rfind("/") + 1 :] body += html_nsilcmop_body.format(id=_id) - body += "
" + html_escape(yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False)) + \
-                "
" + + body += ( + "
"
+            + html_escape(
+                yaml.safe_dump(
+                    data, explicit_start=True, indent=4, default_flow_style=False
+                )
+            )
+            + "
" + ) elif data is None: if request.method == "DELETE" or "METHOD=DELETE" in request.query_string: body += "
 deleted 
" else: body = html_escape(str(data)) + user_text = " " + if toke_info: if toke_info.get("username"): user_text += "user: {}".format(toke_info.get("username")) + if toke_info.get("project_id"): user_text += ", project: {}".format(toke_info.get("project_name")) + return html_start.format(user_text) + body + html_end # yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False) # tags=False, diff --git a/NG-RO/osm_ng_ro/ns.py b/NG-RO/osm_ng_ro/ns.py index 1c2e960a..660a5f57 100644 --- a/NG-RO/osm_ng_ro/ns.py +++ b/NG-RO/osm_ng_ro/ns.py @@ -16,12 +16,20 @@ # limitations under the License. ## -import logging # import yaml +import logging from traceback import format_exc as traceback_format_exc from osm_ng_ro.ns_thread import NsWorker, NsWorkerException, deep_get from osm_ng_ro.validation import validate_input, deploy_schema -from osm_common import dbmongo, dbmemory, fslocal, fsmongo, msglocal, msgkafka, version as common_version +from osm_common import ( + dbmongo, + dbmemory, + fslocal, + fsmongo, + msglocal, + msgkafka, + version as common_version, +) from osm_common.dbbase import DbException from osm_common.fsbase import FsException from osm_common.msgbase import MsgException @@ -30,7 +38,13 @@ from uuid import uuid4 from threading import Lock from random import choice as random_choice from time import time -from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError +from jinja2 import ( + Environment, + TemplateError, + TemplateNotFound, + StrictUndefined, + UndefinedError, +) from cryptography.hazmat.primitives import serialization as crypto_serialization from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.backends import default_backend as crypto_default_backend @@ -40,7 +54,6 @@ min_common_version = "0.1.16" class NsException(Exception): - def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST): self.http_code = http_code super(Exception, self).__init__(message) @@ -58,10 +71,12 @@ def get_process_id(): text_id_ = f.readline() _, _, text_id = text_id_.rpartition("/") text_id = text_id.replace("\n", "")[:12] + if text_id: return text_id except Exception: pass + # Return a random id return "".join(random_choice("0123456789abcdef") for _ in range(12)) @@ -69,13 +84,14 @@ def get_process_id(): def versiontuple(v): """utility for compare dot separate versions. Fills with zeros to proper number comparison""" filled = [] + for point in v.split("."): filled.append(point.zfill(8)) + return tuple(filled) class Ns(object): - def __init__(self): self.db = None self.fs = None @@ -105,10 +121,14 @@ class Ns(object): self.config = config self.config["process_id"] = get_process_id() # used for HA identity self.logger = logging.getLogger("ro.ns") + # check right version of common if versiontuple(common_version) < versiontuple(min_common_version): - raise NsException("Not compatible osm/common version '{}'. Needed '{}' or higher".format( - common_version, min_common_version)) + raise NsException( + "Not compatible osm/common version '{}'. 
Needed '{}' or higher".format( + common_version, min_common_version + ) + ) try: if not self.db: @@ -119,8 +139,12 @@ class Ns(object): self.db = dbmemory.DbMemory() self.db.db_connect(config["database"]) else: - raise NsException("Invalid configuration param '{}' at '[database]':'driver'".format( - config["database"]["driver"])) + raise NsException( + "Invalid configuration param '{}' at '[database]':'driver'".format( + config["database"]["driver"] + ) + ) + if not self.fs: if config["storage"]["driver"] == "local": self.fs = fslocal.FsLocal() @@ -131,8 +155,12 @@ class Ns(object): elif config["storage"]["driver"] is None: pass else: - raise NsException("Invalid configuration param '{}' at '[storage]':'driver'".format( - config["storage"]["driver"])) + raise NsException( + "Invalid configuration param '{}' at '[storage]':'driver'".format( + config["storage"]["driver"] + ) + ) + if not self.msg: if config["message"]["driver"] == "local": self.msg = msglocal.MsgLocal() @@ -141,15 +169,18 @@ class Ns(object): self.msg = msgkafka.MsgKafka() self.msg.connect(config["message"]) else: - raise NsException("Invalid configuration param '{}' at '[message]':'driver'".format( - config["message"]["driver"])) + raise NsException( + "Invalid configuration param '{}' at '[message]':'driver'".format( + config["message"]["driver"] + ) + ) # TODO load workers to deal with exising database tasks self.write_lock = Lock() except (DbException, FsException, MsgException) as e: raise NsException(str(e), http_code=e.http_code) - + def get_assigned_vims(self): return list(self.vims_assigned.keys()) @@ -157,13 +188,17 @@ class Ns(object): try: if self.db: self.db.db_disconnect() + if self.fs: self.fs.fs_disconnect() + if self.msg: self.msg.disconnect() + self.write_lock = None except (DbException, FsException, MsgException) as e: raise NsException(str(e), http_code=e.http_code) + for worker in self.workers: worker.insert_task(("terminate",)) @@ -174,20 +209,34 @@ class Ns(object): return the index of the assigned worker thread. 
Worker threads are storead at self.workers """ # Look for a thread in idle status - worker_id = next((i for i in range(len(self.workers)) if self.workers[i] and self.workers[i].idle), None) + worker_id = next( + ( + i + for i in range(len(self.workers)) + if self.workers[i] and self.workers[i].idle + ), + None, + ) + if worker_id is not None: # unset idle status to avoid race conditions self.workers[worker_id].idle = False else: worker_id = len(self.workers) + if worker_id < self.config["global"]["server.ns_threads"]: # create a new worker - self.workers.append(NsWorker(worker_id, self.config, self.plugins, self.db)) + self.workers.append( + NsWorker(worker_id, self.config, self.plugins, self.db) + ) self.workers[worker_id].start() else: # reached maximum number of threads, assign VIM to an existing one worker_id = self.next_worker - self.next_worker = (self.next_worker + 1) % self.config["global"]["server.ns_threads"] + self.next_worker = (self.next_worker + 1) % self.config["global"][ + "server.ns_threads" + ] + return worker_id def assign_vim(self, target_id): @@ -230,12 +279,18 @@ class Ns(object): def unload_unused_vims(self): with self.write_lock: vims_to_unload = [] + for target_id in self.vims_assigned: - if not self.db.get_one("ro_tasks", - q_filter={"target_id": target_id, - "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED']}, - fail_on_empty=False): + if not self.db.get_one( + "ro_tasks", + q_filter={ + "target_id": target_id, + "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"], + }, + fail_on_empty=False, + ): vims_to_unload.append(target_id) + for target_id in vims_to_unload: self._unload_vim(target_id) @@ -248,70 +303,86 @@ class Ns(object): vnfd_id, _, other = where.partition(":") _type, _, name = other.partition(":") vnfd = self.db.get_one("vnfds", {"_id": vnfd_id}) + if _type == "file": base_folder = vnfd["_admin"]["storage"] - cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"], name) + cloud_init_file = "{}/{}/cloud_init/{}".format( + base_folder["folder"], base_folder["pkg-dir"], name + ) + if not self.fs: - raise NsException("Cannot read file '{}'. Filesystem not loaded, change configuration at storage.driver" - .format(cloud_init_file)) + raise NsException( + "Cannot read file '{}'. 
Filesystem not loaded, change configuration at storage.driver".format( + cloud_init_file + ) + ) + with self.fs.file_open(cloud_init_file, "r") as ci_file: cloud_init_content = ci_file.read() elif _type == "vdu": cloud_init_content = vnfd["vdu"][int(name)]["cloud-init"] else: raise NsException("Mismatch descriptor for cloud init: {}".format(where)) + return cloud_init_content def _parse_jinja2(self, cloud_init_content, params, context): - try: env = Environment(undefined=StrictUndefined) template = env.from_string(cloud_init_content) + return template.render(params or {}) except UndefinedError as e: raise NsException( "Variable '{}' defined at vnfd='{}' must be provided in the instantiation parameters" - "inside the 'additionalParamsForVnf' block".format(e, context)) + "inside the 'additionalParamsForVnf' block".format(e, context) + ) except (TemplateError, TemplateNotFound) as e: - raise NsException("Error parsing Jinja2 to cloud-init content at vnfd='{}': {}".format(context, e)) + raise NsException( + "Error parsing Jinja2 to cloud-init content at vnfd='{}': {}".format( + context, e + ) + ) def _create_db_ro_nsrs(self, nsr_id, now): try: key = rsa.generate_private_key( - backend=crypto_default_backend(), - public_exponent=65537, - key_size=2048 + backend=crypto_default_backend(), public_exponent=65537, key_size=2048 ) private_key = key.private_bytes( crypto_serialization.Encoding.PEM, crypto_serialization.PrivateFormat.PKCS8, - crypto_serialization.NoEncryption()) + crypto_serialization.NoEncryption(), + ) public_key = key.public_key().public_bytes( crypto_serialization.Encoding.OpenSSH, - crypto_serialization.PublicFormat.OpenSSH + crypto_serialization.PublicFormat.OpenSSH, ) - private_key = private_key.decode('utf8') + private_key = private_key.decode("utf8") # Change first line because Paramiko needs a explicit start with 'BEGIN RSA PRIVATE KEY' i = private_key.find("\n") private_key = "-----BEGIN RSA PRIVATE KEY-----" + private_key[i:] - public_key = public_key.decode('utf8') + public_key = public_key.decode("utf8") except Exception as e: raise NsException("Cannot create ssh-keys: {}".format(e)) schema_version = "1.1" - private_key_encrypted = self.db.encrypt(private_key, schema_version=schema_version, salt=nsr_id) + private_key_encrypted = self.db.encrypt( + private_key, schema_version=schema_version, salt=nsr_id + ) db_content = { "_id": nsr_id, "_admin": { "created": now, "modified": now, - "schema_version": schema_version + "schema_version": schema_version, }, "public_key": public_key, "private_key": private_key_encrypted, - "actions": [] + "actions": [], } self.db.create("ro_nsrs", db_content) + return db_content def deploy(self, session, indata, version, nsr_id, *args, **kwargs): @@ -320,14 +391,15 @@ class Ns(object): action_id = indata.get("action_id", str(uuid4())) task_index = 0 # get current deployment - db_nsr_update = {} # update operation on nsrs + db_nsr_update = {} # update operation on nsrs db_vnfrs_update = {} - db_vnfrs = {} # vnf's info indexed by _id + db_vnfrs = {} # vnf's info indexed by _id nb_ro_tasks = 0 # for logging vdu2cloud_init = indata.get("cloud_init_content") or {} - step = '' + step = "" logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id) self.logger.debug(logging_text + "Enter") + try: step = "Getting ns and vnfr record from db" db_nsr = self.db.get_one("nsrs", {"_id": nsr_id}) @@ -336,29 +408,47 @@ class Ns(object): # read from db: vnf's of this ns step = "Getting vnfrs from db" db_vnfrs_list = self.db.get_list("vnfrs", 
{"nsr-id-ref": nsr_id}) + if not db_vnfrs_list: raise NsException("Cannot obtain associated VNF for ns") + for vnfr in db_vnfrs_list: db_vnfrs[vnfr["_id"]] = vnfr db_vnfrs_update[vnfr["_id"]] = {} + now = time() db_ro_nsr = self.db.get_one("ro_nsrs", {"_id": nsr_id}, fail_on_empty=False) + if not db_ro_nsr: db_ro_nsr = self._create_db_ro_nsrs(nsr_id, now) + ro_nsr_public_key = db_ro_nsr["public_key"] # check that action_id is not in the list of actions. Suffixed with :index if action_id in db_ro_nsr["actions"]: index = 1 + while True: new_action_id = "{}:{}".format(action_id, index) + if new_action_id not in db_ro_nsr["actions"]: action_id = new_action_id - self.logger.debug(logging_text + "Changing action_id in use to {}".format(action_id)) + self.logger.debug( + logging_text + + "Changing action_id in use to {}".format(action_id) + ) break + index += 1 - def _create_task(target_id, item, action, target_record, target_record_id, extra_dict=None): + def _create_task( + target_id, + item, + action, + target_record, + target_record_id, + extra_dict=None, + ): nonlocal task_index nonlocal action_id nonlocal nsr_id @@ -374,9 +464,12 @@ class Ns(object): "target_record": target_record, "target_record_id": target_record_id, } + if extra_dict: - task.update(extra_dict) # params, find_params, depends_on + task.update(extra_dict) # params, find_params, depends_on + task_index += 1 + return task def _create_ro_task(target_id, task): @@ -404,20 +497,28 @@ class Ns(object): "to_check_at": now, "tasks": [task], } + return db_ro_task def _process_image_params(target_image, vim_info, target_record_id): find_params = {} + if target_image.get("image"): find_params["filter_dict"] = {"name": target_image.get("image")} + if target_image.get("vim_image_id"): - find_params["filter_dict"] = {"id": target_image.get("vim_image_id")} + find_params["filter_dict"] = { + "id": target_image.get("vim_image_id") + } + if target_image.get("image_checksum"): - find_params["filter_dict"] = {"checksum": target_image.get("image_checksum")} + find_params["filter_dict"] = { + "checksum": target_image.get("image_checksum") + } + return {"find_params": find_params} def _process_flavor_params(target_flavor, vim_info, target_record_id): - def _get_resource_allocation_params(quota_descriptor): """ read the quota_descriptor from vnfd and fetch the resource allocation properties from the @@ -426,12 +527,16 @@ class Ns(object): :return: quota params for limit, reserve, shares from the descriptor object """ quota = {} + if quota_descriptor.get("limit"): quota["limit"] = int(quota_descriptor["limit"]) + if quota_descriptor.get("reserve"): quota["reserve"] = int(quota_descriptor["reserve"]) + if quota_descriptor.get("shares"): quota["shares"] = int(quota_descriptor["shares"]) + return quota flavor_data = { @@ -441,59 +546,121 @@ class Ns(object): } numa = {} extended = {} + if target_flavor.get("guest-epa"): extended = {} epa_vcpu_set = False + if target_flavor["guest-epa"].get("numa-node-policy"): - numa_node_policy = target_flavor["guest-epa"].get("numa-node-policy") + numa_node_policy = target_flavor["guest-epa"].get( + "numa-node-policy" + ) + if numa_node_policy.get("node"): numa_node = numa_node_policy["node"][0] + if numa_node.get("num-cores"): numa["cores"] = numa_node["num-cores"] epa_vcpu_set = True + if numa_node.get("paired-threads"): - if numa_node["paired-threads"].get("num-paired-threads"): - numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"]) + if numa_node["paired-threads"].get( + 
"num-paired-threads" + ): + numa["paired-threads"] = int( + numa_node["paired-threads"][ + "num-paired-threads" + ] + ) epa_vcpu_set = True - if len(numa_node["paired-threads"].get("paired-thread-ids")): + + if len( + numa_node["paired-threads"].get("paired-thread-ids") + ): numa["paired-threads-id"] = [] - for pair in numa_node["paired-threads"]["paired-thread-ids"]: + + for pair in numa_node["paired-threads"][ + "paired-thread-ids" + ]: numa["paired-threads-id"].append( - (str(pair["thread-a"]), str(pair["thread-b"])) + ( + str(pair["thread-a"]), + str(pair["thread-b"]), + ) ) + if numa_node.get("num-threads"): numa["threads"] = int(numa_node["num-threads"]) epa_vcpu_set = True + if numa_node.get("memory-mb"): - numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1) + numa["memory"] = max( + int(numa_node["memory-mb"] / 1024), 1 + ) + if target_flavor["guest-epa"].get("mempage-size"): - extended["mempage-size"] = target_flavor["guest-epa"].get("mempage-size") - if target_flavor["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set: - if target_flavor["guest-epa"]["cpu-pinning-policy"] == "DEDICATED": - if target_flavor["guest-epa"].get("cpu-thread-pinning-policy") and \ - target_flavor["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER": + extended["mempage-size"] = target_flavor["guest-epa"].get( + "mempage-size" + ) + + if ( + target_flavor["guest-epa"].get("cpu-pinning-policy") + and not epa_vcpu_set + ): + if ( + target_flavor["guest-epa"]["cpu-pinning-policy"] + == "DEDICATED" + ): + if ( + target_flavor["guest-epa"].get( + "cpu-thread-pinning-policy" + ) + and target_flavor["guest-epa"][ + "cpu-thread-pinning-policy" + ] + != "PREFER" + ): numa["cores"] = max(flavor_data["vcpus"], 1) else: numa["threads"] = max(flavor_data["vcpus"], 1) + epa_vcpu_set = True + if target_flavor["guest-epa"].get("cpu-quota") and not epa_vcpu_set: - cpuquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("cpu-quota")) + cpuquota = _get_resource_allocation_params( + target_flavor["guest-epa"].get("cpu-quota") + ) + if cpuquota: extended["cpu-quota"] = cpuquota + if target_flavor["guest-epa"].get("mem-quota"): - vduquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("mem-quota")) + vduquota = _get_resource_allocation_params( + target_flavor["guest-epa"].get("mem-quota") + ) + if vduquota: extended["mem-quota"] = vduquota + if target_flavor["guest-epa"].get("disk-io-quota"): - diskioquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("disk-io-quota")) + diskioquota = _get_resource_allocation_params( + target_flavor["guest-epa"].get("disk-io-quota") + ) + if diskioquota: extended["disk-io-quota"] = diskioquota + if target_flavor["guest-epa"].get("vif-quota"): - vifquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("vif-quota")) + vifquota = _get_resource_allocation_params( + target_flavor["guest-epa"].get("vif-quota") + ) + if vifquota: extended["vif-quota"] = vifquota + if numa: extended["numas"] = [numa] + if extended: flavor_data["extended"] = extended @@ -501,25 +668,38 @@ class Ns(object): flavor_data_name = flavor_data.copy() flavor_data_name["name"] = target_flavor["name"] extra_dict["params"] = {"flavor_data": flavor_data_name} + return extra_dict def _ip_profile_2_ro(ip_profile): if not ip_profile: return None + ro_ip_profile = { - "ip_version": "IPv4" if "v4" in ip_profile.get("ip-version", "ipv4") else "IPv6", + "ip_version": "IPv4" + if "v4" in ip_profile.get("ip-version", "ipv4") + else "IPv6", 
"subnet_address": ip_profile.get("subnet-address"), "gateway_address": ip_profile.get("gateway-address"), "dhcp_enabled": ip_profile["dhcp-params"].get("enabled", True) - if "dhcp_params" in ip_profile else False, + if "dhcp_params" in ip_profile + else False, "dhcp_start_address": ip_profile["dhcp-params"].get("start-address") - if "dhcp_params" in ip_profile else None, - "dhcp_count": ip_profile["dhcp-params"].get("count") if "dhcp_params" in ip_profile else None, + if "dhcp_params" in ip_profile + else None, + "dhcp_count": ip_profile["dhcp-params"].get("count") + if "dhcp_params" in ip_profile + else None, } + if ip_profile.get("dns-server"): - ro_ip_profile["dns_address"] = ";".join([v["address"] for v in ip_profile["dns-server"]]) - if ip_profile.get('security-group'): - ro_ip_profile["security_group"] = ip_profile['security-group'] + ro_ip_profile["dns_address"] = ";".join( + [v["address"] for v in ip_profile["dns-server"]] + ) + + if ip_profile.get("security-group"): + ro_ip_profile["security_group"] = ip_profile["security-group"] + return ro_ip_profile def _process_net_params(target_vld, vim_info, target_record_id): @@ -529,31 +709,50 @@ class Ns(object): if vim_info.get("sdn"): # vnf_preffix = "vnfrs:{}".format(vnfr_id) # ns_preffix = "nsrs:{}".format(nsr_id) - vld_target_record_id, _, _ = target_record_id.rpartition(".") # remove the ending ".sdn - extra_dict["params"] = {k: vim_info[k] for k in ("sdn-ports", "target_vim", "vlds", "type") - if vim_info.get(k)} + # remove the ending ".sdn + vld_target_record_id, _, _ = target_record_id.rpartition(".") + extra_dict["params"] = { + k: vim_info[k] + for k in ("sdn-ports", "target_vim", "vlds", "type") + if vim_info.get(k) + } + # TODO needed to add target_id in the dependency. if vim_info.get("target_vim"): - extra_dict["depends_on"] = [vim_info.get("target_vim") + " " + vld_target_record_id] + extra_dict["depends_on"] = [ + vim_info.get("target_vim") + " " + vld_target_record_id + ] + return extra_dict if vim_info.get("vim_network_name"): - extra_dict["find_params"] = {"filter_dict": {"name": vim_info.get("vim_network_name")}} + extra_dict["find_params"] = { + "filter_dict": {"name": vim_info.get("vim_network_name")} + } elif vim_info.get("vim_network_id"): - extra_dict["find_params"] = {"filter_dict": {"id": vim_info.get("vim_network_id")}} + extra_dict["find_params"] = { + "filter_dict": {"id": vim_info.get("vim_network_id")} + } elif target_vld.get("mgmt-network"): extra_dict["find_params"] = {"mgmt": True, "name": target_vld["id"]} else: # create extra_dict["params"] = { - "net_name": "{}-{}".format(indata["name"][:16], target_vld.get("name", target_vld["id"])[:16]), - "ip_profile": _ip_profile_2_ro(vim_info.get('ip_profile')), - "provider_network_profile": vim_info.get('provider_network'), + "net_name": "{}-{}".format( + indata["name"][:16], + target_vld.get("name", target_vld["id"])[:16], + ), + "ip_profile": _ip_profile_2_ro(vim_info.get("ip_profile")), + "provider_network_profile": vim_info.get("provider_network"), } + if not target_vld.get("underlay"): extra_dict["params"]["net_type"] = "bridge" else: - extra_dict["params"]["net_type"] = "ptp" if target_vld.get("type") == "ELINE" else "data" + extra_dict["params"]["net_type"] = ( + "ptp" if target_vld.get("type") == "ELINE" else "data" + ) + return extra_dict def _process_vdu_params(target_vdu, vim_info, target_record_id): @@ -563,71 +762,119 @@ class Ns(object): nonlocal vnfr nonlocal vdu2cloud_init nonlocal tasks_by_target_record_id + vnf_preffix = 
"vnfrs:{}".format(vnfr_id) ns_preffix = "nsrs:{}".format(nsr_id) image_text = ns_preffix + ":image." + target_vdu["ns-image-id"] flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"] extra_dict = {"depends_on": [image_text, flavor_text]} net_list = [] + for iface_index, interface in enumerate(target_vdu["interfaces"]): if interface.get("ns-vld-id"): net_text = ns_preffix + ":vld." + interface["ns-vld-id"] elif interface.get("vnf-vld-id"): net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"] else: - self.logger.error("Interface {} from vdu {} not connected to any vld".format( - iface_index, target_vdu["vdu-name"])) - continue # interface not connected to any vld + self.logger.error( + "Interface {} from vdu {} not connected to any vld".format( + iface_index, target_vdu["vdu-name"] + ) + ) + + continue # interface not connected to any vld + extra_dict["depends_on"].append(net_text) - net_item = {x: v for x, v in interface.items() if x in - ("name", "vpci", "port_security", "port_security_disable_strategy", "floating_ip")} + net_item = { + x: v + for x, v in interface.items() + if x + in ( + "name", + "vpci", + "port_security", + "port_security_disable_strategy", + "floating_ip", + ) + } net_item["net_id"] = "TASK-" + net_text net_item["type"] = "virtual" + # TODO mac_address: used for SR-IOV ifaces #TODO for other types # TODO floating_ip: True/False (or it can be None) if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"): # mark the net create task as type data - if deep_get(tasks_by_target_record_id, net_text, "params", "net_type"): - tasks_by_target_record_id[net_text]["params"]["net_type"] = "data" + if deep_get( + tasks_by_target_record_id, net_text, "params", "net_type" + ): + tasks_by_target_record_id[net_text]["params"][ + "net_type" + ] = "data" + net_item["use"] = "data" net_item["model"] = interface["type"] net_item["type"] = interface["type"] - elif interface.get("type") == "OM-MGMT" or interface.get("mgmt-interface") or \ - interface.get("mgmt-vnf"): + elif ( + interface.get("type") == "OM-MGMT" + or interface.get("mgmt-interface") + or interface.get("mgmt-vnf") + ): net_item["use"] = "mgmt" - else: # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"): + else: + # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"): net_item["use"] = "bridge" net_item["model"] = interface.get("type") + if interface.get("ip-address"): net_item["ip_address"] = interface["ip-address"] + if interface.get("mac-address"): net_item["mac_address"] = interface["mac-address"] + net_list.append(net_item) + if interface.get("mgmt-vnf"): extra_dict["mgmt_vnf_interface"] = iface_index elif interface.get("mgmt-interface"): extra_dict["mgmt_vdu_interface"] = iface_index + # cloud config cloud_config = {} + if target_vdu.get("cloud-init"): if target_vdu["cloud-init"] not in vdu2cloud_init: - vdu2cloud_init[target_vdu["cloud-init"]] = self._get_cloud_init(target_vdu["cloud-init"]) + vdu2cloud_init[target_vdu["cloud-init"]] = self._get_cloud_init( + target_vdu["cloud-init"] + ) + cloud_content_ = vdu2cloud_init[target_vdu["cloud-init"]] - cloud_config["user-data"] = self._parse_jinja2(cloud_content_, target_vdu.get("additionalParams"), - target_vdu["cloud-init"]) + cloud_config["user-data"] = self._parse_jinja2( + cloud_content_, + target_vdu.get("additionalParams"), + target_vdu["cloud-init"], + ) + if target_vdu.get("boot-data-drive"): cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive") + ssh_keys = [] + if target_vdu.get("ssh-keys"): ssh_keys += 
target_vdu.get("ssh-keys") + if target_vdu.get("ssh-access-required"): ssh_keys.append(ro_nsr_public_key) + if ssh_keys: cloud_config["key-pairs"] = ssh_keys extra_dict["params"] = { - "name": "{}-{}-{}-{}".format(indata["name"][:16], vnfr["member-vnf-index-ref"][:16], - target_vdu["vdu-name"][:32], target_vdu.get("count-index") or 0), + "name": "{}-{}-{}-{}".format( + indata["name"][:16], + vnfr["member-vnf-index-ref"][:16], + target_vdu["vdu-name"][:32], + target_vdu.get("count-index") or 0, + ), "description": target_vdu["vdu-name"], "start": True, "image_id": "TASK-" + image_text, @@ -638,9 +885,18 @@ class Ns(object): "availability_zone_index": None, # TODO "availability_zone_list": None, # TODO } + return extra_dict - def _process_items(target_list, existing_list, db_record, db_update, db_path, item, process_params): + def _process_items( + target_list, + existing_list, + db_record, + db_update, + db_path, + item, + process_params, + ): nonlocal db_new_tasks nonlocal tasks_by_target_record_id nonlocal task_index @@ -652,27 +908,45 @@ class Ns(object): # step 1 items (networks,vdus,...) to be deleted/updated for item_index, existing_item in enumerate(existing_list): - target_item = next((t for t in target_list if t["id"] == existing_item["id"]), None) - for target_vim, existing_viminfo in existing_item.get("vim_info", {}).items(): + target_item = next( + (t for t in target_list if t["id"] == existing_item["id"]), None + ) + + for target_vim, existing_viminfo in existing_item.get( + "vim_info", {} + ).items(): if existing_viminfo is None: continue + if target_item: - target_viminfo = target_item.get("vim_info", {}).get(target_vim) + target_viminfo = target_item.get("vim_info", {}).get( + target_vim + ) else: target_viminfo = None + if target_viminfo is None: # must be deleted self._assign_vim(target_vim) - target_record_id = "{}.{}".format(db_record, existing_item["id"]) + target_record_id = "{}.{}".format( + db_record, existing_item["id"] + ) item_ = item + if target_vim.startswith("sdn"): # item must be sdn-net instead of net if target_vim is a sdn item_ = "sdn_net" target_record_id += ".sdn" + task = _create_task( - target_vim, item_, "DELETE", - target_record="{}.{}.vim_info.{}".format(db_record, item_index, target_vim), - target_record_id=target_record_id) + target_vim, + item_, + "DELETE", + target_record="{}.{}.vim_info.{}".format( + db_record, item_index, target_vim + ), + target_record_id=target_record_id, + ) tasks_by_target_record_id[target_record_id] = task db_new_tasks.append(task) # TODO delete @@ -681,6 +955,7 @@ class Ns(object): # step 2 items (networks,vdus,...) to be created for target_item in target_list: item_index = -1 + for item_index, existing_item in enumerate(existing_list): if existing_item["id"] == target_item["id"]: break @@ -690,10 +965,16 @@ class Ns(object): existing_list.append(target_item) existing_item = None - for target_vim, target_viminfo in target_item.get("vim_info", {}).items(): + for target_vim, target_viminfo in target_item.get( + "vim_info", {} + ).items(): existing_viminfo = None + if existing_item: - existing_viminfo = existing_item.get("vim_info", {}).get(target_vim) + existing_viminfo = existing_item.get("vim_info", {}).get( + target_vim + ) + # TODO check if different. Delete and create??? 
# TODO delete if not exist if existing_viminfo is not None: @@ -701,20 +982,29 @@ class Ns(object): target_record_id = "{}.{}".format(db_record, target_item["id"]) item_ = item + if target_vim.startswith("sdn"): # item must be sdn-net instead of net if target_vim is a sdn item_ = "sdn_net" target_record_id += ".sdn" - extra_dict = process_params(target_item, target_viminfo, target_record_id) + extra_dict = process_params( + target_item, target_viminfo, target_record_id + ) self._assign_vim(target_vim) task = _create_task( - target_vim, item_, "CREATE", - target_record="{}.{}.vim_info.{}".format(db_record, item_index, target_vim), + target_vim, + item_, + "CREATE", + target_record="{}.{}.vim_info.{}".format( + db_record, item_index, target_vim + ), target_record_id=target_record_id, - extra_dict=extra_dict) + extra_dict=extra_dict, + ) tasks_by_target_record_id[target_record_id] = task db_new_tasks.append(task) + if target_item.get("common_id"): task["common_id"] = target_item["common_id"] @@ -730,20 +1020,41 @@ class Ns(object): key = indata["action"].get("key") user = indata["action"].get("user") password = indata["action"].get("password") + for vnf in indata.get("vnf", ()): if vnf["_id"] not in db_vnfrs: raise NsException("Invalid vnf={}".format(vnf["_id"])) + db_vnfr = db_vnfrs[vnf["_id"]] + for target_vdu in vnf.get("vdur", ()): - vdu_index, vdur = next((i_v for i_v in enumerate(db_vnfr["vdur"]) if - i_v[1]["id"] == target_vdu["id"]), (None, None)) + vdu_index, vdur = next( + ( + i_v + for i_v in enumerate(db_vnfr["vdur"]) + if i_v[1]["id"] == target_vdu["id"] + ), + (None, None), + ) + if not vdur: - raise NsException("Invalid vdu vnf={}.{}".format(vnf["_id"], target_vdu["id"])) - target_vim, vim_info = next(k_v for k_v in vdur["vim_info"].items()) + raise NsException( + "Invalid vdu vnf={}.{}".format( + vnf["_id"], target_vdu["id"] + ) + ) + + target_vim, vim_info = next( + k_v for k_v in vdur["vim_info"].items() + ) self._assign_vim(target_vim) - target_record = "vnfrs:{}:vdur.{}.ssh_keys".format(vnf["_id"], vdu_index) + target_record = "vnfrs:{}:vdur.{}.ssh_keys".format( + vnf["_id"], vdu_index + ) extra_dict = { - "depends_on": ["vnfrs:{}:vdur.{}".format(vnf["_id"], vdur["id"])], + "depends_on": [ + "vnfrs:{}:vdur.{}".format(vnf["_id"], vdur["id"]) + ], "params": { "ip_address": vdur.get("ip-address"), "user": user, @@ -751,13 +1062,19 @@ class Ns(object): "password": password, "private_key": db_ro_nsr["private_key"], "salt": db_ro_nsr["_id"], - "schema_version": db_ro_nsr["_admin"]["schema_version"] - } + "schema_version": db_ro_nsr["_admin"][ + "schema_version" + ], + }, } - task = _create_task(target_vim, "vdu", "EXEC", - target_record=target_record, - target_record_id=None, - extra_dict=extra_dict) + task = _create_task( + target_vim, + "vdu", + "EXEC", + target_record=target_record, + target_record_id=None, + extra_dict=extra_dict, + ) db_new_tasks.append(task) with self.write_lock: @@ -767,88 +1084,152 @@ class Ns(object): # compute network differences # NS.vld step = "process NS VLDs" - _process_items(target_list=indata["ns"]["vld"] or [], existing_list=db_nsr.get("vld") or [], - db_record="nsrs:{}:vld".format(nsr_id), db_update=db_nsr_update, - db_path="vld", item="net", process_params=_process_net_params) + _process_items( + target_list=indata["ns"]["vld"] or [], + existing_list=db_nsr.get("vld") or [], + db_record="nsrs:{}:vld".format(nsr_id), + db_update=db_nsr_update, + db_path="vld", + item="net", + process_params=_process_net_params, + ) step = "process NS images" - 
_process_items(target_list=indata.get("image") or [], existing_list=db_nsr.get("image") or [], - db_record="nsrs:{}:image".format(nsr_id), - db_update=db_nsr_update, db_path="image", item="image", - process_params=_process_image_params) + _process_items( + target_list=indata.get("image") or [], + existing_list=db_nsr.get("image") or [], + db_record="nsrs:{}:image".format(nsr_id), + db_update=db_nsr_update, + db_path="image", + item="image", + process_params=_process_image_params, + ) step = "process NS flavors" - _process_items(target_list=indata.get("flavor") or [], existing_list=db_nsr.get("flavor") or [], - db_record="nsrs:{}:flavor".format(nsr_id), - db_update=db_nsr_update, db_path="flavor", item="flavor", - process_params=_process_flavor_params) + _process_items( + target_list=indata.get("flavor") or [], + existing_list=db_nsr.get("flavor") or [], + db_record="nsrs:{}:flavor".format(nsr_id), + db_update=db_nsr_update, + db_path="flavor", + item="flavor", + process_params=_process_flavor_params, + ) # VNF.vld for vnfr_id, vnfr in db_vnfrs.items(): # vnfr_id need to be set as global variable for among others nested method _process_vdu_params step = "process VNF={} VLDs".format(vnfr_id) - target_vnf = next((vnf for vnf in indata.get("vnf", ()) if vnf["_id"] == vnfr_id), None) + target_vnf = next( + ( + vnf + for vnf in indata.get("vnf", ()) + if vnf["_id"] == vnfr_id + ), + None, + ) target_list = target_vnf.get("vld") if target_vnf else None - _process_items(target_list=target_list or [], existing_list=vnfr.get("vld") or [], - db_record="vnfrs:{}:vld".format(vnfr_id), db_update=db_vnfrs_update[vnfr["_id"]], - db_path="vld", item="net", process_params=_process_net_params) + _process_items( + target_list=target_list or [], + existing_list=vnfr.get("vld") or [], + db_record="vnfrs:{}:vld".format(vnfr_id), + db_update=db_vnfrs_update[vnfr["_id"]], + db_path="vld", + item="net", + process_params=_process_net_params, + ) target_list = target_vnf.get("vdur") if target_vnf else None step = "process VNF={} VDUs".format(vnfr_id) - _process_items(target_list=target_list or [], existing_list=vnfr.get("vdur") or [], - db_record="vnfrs:{}:vdur".format(vnfr_id), - db_update=db_vnfrs_update[vnfr["_id"]], db_path="vdur", item="vdu", - process_params=_process_vdu_params) + _process_items( + target_list=target_list or [], + existing_list=vnfr.get("vdur") or [], + db_record="vnfrs:{}:vdur".format(vnfr_id), + db_update=db_vnfrs_update[vnfr["_id"]], + db_path="vdur", + item="vdu", + process_params=_process_vdu_params, + ) for db_task in db_new_tasks: step = "Updating database, Appending tasks to ro_tasks" target_id = db_task.pop("target_id") common_id = db_task.get("common_id") + if common_id: - if self.db.set_one("ro_tasks", - q_filter={"target_id": target_id, - "tasks.common_id": common_id}, - update_dict={"to_check_at": now, "modified_at": now}, - push={"tasks": db_task}, fail_on_empty=False): + if self.db.set_one( + "ro_tasks", + q_filter={ + "target_id": target_id, + "tasks.common_id": common_id, + }, + update_dict={"to_check_at": now, "modified_at": now}, + push={"tasks": db_task}, + fail_on_empty=False, + ): continue - if not self.db.set_one("ro_tasks", - q_filter={"target_id": target_id, - "tasks.target_record": db_task["target_record"]}, - update_dict={"to_check_at": now, "modified_at": now}, - push={"tasks": db_task}, fail_on_empty=False): + + if not self.db.set_one( + "ro_tasks", + q_filter={ + "target_id": target_id, + "tasks.target_record": db_task["target_record"], + }, + 
update_dict={"to_check_at": now, "modified_at": now}, + push={"tasks": db_task}, + fail_on_empty=False, + ): # Create a ro_task step = "Updating database, Creating ro_tasks" db_ro_task = _create_ro_task(target_id, db_task) nb_ro_tasks += 1 self.db.create("ro_tasks", db_ro_task) + step = "Updating database, nsrs" if db_nsr_update: self.db.set_one("nsrs", {"_id": nsr_id}, db_nsr_update) + for vnfr_id, db_vnfr_update in db_vnfrs_update.items(): if db_vnfr_update: step = "Updating database, vnfrs={}".format(vnfr_id) self.db.set_one("vnfrs", {"_id": vnfr_id}, db_vnfr_update) - self.logger.debug(logging_text + "Exit. Created {} ro_tasks; {} tasks".format(nb_ro_tasks, - len(db_new_tasks))) - return {"status": "ok", "nsr_id": nsr_id, "action_id": action_id}, action_id, True + self.logger.debug( + logging_text + + "Exit. Created {} ro_tasks; {} tasks".format( + nb_ro_tasks, len(db_new_tasks) + ) + ) + return ( + {"status": "ok", "nsr_id": nsr_id, "action_id": action_id}, + action_id, + True, + ) except Exception as e: if isinstance(e, (DbException, NsException)): - self.logger.error(logging_text + "Exit Exception while '{}': {}".format(step, e)) + self.logger.error( + logging_text + "Exit Exception while '{}': {}".format(step, e) + ) else: e = traceback_format_exc() - self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(step, e), exc_info=True) + self.logger.critical( + logging_text + "Exit Exception while '{}': {}".format(step, e), + exc_info=True, + ) + raise NsException(e) def delete(self, session, indata, version, nsr_id, *args, **kwargs): self.logger.debug("ns.delete version={} nsr_id={}".format(version, nsr_id)) # self.db.del_list({"_id": ro_task["_id"], "tasks.nsr_id.ne": nsr_id}) + with self.write_lock: try: NsWorker.delete_db_tasks(self.db, nsr_id, None) except NsWorkerException as e: raise NsException(e) + return None, None, True def status(self, session, indata, version, nsr_id, action_id, *args, **kwargs): @@ -860,47 +1241,64 @@ class Ns(object): ro_tasks = self.db.get_list("ro_tasks", {"tasks.action_id": action_id}) global_status = "DONE" details = [] + for ro_task in ro_tasks: for task in ro_task["tasks"]: if task and task["action_id"] == action_id: task_list.append(task) total += 1 + if task["status"] == "FAILED": global_status = "FAILED" - error_text = "Error at {} {}: {}".format(task["action"].lower(), task["item"], - ro_task["vim_info"].get("vim_details") or "unknown") + error_text = "Error at {} {}: {}".format( + task["action"].lower(), + task["item"], + ro_task["vim_info"].get("vim_details") or "unknown", + ) details.append(error_text) elif task["status"] in ("SCHEDULED", "BUILD"): if global_status != "FAILED": global_status = "BUILD" else: done += 1 + return_data = { "status": global_status, - "details": ". ".join(details) if details else "progress {}/{}".format(done, total), + "details": ". 
".join(details) + if details + else "progress {}/{}".format(done, total), "nsr_id": nsr_id, "action_id": action_id, - "tasks": task_list + "tasks": task_list, } + return return_data, None, True def cancel(self, session, indata, version, nsr_id, action_id, *args, **kwargs): - print("ns.cancel session={} indata={} version={} nsr_id={}, action_id={}".format(session, indata, version, - nsr_id, action_id)) + print( + "ns.cancel session={} indata={} version={} nsr_id={}, action_id={}".format( + session, indata, version, nsr_id, action_id + ) + ) + return None, None, True def get_deploy(self, session, indata, version, nsr_id, action_id, *args, **kwargs): nsrs = self.db.get_list("nsrs", {}) return_data = [] + for ns in nsrs: return_data.append({"_id": ns["_id"], "name": ns["name"]}) + return return_data, None, True def get_actions(self, session, indata, version, nsr_id, action_id, *args, **kwargs): ro_tasks = self.db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id}) return_data = [] + for ro_task in ro_tasks: for task in ro_task["tasks"]: if task["action_id"] not in return_data: return_data.append(task["action_id"]) + return return_data, None, True diff --git a/NG-RO/osm_ng_ro/ns_thread.py b/NG-RO/osm_ng_ro/ns_thread.py index f967f832..f15831aa 100644 --- a/NG-RO/osm_ng_ro/ns_thread.py +++ b/NG-RO/osm_ng_ro/ns_thread.py @@ -24,23 +24,25 @@ A single ro_task refers to a VIM element (flavor, image, network, ...). A ro_task can contain several 'tasks', each one with a target, where to store the results """ +import logging +import queue import threading import time -import queue -import logging import yaml +from copy import deepcopy +from http import HTTPStatus +from os import mkdir from pkg_resources import iter_entry_points +from shutil import rmtree +from unittest.mock import Mock + # from osm_common import dbmongo, dbmemory, fslocal, fsmongo, msglocal, msgkafka, version as common_version from osm_common.dbbase import DbException from osm_ro_plugin.vim_dummy import VimDummyConnector from osm_ro_plugin.sdn_dummy import SdnDummyConnector from osm_ro_plugin import vimconn, sdnconn from osm_ng_ro.vim_admin import LockRenew -from copy import deepcopy -from unittest.mock import Mock -from http import HTTPStatus -from os import mkdir -from shutil import rmtree + __author__ = "Alfonso Tierno" __date__ = "$28-Sep-2017 12:07:15$" @@ -69,12 +71,18 @@ class NsWorkerException(Exception): class FailingConnector: def __init__(self, error_msg): self.error_msg = error_msg + for method in dir(vimconn.VimConnector): if method[0] != "_": - setattr(self, method, Mock(side_effect=vimconn.VimConnException(error_msg))) + setattr( + self, method, Mock(side_effect=vimconn.VimConnException(error_msg)) + ) + for method in dir(sdnconn.SdnConnectorBase): if method[0] != "_": - setattr(self, method, Mock(side_effect=sdnconn.SdnConnectorError(error_msg))) + setattr( + self, method, Mock(side_effect=sdnconn.SdnConnectorError(error_msg)) + ) class NsWorkerExceptionNotFound(NsWorkerException): @@ -82,8 +90,9 @@ class NsWorkerExceptionNotFound(NsWorkerException): class VimInteractionBase: - """ Base class to call VIM/SDN for creating, deleting and refresh networks, VMs, flavors, ... + """Base class to call VIM/SDN for creating, deleting and refresh networks, VMs, flavors, ... It implements methods that does nothing and return ok""" + def __init__(self, db, my_vims, db_vims, logger): self.db = db self.logger = logger @@ -97,6 +106,7 @@ class VimInteractionBase: """skip calling VIM to get image, flavor status. 
Assumes ok""" if ro_task["vim_info"]["vim_status"] == "VIM_ERROR": return "FAILED", {} + return "DONE", {} def delete(self, ro_task, task_index): @@ -108,7 +118,6 @@ class VimInteractionBase: class VimInteractionNet(VimInteractionBase): - def new(self, ro_task, task_index, task_depends): vim_net_id = None task = ro_task["tasks"][task_index] @@ -116,29 +125,56 @@ class VimInteractionNet(VimInteractionBase): created = False created_items = {} target_vim = self.my_vims[ro_task["target_id"]] + try: # FIND if task.get("find_params"): # if management, get configuration of VIM if task["find_params"].get("filter_dict"): vim_filter = task["find_params"]["filter_dict"] - elif task["find_params"].get("mgmt"): # mamagement network - if deep_get(self.db_vims[ro_task["target_id"]], "config", "management_network_id"): - vim_filter = {"id": self.db_vims[ro_task["target_id"]]["config"]["management_network_id"]} - elif deep_get(self.db_vims[ro_task["target_id"]], "config", "management_network_name"): - vim_filter = {"name": self.db_vims[ro_task["target_id"]]["config"]["management_network_name"]} + # mamagement network + elif task["find_params"].get("mgmt"): + if deep_get( + self.db_vims[ro_task["target_id"]], + "config", + "management_network_id", + ): + vim_filter = { + "id": self.db_vims[ro_task["target_id"]]["config"][ + "management_network_id" + ] + } + elif deep_get( + self.db_vims[ro_task["target_id"]], + "config", + "management_network_name", + ): + vim_filter = { + "name": self.db_vims[ro_task["target_id"]]["config"][ + "management_network_name" + ] + } else: vim_filter = {"name": task["find_params"]["name"]} else: - raise NsWorkerExceptionNotFound("Invalid find_params for new_net {}".format(task["find_params"])) + raise NsWorkerExceptionNotFound( + "Invalid find_params for new_net {}".format(task["find_params"]) + ) vim_nets = target_vim.get_network_list(vim_filter) if not vim_nets and not task.get("params"): - raise NsWorkerExceptionNotFound("Network not found with this criteria: '{}'".format( - task.get("find_params"))) + raise NsWorkerExceptionNotFound( + "Network not found with this criteria: '{}'".format( + task.get("find_params") + ) + ) elif len(vim_nets) > 1: raise NsWorkerException( - "More than one network found with this criteria: '{}'".format(task["find_params"])) + "More than one network found with this criteria: '{}'".format( + task["find_params"] + ) + ) + if vim_nets: vim_net_id = vim_nets[0]["id"] else: @@ -147,31 +183,43 @@ class VimInteractionNet(VimInteractionBase): vim_net_id, created_items = target_vim.new_network(**params) created = True - ro_vim_item_update = {"vim_id": vim_net_id, - "vim_status": "BUILD", - "created": created, - "created_items": created_items, - "vim_details": None} + ro_vim_item_update = { + "vim_id": vim_net_id, + "vim_status": "BUILD", + "created": created, + "created_items": created_items, + "vim_details": None, + } self.logger.debug( - "task={} {} new-net={} created={}".format(task_id, ro_task["target_id"], vim_net_id, created)) + "task={} {} new-net={} created={}".format( + task_id, ro_task["target_id"], vim_net_id, created + ) + ) + return "BUILD", ro_vim_item_update except (vimconn.VimConnException, NsWorkerException) as e: - self.logger.error("task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e)) - ro_vim_item_update = {"vim_status": "VIM_ERROR", - "created": created, - "vim_details": str(e)} + self.logger.error( + "task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e) + ) + ro_vim_item_update = { + "vim_status": 
"VIM_ERROR", + "created": created, + "vim_details": str(e), + } + return "FAILED", ro_vim_item_update def refresh(self, ro_task): """Call VIM to get network status""" ro_task_id = ro_task["_id"] target_vim = self.my_vims[ro_task["target_id"]] - vim_id = ro_task["vim_info"]["vim_id"] net_to_refresh_list = [vim_id] + try: vim_dict = target_vim.refresh_nets_status(net_to_refresh_list) vim_info = vim_dict[vim_id] + if vim_info["status"] == "ACTIVE": task_status = "DONE" elif vim_info["status"] == "BUILD": @@ -180,15 +228,21 @@ class VimInteractionNet(VimInteractionBase): task_status = "FAILED" except vimconn.VimConnException as e: # Mark all tasks at VIM_ERROR status - self.logger.error("ro_task={} vim={} get-net={}: {}".format(ro_task_id, ro_task["target_id"], vim_id, e)) + self.logger.error( + "ro_task={} vim={} get-net={}: {}".format( + ro_task_id, ro_task["target_id"], vim_id, e + ) + ) vim_info = {"status": "VIM_ERROR", "error_msg": str(e)} task_status = "FAILED" ro_vim_item_update = {} if ro_task["vim_info"]["vim_status"] != vim_info["status"]: ro_vim_item_update["vim_status"] = vim_info["status"] + if ro_task["vim_info"]["vim_name"] != vim_info.get("name"): ro_vim_item_update["vim_name"] = vim_info.get("name") + if vim_info["status"] in ("ERROR", "VIM_ERROR"): if ro_task["vim_info"]["vim_details"] != vim_info.get("error_msg"): ro_vim_item_update["vim_details"] = vim_info.get("error_msg") @@ -198,43 +252,69 @@ class VimInteractionNet(VimInteractionBase): else: if ro_task["vim_info"]["vim_details"] != vim_info["vim_info"]: ro_vim_item_update["vim_details"] = vim_info["vim_info"] + if ro_vim_item_update: - self.logger.debug("ro_task={} {} get-net={}: status={} {}".format( - ro_task_id, ro_task["target_id"], vim_id, ro_vim_item_update.get("vim_status"), - ro_vim_item_update.get("vim_details") if ro_vim_item_update.get("vim_status") != "ACTIVE" else '')) + self.logger.debug( + "ro_task={} {} get-net={}: status={} {}".format( + ro_task_id, + ro_task["target_id"], + vim_id, + ro_vim_item_update.get("vim_status"), + ro_vim_item_update.get("vim_details") + if ro_vim_item_update.get("vim_status") != "ACTIVE" + else "", + ) + ) + return task_status, ro_vim_item_update def delete(self, ro_task, task_index): task = ro_task["tasks"][task_index] task_id = task["task_id"] net_vim_id = ro_task["vim_info"]["vim_id"] - ro_vim_item_update_ok = {"vim_status": "DELETED", - "created": False, - "vim_details": "DELETED", - "vim_id": None} + ro_vim_item_update_ok = { + "vim_status": "DELETED", + "created": False, + "vim_details": "DELETED", + "vim_id": None, + } + try: if net_vim_id or ro_task["vim_info"]["created_items"]: target_vim = self.my_vims[ro_task["target_id"]] - target_vim.delete_network(net_vim_id, ro_task["vim_info"]["created_items"]) - + target_vim.delete_network( + net_vim_id, ro_task["vim_info"]["created_items"] + ) except vimconn.VimConnNotFoundException: ro_vim_item_update_ok["vim_details"] = "already deleted" - except vimconn.VimConnException as e: - self.logger.error("ro_task={} vim={} del-net={}: {}".format(ro_task["_id"], ro_task["target_id"], - net_vim_id, e)) - ro_vim_item_update = {"vim_status": "VIM_ERROR", - "vim_details": "Error while deleting: {}".format(e)} + self.logger.error( + "ro_task={} vim={} del-net={}: {}".format( + ro_task["_id"], ro_task["target_id"], net_vim_id, e + ) + ) + ro_vim_item_update = { + "vim_status": "VIM_ERROR", + "vim_details": "Error while deleting: {}".format(e), + } + return "FAILED", ro_vim_item_update - self.logger.debug("task={} {} del-net={} 
{}".format(task_id, ro_task["target_id"], net_vim_id, - ro_vim_item_update_ok.get("vim_details", ""))) + self.logger.debug( + "task={} {} del-net={} {}".format( + task_id, + ro_task["target_id"], + net_vim_id, + ro_vim_item_update_ok.get("vim_details", ""), + ) + ) + return "DONE", ro_vim_item_update_ok class VimInteractionVdu(VimInteractionBase): - max_retries_inject_ssh_key = 20 # 20 times - time_retries_inject_ssh_key = 30 # wevery 30 seconds + max_retries_inject_ssh_key = 20 # 20 times + time_retries_inject_ssh_key = 30 # wevery 30 seconds def new(self, ro_task, task_index, task_depends): task = ro_task["tasks"][task_index] @@ -242,89 +322,127 @@ class VimInteractionVdu(VimInteractionBase): created = False created_items = {} target_vim = self.my_vims[ro_task["target_id"]] + try: created = True params = task["params"] params_copy = deepcopy(params) net_list = params_copy["net_list"] + for net in net_list: - if "net_id" in net and net["net_id"].startswith("TASK-"): # change task_id into network_id + # change task_id into network_id + if "net_id" in net and net["net_id"].startswith("TASK-"): network_id = task_depends[net["net_id"]] + if not network_id: - raise NsWorkerException("Cannot create VM because depends on a network not created or found " - "for {}".format(net["net_id"])) + raise NsWorkerException( + "Cannot create VM because depends on a network not created or found " + "for {}".format(net["net_id"]) + ) + net["net_id"] = network_id + if params_copy["image_id"].startswith("TASK-"): params_copy["image_id"] = task_depends[params_copy["image_id"]] + if params_copy["flavor_id"].startswith("TASK-"): params_copy["flavor_id"] = task_depends[params_copy["flavor_id"]] vim_vm_id, created_items = target_vim.new_vminstance(**params_copy) interfaces = [iface["vim_id"] for iface in params_copy["net_list"]] - ro_vim_item_update = {"vim_id": vim_vm_id, - "vim_status": "BUILD", - "created": created, - "created_items": created_items, - "vim_details": None, - "interfaces_vim_ids": interfaces, - "interfaces": [], - } + ro_vim_item_update = { + "vim_id": vim_vm_id, + "vim_status": "BUILD", + "created": created, + "created_items": created_items, + "vim_details": None, + "interfaces_vim_ids": interfaces, + "interfaces": [], + } self.logger.debug( - "task={} {} new-vm={} created={}".format(task_id, ro_task["target_id"], vim_vm_id, created)) + "task={} {} new-vm={} created={}".format( + task_id, ro_task["target_id"], vim_vm_id, created + ) + ) + return "BUILD", ro_vim_item_update except (vimconn.VimConnException, NsWorkerException) as e: - self.logger.error("task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e)) - ro_vim_item_update = {"vim_status": "VIM_ERROR", - "created": created, - "vim_details": str(e)} + self.logger.error( + "task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e) + ) + ro_vim_item_update = { + "vim_status": "VIM_ERROR", + "created": created, + "vim_details": str(e), + } + return "FAILED", ro_vim_item_update def delete(self, ro_task, task_index): task = ro_task["tasks"][task_index] task_id = task["task_id"] vm_vim_id = ro_task["vim_info"]["vim_id"] - ro_vim_item_update_ok = {"vim_status": "DELETED", - "created": False, - "vim_details": "DELETED", - "vim_id": None} + ro_vim_item_update_ok = { + "vim_status": "DELETED", + "created": False, + "vim_details": "DELETED", + "vim_id": None, + } + try: if vm_vim_id or ro_task["vim_info"]["created_items"]: target_vim = self.my_vims[ro_task["target_id"]] - target_vim.delete_vminstance(vm_vim_id, 
ro_task["vim_info"]["created_items"]) - + target_vim.delete_vminstance( + vm_vim_id, ro_task["vim_info"]["created_items"] + ) except vimconn.VimConnNotFoundException: ro_vim_item_update_ok["vim_details"] = "already deleted" - except vimconn.VimConnException as e: - self.logger.error("ro_task={} vim={} del-vm={}: {}".format(ro_task["_id"], ro_task["target_id"], - vm_vim_id, e)) - ro_vim_item_update = {"vim_status": "VIM_ERROR", - "vim_details": "Error while deleting: {}".format(e)} + self.logger.error( + "ro_task={} vim={} del-vm={}: {}".format( + ro_task["_id"], ro_task["target_id"], vm_vim_id, e + ) + ) + ro_vim_item_update = { + "vim_status": "VIM_ERROR", + "vim_details": "Error while deleting: {}".format(e), + } + return "FAILED", ro_vim_item_update - self.logger.debug("task={} {} del-vm={} {}".format(task_id, ro_task["target_id"], vm_vim_id, - ro_vim_item_update_ok.get("vim_details", ""))) + self.logger.debug( + "task={} {} del-vm={} {}".format( + task_id, + ro_task["target_id"], + vm_vim_id, + ro_vim_item_update_ok.get("vim_details", ""), + ) + ) + return "DONE", ro_vim_item_update_ok def refresh(self, ro_task): """Call VIM to get vm status""" ro_task_id = ro_task["_id"] target_vim = self.my_vims[ro_task["target_id"]] - vim_id = ro_task["vim_info"]["vim_id"] + if not vim_id: return None, None + vm_to_refresh_list = [vim_id] try: vim_dict = target_vim.refresh_vms_status(vm_to_refresh_list) vim_info = vim_dict[vim_id] + if vim_info["status"] == "ACTIVE": task_status = "DONE" elif vim_info["status"] == "BUILD": task_status = "BUILD" else: task_status = "FAILED" + # try to load and parse vim_information try: vim_info_info = yaml.safe_load(vim_info["vim_info"]) @@ -334,34 +452,57 @@ class VimInteractionVdu(VimInteractionBase): pass except vimconn.VimConnException as e: # Mark all tasks at VIM_ERROR status - self.logger.error("ro_task={} vim={} get-vm={}: {}".format(ro_task_id, ro_task["target_id"], vim_id, e)) + self.logger.error( + "ro_task={} vim={} get-vm={}: {}".format( + ro_task_id, ro_task["target_id"], vim_id, e + ) + ) vim_info = {"status": "VIM_ERROR", "error_msg": str(e)} task_status = "FAILED" ro_vim_item_update = {} + # Interfaces cannot be present if e.g. 
VM is not present, that is status=DELETED vim_interfaces = [] if vim_info.get("interfaces"): for vim_iface_id in ro_task["vim_info"]["interfaces_vim_ids"]: - iface = next((iface for iface in vim_info["interfaces"] if vim_iface_id == iface["vim_interface_id"]), - None) + iface = next( + ( + iface + for iface in vim_info["interfaces"] + if vim_iface_id == iface["vim_interface_id"] + ), + None, + ) # if iface: # iface.pop("vim_info", None) vim_interfaces.append(iface) - task_create = next(t for t in ro_task["tasks"] if t and t["action"] == "CREATE" and t["status"] != "FINISHED") + task_create = next( + t + for t in ro_task["tasks"] + if t and t["action"] == "CREATE" and t["status"] != "FINISHED" + ) if vim_interfaces and task_create.get("mgmt_vnf_interface") is not None: - vim_interfaces[task_create["mgmt_vnf_interface"]]["mgmt_vnf_interface"] = True - mgmt_vdu_iface = task_create.get("mgmt_vdu_interface", task_create.get("mgmt_vnf_interface", 0)) + vim_interfaces[task_create["mgmt_vnf_interface"]][ + "mgmt_vnf_interface" + ] = True + + mgmt_vdu_iface = task_create.get( + "mgmt_vdu_interface", task_create.get("mgmt_vnf_interface", 0) + ) if vim_interfaces: vim_interfaces[mgmt_vdu_iface]["mgmt_vdu_interface"] = True if ro_task["vim_info"]["interfaces"] != vim_interfaces: ro_vim_item_update["interfaces"] = vim_interfaces + if ro_task["vim_info"]["vim_status"] != vim_info["status"]: ro_vim_item_update["vim_status"] = vim_info["status"] + if ro_task["vim_info"]["vim_name"] != vim_info.get("name"): ro_vim_item_update["vim_name"] = vim_info.get("name") + if vim_info["status"] in ("ERROR", "VIM_ERROR"): if ro_task["vim_info"]["vim_details"] != vim_info.get("error_msg"): ro_vim_item_update["vim_details"] = vim_info.get("error_msg") @@ -371,10 +512,20 @@ class VimInteractionVdu(VimInteractionBase): else: if ro_task["vim_info"]["vim_details"] != vim_info["vim_info"]: ro_vim_item_update["vim_details"] = vim_info["vim_info"] + if ro_vim_item_update: - self.logger.debug("ro_task={} {} get-vm={}: status={} {}".format( - ro_task_id, ro_task["target_id"], vim_id, ro_vim_item_update.get("vim_status"), - ro_vim_item_update.get("vim_details") if ro_vim_item_update.get("vim_status") != "ACTIVE" else '')) + self.logger.debug( + "ro_task={} {} get-vm={}: status={} {}".format( + ro_task_id, + ro_task["target_id"], + vim_id, + ro_vim_item_update.get("vim_status"), + ro_vim_item_update.get("vim_details") + if ro_vim_item_update.get("vim_status") != "ACTIVE" + else "", + ) + ) + return task_status, ro_vim_item_update def exec(self, ro_task, task_index, task_depends): @@ -383,89 +534,142 @@ class VimInteractionVdu(VimInteractionBase): target_vim = self.my_vims[ro_task["target_id"]] db_task_update = {"retries": 0} retries = task.get("retries", 0) + try: params = task["params"] params_copy = deepcopy(params) - params_copy["ro_key"] = self.db.decrypt(params_copy.pop("private_key"), - params_copy.pop("schema_version"), params_copy.pop("salt")) + params_copy["ro_key"] = self.db.decrypt( + params_copy.pop("private_key"), + params_copy.pop("schema_version"), + params_copy.pop("salt"), + ) params_copy["ip_addr"] = params_copy.pop("ip_address") target_vim.inject_user_key(**params_copy) self.logger.debug( - "task={} {} action-vm=inject_key".format(task_id, ro_task["target_id"])) - return "DONE", None, db_task_update, # params_copy["key"] + "task={} {} action-vm=inject_key".format(task_id, ro_task["target_id"]) + ) + + return ( + "DONE", + None, + db_task_update, + ) # params_copy["key"] except (vimconn.VimConnException, 
NsWorkerException) as e:
             retries += 1
+
             if retries < self.max_retries_inject_ssh_key:
-                return "BUILD", None, {"retries": retries, "next_retry": self.time_retries_inject_ssh_key}
-            self.logger.error("task={} {} inject-ssh-key: {}".format(task_id, ro_task["target_id"], e))
+                return (
+                    "BUILD",
+                    None,
+                    {
+                        "retries": retries,
+                        "next_retry": self.time_retries_inject_ssh_key,
+                    },
+                )
+
+            self.logger.error(
+                "task={} {} inject-ssh-key: {}".format(task_id, ro_task["target_id"], e)
+            )
             ro_vim_item_update = {"vim_details": str(e)}
+
             return "FAILED", ro_vim_item_update, db_task_update


 class VimInteractionImage(VimInteractionBase):
-
     def new(self, ro_task, task_index, task_depends):
         task = ro_task["tasks"][task_index]
         task_id = task["task_id"]
         created = False
         created_items = {}
         target_vim = self.my_vims[ro_task["target_id"]]
+
         try:
             # FIND
             if task.get("find_params"):
                 vim_images = target_vim.get_image_list(**task["find_params"])
+
                 if not vim_images:
-                    raise NsWorkerExceptionNotFound("Image not found with this criteria: '{}'".format(
-                        task["find_params"]))
+                    raise NsWorkerExceptionNotFound(
+                        "Image not found with this criteria: '{}'".format(
+                            task["find_params"]
+                        )
+                    )
                 elif len(vim_images) > 1:
                     raise NsWorkerException(
-                        "More than one network found with this criteria: '{}'".format(task["find_params"]))
+                        "More than one image found with this criteria: '{}'".format(
+                            task["find_params"]
+                        )
+                    )
                 else:
                     vim_image_id = vim_images[0]["id"]

-            ro_vim_item_update = {"vim_id": vim_image_id,
-                                  "vim_status": "DONE",
-                                  "created": created,
-                                  "created_items": created_items,
-                                  "vim_details": None}
+            ro_vim_item_update = {
+                "vim_id": vim_image_id,
+                "vim_status": "DONE",
+                "created": created,
+                "created_items": created_items,
+                "vim_details": None,
+            }
             self.logger.debug(
-                "task={} {} new-image={} created={}".format(task_id, ro_task["target_id"], vim_image_id, created))
+                "task={} {} new-image={} created={}".format(
+                    task_id, ro_task["target_id"], vim_image_id, created
+                )
+            )
+
             return "DONE", ro_vim_item_update
         except (NsWorkerException, vimconn.VimConnException) as e:
-            self.logger.error("task={} {} new-image: {}".format(task_id, ro_task["target_id"], e))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "created": created,
-                                  "vim_details": str(e)}
+            self.logger.error(
+                "task={} {} new-image: {}".format(task_id, ro_task["target_id"], e)
+            )
+            ro_vim_item_update = {
+                "vim_status": "VIM_ERROR",
+                "created": created,
+                "vim_details": str(e),
+            }
+
             return "FAILED", ro_vim_item_update


 class VimInteractionFlavor(VimInteractionBase):
-
     def delete(self, ro_task, task_index):
         task = ro_task["tasks"][task_index]
         task_id = task["task_id"]
         flavor_vim_id = ro_task["vim_info"]["vim_id"]
-        ro_vim_item_update_ok = {"vim_status": "DELETED",
-                                 "created": False,
-                                 "vim_details": "DELETED",
-                                 "vim_id": None}
+        ro_vim_item_update_ok = {
+            "vim_status": "DELETED",
+            "created": False,
+            "vim_details": "DELETED",
+            "vim_id": None,
+        }
+
         try:
             if flavor_vim_id:
                 target_vim = self.my_vims[ro_task["target_id"]]
                 target_vim.delete_flavor(flavor_vim_id)
-
         except vimconn.VimConnNotFoundException:
             ro_vim_item_update_ok["vim_details"] = "already deleted"
-
         except vimconn.VimConnException as e:
-            self.logger.error("ro_task={} vim={} del-flavor={}: {}".format(
-                ro_task["_id"], ro_task["target_id"], flavor_vim_id, e))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "vim_details": "Error while deleting: {}".format(e)}
+            self.logger.error(
+                "ro_task={} vim={} del-flavor={}: {}".format(
+                    ro_task["_id"], ro_task["target_id"], 
flavor_vim_id, e + ) + ) + ro_vim_item_update = { + "vim_status": "VIM_ERROR", + "vim_details": "Error while deleting: {}".format(e), + } + return "FAILED", ro_vim_item_update - self.logger.debug("task={} {} del-flavor={} {}".format( - task_id, ro_task["target_id"], flavor_vim_id, ro_vim_item_update_ok.get("vim_details", ""))) + self.logger.debug( + "task={} {} del-flavor={} {}".format( + task_id, + ro_task["target_id"], + flavor_vim_id, + ro_vim_item_update_ok.get("vim_details", ""), + ) + ) + return "DONE", ro_vim_item_update_ok def new(self, ro_task, task_index, task_depends): @@ -474,9 +678,11 @@ class VimInteractionFlavor(VimInteractionBase): created = False created_items = {} target_vim = self.my_vims[ro_task["target_id"]] + try: # FIND vim_flavor_id = None + if task.get("find_params"): try: flavor_data = task["find_params"]["flavor_data"] @@ -490,24 +696,34 @@ class VimInteractionFlavor(VimInteractionBase): vim_flavor_id = target_vim.new_flavor(flavor_data) created = True - ro_vim_item_update = {"vim_id": vim_flavor_id, - "vim_status": "DONE", - "created": created, - "created_items": created_items, - "vim_details": None} + ro_vim_item_update = { + "vim_id": vim_flavor_id, + "vim_status": "DONE", + "created": created, + "created_items": created_items, + "vim_details": None, + } self.logger.debug( - "task={} {} new-flavor={} created={}".format(task_id, ro_task["target_id"], vim_flavor_id, created)) + "task={} {} new-flavor={} created={}".format( + task_id, ro_task["target_id"], vim_flavor_id, created + ) + ) + return "DONE", ro_vim_item_update except (vimconn.VimConnException, NsWorkerException) as e: - self.logger.error("task={} vim={} new-flavor: {}".format(task_id, ro_task["target_id"], e)) - ro_vim_item_update = {"vim_status": "VIM_ERROR", - "created": created, - "vim_details": str(e)} + self.logger.error( + "task={} vim={} new-flavor: {}".format(task_id, ro_task["target_id"], e) + ) + ro_vim_item_update = { + "vim_status": "VIM_ERROR", + "created": created, + "vim_details": str(e), + } + return "FAILED", ro_vim_item_update class VimInteractionSdnNet(VimInteractionBase): - @staticmethod def _match_pci(port_pci, mapping): """ @@ -527,21 +743,34 @@ class VimInteractionSdnNet(VimInteractionBase): pci_index = 0 while True: bracket_start = mapping.find("[", mapping_index) + if bracket_start == -1: break + bracket_end = mapping.find("]", bracket_start) if bracket_end == -1: break + length = bracket_start - mapping_index - if length and port_pci[pci_index:pci_index + length] != mapping[mapping_index:bracket_start]: + if ( + length + and port_pci[pci_index : pci_index + length] + != mapping[mapping_index:bracket_start] + ): return False - if port_pci[pci_index + length] not in mapping[bracket_start+1:bracket_end]: + + if ( + port_pci[pci_index + length] + not in mapping[bracket_start + 1 : bracket_end] + ): return False + pci_index += length + 1 mapping_index = bracket_end + 1 if port_pci[pci_index:] != mapping[mapping_index:]: return False + return True def _get_interfaces(self, vlds_to_connect, vim_account_id): @@ -551,35 +780,49 @@ class VimInteractionSdnNet(VimInteractionBase): :return: """ interfaces = [] + for vld in vlds_to_connect: table, _, db_id = vld.partition(":") db_id, _, vld = db_id.partition(":") _, _, vld_id = vld.partition(".") + if table == "vnfrs": q_filter = {"vim-account-id": vim_account_id, "_id": db_id} iface_key = "vnf-vld-id" else: # table == "nsrs" q_filter = {"vim-account-id": vim_account_id, "nsr-id-ref": db_id} iface_key = "ns-vld-id" + db_vnfrs = 
self.db.get_list("vnfrs", q_filter=q_filter) + for db_vnfr in db_vnfrs: for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())): for iface_index, interface in enumerate(vdur["interfaces"]): - if interface.get(iface_key) == vld_id and \ - interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"): + if interface.get(iface_key) == vld_id and interface.get( + "type" + ) in ("SR-IOV", "PCI-PASSTHROUGH"): # only SR-IOV o PT interface_ = interface.copy() - interface_["id"] = "vnfrs:{}:vdu.{}.interfaces.{}".format(db_vnfr["_id"], vdu_index, - iface_index) + interface_["id"] = "vnfrs:{}:vdu.{}.interfaces.{}".format( + db_vnfr["_id"], vdu_index, iface_index + ) + if vdur.get("status") == "ERROR": interface_["status"] = "ERROR" + interfaces.append(interface_) + return interfaces def refresh(self, ro_task): # look for task create - task_create_index, _ = next(i_t for i_t in enumerate(ro_task["tasks"]) - if i_t[1] and i_t[1]["action"] == "CREATE" and i_t[1]["status"] != "FINISHED") + task_create_index, _ = next( + i_t + for i_t in enumerate(ro_task["tasks"]) + if i_t[1] + and i_t[1]["action"] == "CREATE" + and i_t[1]["status"] != "FINISHED" + ) return self.new(ro_task, task_create_index, None) @@ -600,17 +843,21 @@ class VimInteractionSdnNet(VimInteractionBase): created = ro_task["vim_info"].get("created", False) try: - # CREATE params = task["params"] vlds_to_connect = params["vlds"] associated_vim = params["target_vim"] - additional_ports = params.get("sdn-ports") or () # external additional ports + # external additional ports + additional_ports = params.get("sdn-ports") or () _, _, vim_account_id = associated_vim.partition(":") + if associated_vim: # get associated VIM if associated_vim not in self.db_vims: - self.db_vims[associated_vim] = self.db.get_one("vim_accounts", {"_id": vim_account_id}) + self.db_vims[associated_vim] = self.db.get_one( + "vim_accounts", {"_id": vim_account_id} + ) + db_vim = self.db_vims[associated_vim] # look for ports to connect @@ -621,8 +868,10 @@ class VimInteractionSdnNet(VimInteractionBase): pending_ports = error_ports = 0 vlan_used = None sdn_need_update = False + for port in ports: vlan_used = port.get("vlan") or vlan_used + # TODO. 
Do not connect if already done if not port.get("compute_node") or not port.get("pci"): if port.get("status") == "ERROR": @@ -630,33 +879,56 @@ class VimInteractionSdnNet(VimInteractionBase): else: pending_ports += 1 continue + pmap = None - compute_node_mappings = next((c for c in db_vim["config"].get("sdn-port-mapping", ()) - if c and c["compute_node"] == port["compute_node"]), None) + compute_node_mappings = next( + ( + c + for c in db_vim["config"].get("sdn-port-mapping", ()) + if c and c["compute_node"] == port["compute_node"] + ), + None, + ) + if compute_node_mappings: # process port_mapping pci of type 0000:af:1[01].[1357] - pmap = next((p for p in compute_node_mappings["ports"] - if self._match_pci(port["pci"], p.get("pci"))), None) + pmap = next( + ( + p + for p in compute_node_mappings["ports"] + if self._match_pci(port["pci"], p.get("pci")) + ), + None, + ) + if not pmap: if not db_vim["config"].get("mapping_not_needed"): - error_list.append("Port mapping not found for compute_node={} pci={}".format( - port["compute_node"], port["pci"])) + error_list.append( + "Port mapping not found for compute_node={} pci={}".format( + port["compute_node"], port["pci"] + ) + ) continue + pmap = {} service_endpoint_id = "{}:{}".format(port["compute_node"], port["pci"]) new_port = { - "service_endpoint_id": pmap.get("service_endpoint_id") or service_endpoint_id, - "service_endpoint_encapsulation_type": "dot1q" if port["type"] == "SR-IOV" else None, + "service_endpoint_id": pmap.get("service_endpoint_id") + or service_endpoint_id, + "service_endpoint_encapsulation_type": "dot1q" + if port["type"] == "SR-IOV" + else None, "service_endpoint_encapsulation_info": { "vlan": port.get("vlan"), "mac": port.get("mac_address"), - "device_id": pmap.get("device_id") or port["compute_node"], # device_id - "device_interface_id": pmap.get("device_interface_id") or port["pci"], + "device_id": pmap.get("device_id") or port["compute_node"], + "device_interface_id": pmap.get("device_interface_id") + or port["pci"], "switch_dpid": pmap.get("switch_id") or pmap.get("switch_dpid"), "switch_port": pmap.get("switch_port"), "service_mapping_info": pmap.get("service_mapping_info"), - } + }, } # TODO @@ -666,109 +938,179 @@ class VimInteractionSdnNet(VimInteractionBase): sdn_ports.append(new_port) if error_ports: - error_list.append("{} interfaces have not been created as VDU is on ERROR status".format(error_ports)) + error_list.append( + "{} interfaces have not been created as VDU is on ERROR status".format( + error_ports + ) + ) # connect external ports for index, additional_port in enumerate(additional_ports): - additional_port_id = additional_port.get("service_endpoint_id") or "external-{}".format(index) - sdn_ports.append({ - "service_endpoint_id": additional_port_id, - "service_endpoint_encapsulation_type": additional_port.get("service_endpoint_encapsulation_type", - "dot1q"), - "service_endpoint_encapsulation_info": { - "vlan": additional_port.get("vlan") or vlan_used, - "mac": additional_port.get("mac_address"), - "device_id": additional_port.get("device_id"), - "device_interface_id": additional_port.get("device_interface_id"), - "switch_dpid": additional_port.get("switch_dpid") or additional_port.get("switch_id"), - "switch_port": additional_port.get("switch_port"), - "service_mapping_info": additional_port.get("service_mapping_info"), - }}) + additional_port_id = additional_port.get( + "service_endpoint_id" + ) or "external-{}".format(index) + sdn_ports.append( + { + "service_endpoint_id": additional_port_id, + 
"service_endpoint_encapsulation_type": additional_port.get( + "service_endpoint_encapsulation_type", "dot1q" + ), + "service_endpoint_encapsulation_info": { + "vlan": additional_port.get("vlan") or vlan_used, + "mac": additional_port.get("mac_address"), + "device_id": additional_port.get("device_id"), + "device_interface_id": additional_port.get( + "device_interface_id" + ), + "switch_dpid": additional_port.get("switch_dpid") + or additional_port.get("switch_id"), + "switch_port": additional_port.get("switch_port"), + "service_mapping_info": additional_port.get( + "service_mapping_info" + ), + }, + } + ) new_connected_ports.append(additional_port_id) sdn_info = "" + # if there are more ports to connect or they have been modified, call create/update if error_list: sdn_status = "ERROR" sdn_info = "; ".join(error_list) elif set(connected_ports) != set(new_connected_ports) or sdn_need_update: last_update = time.time() + if not sdn_net_id: if len(sdn_ports) < 2: sdn_status = "ACTIVE" + if not pending_ports: - self.logger.debug("task={} {} new-sdn-net done, less than 2 ports". - format(task_id, ro_task["target_id"])) + self.logger.debug( + "task={} {} new-sdn-net done, less than 2 ports".format( + task_id, ro_task["target_id"] + ) + ) else: net_type = params.get("type") or "ELAN" - sdn_net_id, created_items = target_vim.create_connectivity_service( - net_type, sdn_ports) + ( + sdn_net_id, + created_items, + ) = target_vim.create_connectivity_service(net_type, sdn_ports) created = True - self.logger.debug("task={} {} new-sdn-net={} created={}". - format(task_id, ro_task["target_id"], sdn_net_id, created)) + self.logger.debug( + "task={} {} new-sdn-net={} created={}".format( + task_id, ro_task["target_id"], sdn_net_id, created + ) + ) else: created_items = target_vim.edit_connectivity_service( - sdn_net_id, conn_info=created_items, connection_points=sdn_ports) + sdn_net_id, conn_info=created_items, connection_points=sdn_ports + ) created = True - self.logger.debug("task={} {} update-sdn-net={} created={}". - format(task_id, ro_task["target_id"], sdn_net_id, created)) + self.logger.debug( + "task={} {} update-sdn-net={} created={}".format( + task_id, ro_task["target_id"], sdn_net_id, created + ) + ) + connected_ports = new_connected_ports elif sdn_net_id: - wim_status_dict = target_vim.get_connectivity_service_status(sdn_net_id, conn_info=created_items) + wim_status_dict = target_vim.get_connectivity_service_status( + sdn_net_id, conn_info=created_items + ) sdn_status = wim_status_dict["sdn_status"] + if wim_status_dict.get("sdn_info"): sdn_info = str(wim_status_dict.get("sdn_info")) or "" + if wim_status_dict.get("error_msg"): sdn_info = wim_status_dict.get("error_msg") or "" if pending_ports: if sdn_status != "ERROR": sdn_info = "Waiting for getting interfaces location from VIM. 
Obtained '{}' of {}".format( - len(ports)-pending_ports, len(ports)) + len(ports) - pending_ports, len(ports) + ) + if sdn_status == "ACTIVE": sdn_status = "BUILD" - ro_vim_item_update = {"vim_id": sdn_net_id, - "vim_status": sdn_status, - "created": created, - "created_items": created_items, - "connected_ports": connected_ports, - "vim_details": sdn_info, - "last_update": last_update} + ro_vim_item_update = { + "vim_id": sdn_net_id, + "vim_status": sdn_status, + "created": created, + "created_items": created_items, + "connected_ports": connected_ports, + "vim_details": sdn_info, + "last_update": last_update, + } + return sdn_status, ro_vim_item_update except Exception as e: - self.logger.error("task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e), - exc_info=not isinstance(e, (sdnconn.SdnConnectorError, vimconn.VimConnException))) - ro_vim_item_update = {"vim_status": "VIM_ERROR", - "created": created, - "vim_details": str(e)} + self.logger.error( + "task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e), + exc_info=not isinstance( + e, (sdnconn.SdnConnectorError, vimconn.VimConnException) + ), + ) + ro_vim_item_update = { + "vim_status": "VIM_ERROR", + "created": created, + "vim_details": str(e), + } + return "FAILED", ro_vim_item_update def delete(self, ro_task, task_index): task = ro_task["tasks"][task_index] task_id = task["task_id"] sdn_vim_id = ro_task["vim_info"].get("vim_id") - ro_vim_item_update_ok = {"vim_status": "DELETED", - "created": False, - "vim_details": "DELETED", - "vim_id": None} + ro_vim_item_update_ok = { + "vim_status": "DELETED", + "created": False, + "vim_details": "DELETED", + "vim_id": None, + } + try: if sdn_vim_id: target_vim = self.my_vims[ro_task["target_id"]] - target_vim.delete_connectivity_service(sdn_vim_id, ro_task["vim_info"].get("created_items")) + target_vim.delete_connectivity_service( + sdn_vim_id, ro_task["vim_info"].get("created_items") + ) except Exception as e: - if isinstance(e, sdnconn.SdnConnectorError) and e.http_code == HTTPStatus.NOT_FOUND.value: + if ( + isinstance(e, sdnconn.SdnConnectorError) + and e.http_code == HTTPStatus.NOT_FOUND.value + ): ro_vim_item_update_ok["vim_details"] = "already deleted" else: - self.logger.error("ro_task={} vim={} del-sdn-net={}: {}".format(ro_task["_id"], ro_task["target_id"], - sdn_vim_id, e), - exc_info=not isinstance(e, (sdnconn.SdnConnectorError, vimconn.VimConnException))) - ro_vim_item_update = {"vim_status": "VIM_ERROR", - "vim_details": "Error while deleting: {}".format(e)} + self.logger.error( + "ro_task={} vim={} del-sdn-net={}: {}".format( + ro_task["_id"], ro_task["target_id"], sdn_vim_id, e + ), + exc_info=not isinstance( + e, (sdnconn.SdnConnectorError, vimconn.VimConnException) + ), + ) + ro_vim_item_update = { + "vim_status": "VIM_ERROR", + "vim_details": "Error while deleting: {}".format(e), + } + return "FAILED", ro_vim_item_update - self.logger.debug("task={} {} del-sdn-net={} {}".format(task_id, ro_task["target_id"], sdn_vim_id, - ro_vim_item_update_ok.get("vim_details", ""))) + self.logger.debug( + "task={} {} del-sdn-net={} {}".format( + task_id, + ro_task["target_id"], + sdn_vim_id, + ro_vim_item_update_ok.get("vim_details", ""), + ) + ) + return "DONE", ro_vim_item_update_ok @@ -793,24 +1135,35 @@ class NsWorker(threading.Thread): self.config = config self.plugins = plugins self.plugin_name = "unknown" - self.logger = logging.getLogger('ro.worker{}'.format(worker_index)) + self.logger = logging.getLogger("ro.worker{}".format(worker_index)) 
self.worker_index = worker_index
         self.task_queue = queue.Queue(self.QUEUE_SIZE)
-        self.my_vims = {}  # targetvim: vimplugin class
-        self.db_vims = {}  # targetvim: vim information from database
-        self.vim_targets = []  # targetvim list
+        # targetvim: vimplugin class
+        self.my_vims = {}
+        # targetvim: vim information from database
+        self.db_vims = {}
+        # targetvim list
+        self.vim_targets = []
         self.my_id = config["process_id"] + ":" + str(worker_index)
         self.db = db
         self.item2class = {
             "net": VimInteractionNet(self.db, self.my_vims, self.db_vims, self.logger),
             "vdu": VimInteractionVdu(self.db, self.my_vims, self.db_vims, self.logger),
-            "image": VimInteractionImage(self.db, self.my_vims, self.db_vims, self.logger),
-            "flavor": VimInteractionFlavor(self.db, self.my_vims, self.db_vims, self.logger),
-            "sdn_net": VimInteractionSdnNet(self.db, self.my_vims, self.db_vims, self.logger),
+            "image": VimInteractionImage(
+                self.db, self.my_vims, self.db_vims, self.logger
+            ),
+            "flavor": VimInteractionFlavor(
+                self.db, self.my_vims, self.db_vims, self.logger
+            ),
+            "sdn_net": VimInteractionSdnNet(
+                self.db, self.my_vims, self.db_vims, self.logger
+            ),
         }
         self.time_last_task_processed = None
-        self.tasks_to_delete = []  # lists of tasks to delete because nsrs or vnfrs has been deleted from db
-        self.idle = True  # it is idle when there are not vim_targets associated
+        # lists of tasks to delete because nsrs or vnfrs have been deleted from db
+        self.tasks_to_delete = []
+        # it is idle when there are no vim_targets associated
+        self.idle = True
         self.task_locked_time = config["global"]["task_locked_time"]

     def insert_task(self, task):
@@ -841,37 +1194,51 @@ class NsWorker(threading.Thread):
         """
         if not db_vim.get("config"):
             return
+
         file_name = ""
+
         try:
             if db_vim["config"].get("ca_cert_content"):
                 file_name = "{}:{}".format(target_id, self.worker_index)
+
                 try:
                     mkdir(file_name)
                 except FileExistsError:
                     pass
+
                 file_name = file_name + "/ca_cert"
+
                 with open(file_name, "w") as f:
                     f.write(db_vim["config"]["ca_cert_content"])
                     del db_vim["config"]["ca_cert_content"]
                     db_vim["config"]["ca_cert"] = file_name
         except Exception as e:
-            raise NsWorkerException("Error writing to file '{}': {}".format(file_name, e))
+            raise NsWorkerException(
+                "Error writing to file '{}': {}".format(file_name, e)
+            )

     def _load_plugin(self, name, type="vim"):
         # type can be vim or sdn
         if "rovim_dummy" not in self.plugins:
             self.plugins["rovim_dummy"] = VimDummyConnector
+
         if "rosdn_dummy" not in self.plugins:
             self.plugins["rosdn_dummy"] = SdnDummyConnector
+
         if name in self.plugins:
             return self.plugins[name]
+
         try:
-            for v in iter_entry_points('osm_ro{}.plugins'.format(type), name):
+            for v in iter_entry_points("osm_ro{}.plugins".format(type), name):
                 self.plugins[name] = v.load()
         except Exception as e:
             raise NsWorkerException("Cannot load plugin osm_{}: {}".format(name, e))
+
         if name and name not in self.plugins:
-            raise NsWorkerException("Plugin 'osm_{n}' has not been installed".format(n=name))
+            raise NsWorkerException(
+                "Plugin 'osm_{n}' has not been installed".format(n=name)
+            )
+
         return self.plugins[name]

     def _unload_vim(self, target_id):
@@ -883,8 +1250,10 @@ class NsWorker(threading.Thread):
         try:
             self.db_vims.pop(target_id, None)
             self.my_vims.pop(target_id, None)
+
             if target_id in self.vim_targets:
                 self.vim_targets.remove(target_id)
+
             self.logger.info("Unloaded {}".format(target_id))
             rmtree("{}:{}".format(target_id, self.worker_index))
         except FileNotFoundError:
@@ -905,42 +1274,66 @@ class NsWorker(threading.Thread):
         op_text = ""
         step = 
"" loaded = target_id in self.vim_targets - target_database = "vim_accounts" if target == "vim" else "wim_accounts" if target == "wim" else "sdns" + target_database = ( + "vim_accounts" + if target == "vim" + else "wim_accounts" + if target == "wim" + else "sdns" + ) + try: step = "Getting {} from db".format(target_id) db_vim = self.db.get_one(target_database, {"_id": _id}) - for op_index, operation in enumerate(db_vim["_admin"].get("operations", ())): + + for op_index, operation in enumerate( + db_vim["_admin"].get("operations", ()) + ): if operation["operationState"] != "PROCESSING": continue + locked_at = operation.get("locked_at") + if locked_at is not None and locked_at >= now - self.task_locked_time: # some other thread is doing this operation return + # lock op_text = "_admin.operations.{}.".format(op_index) - if not self.db.set_one(target_database, - q_filter={"_id": _id, - op_text + "operationState": "PROCESSING", - op_text + "locked_at": locked_at - }, - update_dict={op_text + "locked_at": now, - "admin.current_operation": op_index}, - fail_on_empty=False): + + if not self.db.set_one( + target_database, + q_filter={ + "_id": _id, + op_text + "operationState": "PROCESSING", + op_text + "locked_at": locked_at, + }, + update_dict={ + op_text + "locked_at": now, + "admin.current_operation": op_index, + }, + fail_on_empty=False, + ): return + unset_dict[op_text + "locked_at"] = None unset_dict["current_operation"] = None step = "Loading " + target_id error_text = self._load_vim(target_id) + if not error_text: step = "Checking connectivity" - if target == 'vim': + + if target == "vim": self.my_vims[target_id].check_vim_connectivity() else: self.my_vims[target_id].check_credentials() + update_dict["_admin.operationalState"] = "ENABLED" update_dict["_admin.detailed-status"] = "" unset_dict[op_text + "detailed-status"] = None update_dict[op_text + "operationState"] = "COMPLETED" + return except Exception as e: @@ -955,10 +1348,18 @@ class NsWorker(threading.Thread): unset_dict.pop(op_text + "detailed-status", None) update_dict["_admin.operationalState"] = "ERROR" update_dict["_admin.detailed-status"] = error_text + if op_text: update_dict[op_text + "statusEnteredTime"] = now - self.db.set_one(target_database, q_filter={"_id": _id}, update_dict=update_dict, unset=unset_dict, - fail_on_empty=False) + + self.db.set_one( + target_database, + q_filter={"_id": _id}, + update_dict=update_dict, + unset=unset_dict, + fail_on_empty=False, + ) + if not loaded: self._unload_vim(target_id) @@ -980,9 +1381,16 @@ class NsWorker(threading.Thread): :return: None if ok, descriptive text if error """ target, _, _id = target_id.partition(":") - target_database = "vim_accounts" if target == "vim" else "wim_accounts" if target == "wim" else "sdns" + target_database = ( + "vim_accounts" + if target == "vim" + else "wim_accounts" + if target == "wim" + else "sdns" + ) plugin_name = "" vim = None + try: step = "Getting {}={} from db".format(target, _id) # TODO process for wim, sdnc, ... 
@@ -994,20 +1402,31 @@ class NsWorker(threading.Thread): step = "Decrypting password" schema_version = vim.get("schema_version") - self.db.encrypt_decrypt_fields(vim, "decrypt", fields=('password', 'secret'), - schema_version=schema_version, salt=_id) + self.db.encrypt_decrypt_fields( + vim, + "decrypt", + fields=("password", "secret"), + schema_version=schema_version, + salt=_id, + ) self._process_vim_config(target_id, vim) + if target == "vim": plugin_name = "rovim_" + vim["vim_type"] step = "Loading plugin '{}'".format(plugin_name) vim_module_conn = self._load_plugin(plugin_name) step = "Loading {}'".format(target_id) self.my_vims[target_id] = vim_module_conn( - uuid=vim['_id'], name=vim['name'], - tenant_id=vim.get('vim_tenant_id'), tenant_name=vim.get('vim_tenant_name'), - url=vim['vim_url'], url_admin=None, - user=vim['vim_user'], passwd=vim['vim_password'], - config=vim.get('config') or {}, persistent_info={} + uuid=vim["_id"], + name=vim["name"], + tenant_id=vim.get("vim_tenant_id"), + tenant_name=vim.get("vim_tenant_name"), + url=vim["vim_url"], + url_admin=None, + user=vim["vim_user"], + passwd=vim["vim_password"], + config=vim.get("config") or {}, + persistent_info={}, ) else: # sdn plugin_name = "rosdn_" + vim["type"] @@ -1018,20 +1437,32 @@ class NsWorker(threading.Thread): wim_config = wim.pop("config", {}) or {} wim["uuid"] = wim["_id"] wim["wim_url"] = wim["url"] + if wim.get("dpid"): wim_config["dpid"] = wim.pop("dpid") + if wim.get("switch_id"): wim_config["switch_id"] = wim.pop("switch_id") - self.my_vims[target_id] = vim_module_conn(wim, wim, wim_config) # wim, wim_account, config + + # wim, wim_account, config + self.my_vims[target_id] = vim_module_conn(wim, wim, wim_config) self.db_vims[target_id] = vim self.error_status = None - self.logger.info("Connector loaded for {}, plugin={}".format(target_id, plugin_name)) + + self.logger.info( + "Connector loaded for {}, plugin={}".format(target_id, plugin_name) + ) except Exception as e: - self.logger.error("Cannot load {} plugin={}: {} {}".format( - target_id, plugin_name, step, e)) + self.logger.error( + "Cannot load {} plugin={}: {} {}".format( + target_id, plugin_name, step, e + ) + ) + self.db_vims[target_id] = vim or {} self.db_vims[target_id] = FailingConnector(str(e)) error_status = "{} Error: {}".format(step, e) + return error_status finally: if target_id not in self.vim_targets: @@ -1043,26 +1474,36 @@ class NsWorker(threading.Thread): :return: None """ now = time.time() + if not self.time_last_task_processed: self.time_last_task_processed = now + try: while True: locked = self.db.set_one( "ro_tasks", - q_filter={"target_id": self.vim_targets, - "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'], - "locked_at.lt": now - self.task_locked_time, - "to_check_at.lt": self.time_last_task_processed}, + q_filter={ + "target_id": self.vim_targets, + "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"], + "locked_at.lt": now - self.task_locked_time, + "to_check_at.lt": self.time_last_task_processed, + }, update_dict={"locked_by": self.my_id, "locked_at": now}, - fail_on_empty=False) + fail_on_empty=False, + ) + if locked: # read and return ro_task = self.db.get_one( "ro_tasks", - q_filter={"target_id": self.vim_targets, - "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'], - "locked_at": now}) + q_filter={ + "target_id": self.vim_targets, + "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"], + "locked_at": now, + }, + ) return ro_task + if self.time_last_task_processed == now: 
self.time_last_task_processed = None return None @@ -1073,7 +1514,10 @@ class NsWorker(threading.Thread): except DbException as e: self.logger.error("Database exception at _get_db_task: {}".format(e)) except Exception as e: - self.logger.critical("Unexpected exception at _get_db_task: {}".format(e), exc_info=True) + self.logger.critical( + "Unexpected exception at _get_db_task: {}".format(e), exc_info=True + ) + return None def _delete_task(self, ro_task, task_index, task_depends, db_update): @@ -1083,26 +1527,45 @@ class NsWorker(threading.Thread): """ my_task = ro_task["tasks"][task_index] task_id = my_task["task_id"] - needed_delete = ro_task["vim_info"]["created"] or ro_task["vim_info"].get("created_items", False) + needed_delete = ro_task["vim_info"]["created"] or ro_task["vim_info"].get( + "created_items", False + ) + if my_task["status"] == "FAILED": return None, None # TODO need to be retry?? + try: for index, task in enumerate(ro_task["tasks"]): if index == task_index or not task: continue # own task - if my_task["target_record"] == task["target_record"] and task["action"] == "CREATE": + + if ( + my_task["target_record"] == task["target_record"] + and task["action"] == "CREATE" + ): # set to finished - db_update["tasks.{}.status".format(index)] = task["status"] = "FINISHED" - elif task["action"] == "CREATE" and task["status"] not in ("FINISHED", "SUPERSEDED"): + db_update["tasks.{}.status".format(index)] = task[ + "status" + ] = "FINISHED" + elif task["action"] == "CREATE" and task["status"] not in ( + "FINISHED", + "SUPERSEDED", + ): needed_delete = False + if needed_delete: return self.item2class[my_task["item"]].delete(ro_task, task_index) else: return "SUPERSEDED", None except Exception as e: if not isinstance(e, NsWorkerException): - self.logger.critical("Unexpected exception at _delete_task task={}: {}".format(task_id, e), - exc_info=True) + self.logger.critical( + "Unexpected exception at _delete_task task={}: {}".format( + task_id, e + ), + exc_info=True, + ) + return "FAILED", {"vim_status": "VIM_ERROR", "vim_details": str(e)} def _create_task(self, ro_task, task_index, task_depends, db_update): @@ -1113,6 +1576,7 @@ class NsWorker(threading.Thread): my_task = ro_task["tasks"][task_index] task_id = my_task["task_id"] task_status = None + if my_task["status"] == "FAILED": return None, None # TODO need to be retry?? 
elif my_task["status"] == "SCHEDULED": @@ -1120,19 +1584,29 @@ class NsWorker(threading.Thread): for index, task in enumerate(ro_task["tasks"]): if index == task_index or not task: continue # own task - if task["action"] == "CREATE" and task["status"] not in ("SCHEDULED", "FINISHED", "SUPERSEDED"): + + if task["action"] == "CREATE" and task["status"] not in ( + "SCHEDULED", + "FINISHED", + "SUPERSEDED", + ): return task["status"], "COPY_VIM_INFO" try: task_status, ro_vim_item_update = self.item2class[my_task["item"]].new( - ro_task, task_index, task_depends) + ro_task, task_index, task_depends + ) # TODO update other CREATE tasks except Exception as e: if not isinstance(e, NsWorkerException): - self.logger.error("Error executing task={}: {}".format(task_id, e), exc_info=True) + self.logger.error( + "Error executing task={}: {}".format(task_id, e), exc_info=True + ) + task_status = "FAILED" ro_vim_item_update = {"vim_status": "VIM_ERROR", "vim_details": str(e)} # TODO update ro_vim_item_update + return task_status, ro_vim_item_update else: return None, None @@ -1148,16 +1622,20 @@ class NsWorker(threading.Thread): :param target_id: :return: database ro_task plus index of task """ - if task_id.startswith("vim:") or task_id.startswith("sdn:") or task_id.startswith("wim:"): + if ( + task_id.startswith("vim:") + or task_id.startswith("sdn:") + or task_id.startswith("wim:") + ): target_id, _, task_id = task_id.partition(" ") if task_id.startswith("nsrs:") or task_id.startswith("vnfrs:"): ro_task_dependency = self.db.get_one( "ro_tasks", - q_filter={"target_id": target_id, - "tasks.target_record_id": task_id - }, - fail_on_empty=False) + q_filter={"target_id": target_id, "tasks.target_record_id": task_id}, + fail_on_empty=False, + ) + if ro_task_dependency: for task_index, task in enumerate(ro_task_dependency["tasks"]): if task["target_record_id"] == task_id: @@ -1168,12 +1646,16 @@ class NsWorker(threading.Thread): for task_index, task in enumerate(ro_task["tasks"]): if task and task["task_id"] == task_id: return ro_task, task_index + ro_task_dependency = self.db.get_one( "ro_tasks", - q_filter={"tasks.ANYINDEX.task_id": task_id, - "tasks.ANYINDEX.target_record.ne": None - }, - fail_on_empty=False) + q_filter={ + "tasks.ANYINDEX.task_id": task_id, + "tasks.ANYINDEX.target_record.ne": None, + }, + fail_on_empty=False, + ) + if ro_task_dependency: for task_index, task in ro_task_dependency["tasks"]: if task["task_id"] == task_id: @@ -1183,7 +1665,8 @@ class NsWorker(threading.Thread): def _process_pending_tasks(self, ro_task): ro_task_id = ro_task["_id"] now = time.time() - next_check_at = now + (24*60*60) # one day + # one day + next_check_at = now + (24 * 60 * 60) db_ro_task_update = {} def _update_refresh(new_status): @@ -1194,6 +1677,7 @@ class NsWorker(threading.Thread): nonlocal ro_task next_refresh = time.time() + if task["item"] in ("image", "flavor"): next_refresh += self.REFRESH_IMAGE elif new_status == "BUILD": @@ -1202,6 +1686,7 @@ class NsWorker(threading.Thread): next_refresh += self.REFRESH_ACTIVE else: next_refresh += self.REFRESH_ERROR + next_check_at = min(next_check_at, next_refresh) db_ro_task_update["vim_info.refresh_at"] = next_refresh ro_task["vim_info"]["refresh_at"] = next_refresh @@ -1210,72 +1695,138 @@ class NsWorker(threading.Thread): # 0: get task_status_create lock_object = None task_status_create = None - task_create = next((t for t in ro_task["tasks"] if t and t["action"] == "CREATE" and - t["status"] in ("BUILD", "DONE")), None) + task_create = next( + ( + t + for 
t in ro_task["tasks"] + if t + and t["action"] == "CREATE" + and t["status"] in ("BUILD", "DONE") + ), + None, + ) + if task_create: task_status_create = task_create["status"] + # 1: look for tasks in status SCHEDULED, or in status CREATE if action is DONE or BUILD for task_action in ("DELETE", "CREATE", "EXEC"): db_vim_update = None new_status = None + for task_index, task in enumerate(ro_task["tasks"]): if not task: continue # task deleted + task_depends = {} target_update = None - if (task_action in ("DELETE", "EXEC") and task["status"] not in ("SCHEDULED", "BUILD")) or \ - task["action"] != task_action or \ - (task_action == "CREATE" and task["status"] in ("FINISHED", "SUPERSEDED")): + + if ( + ( + task_action in ("DELETE", "EXEC") + and task["status"] not in ("SCHEDULED", "BUILD") + ) + or task["action"] != task_action + or ( + task_action == "CREATE" + and task["status"] in ("FINISHED", "SUPERSEDED") + ) + ): continue + task_path = "tasks.{}.status".format(task_index) try: db_vim_info_update = None + if task["status"] == "SCHEDULED": # check if tasks that this depends on have been completed dependency_not_completed = False - for dependency_task_id in (task.get("depends_on") or ()): - dependency_ro_task, dependency_task_index = \ - self._get_dependency(dependency_task_id, target_id=ro_task["target_id"]) - dependency_task = dependency_ro_task["tasks"][dependency_task_index] + + for dependency_task_id in task.get("depends_on") or (): + ( + dependency_ro_task, + dependency_task_index, + ) = self._get_dependency( + dependency_task_id, target_id=ro_task["target_id"] + ) + dependency_task = dependency_ro_task["tasks"][ + dependency_task_index + ] + if dependency_task["status"] == "SCHEDULED": dependency_not_completed = True - next_check_at = min(next_check_at, dependency_ro_task["to_check_at"]) + next_check_at = min( + next_check_at, dependency_ro_task["to_check_at"] + ) break elif dependency_task["status"] == "FAILED": error_text = "Cannot {} {} because depends on failed {} {} id={}): {}".format( - task["action"], task["item"], dependency_task["action"], - dependency_task["item"], dependency_task_id, - dependency_ro_task["vim_info"].get("vim_details")) - self.logger.error("task={} {}".format(task["task_id"], error_text)) + task["action"], + task["item"], + dependency_task["action"], + dependency_task["item"], + dependency_task_id, + dependency_ro_task["vim_info"].get( + "vim_details" + ), + ) + self.logger.error( + "task={} {}".format(task["task_id"], error_text) + ) raise NsWorkerException(error_text) - task_depends[dependency_task_id] = dependency_ro_task["vim_info"]["vim_id"] - task_depends["TASK-{}".format(dependency_task_id)] = \ - dependency_ro_task["vim_info"]["vim_id"] + task_depends[dependency_task_id] = dependency_ro_task[ + "vim_info" + ]["vim_id"] + task_depends[ + "TASK-{}".format(dependency_task_id) + ] = dependency_ro_task["vim_info"]["vim_id"] + if dependency_not_completed: # TODO set at vim_info.vim_details that it is waiting continue + # before calling VIM-plugin as it can take more than task_locked_time, insert to LockRenew # the task of renew this locking. 
It will update database locket_at periodically if not lock_object: - lock_object = LockRenew.add_lock_object("ro_tasks", ro_task, self) + lock_object = LockRenew.add_lock_object( + "ro_tasks", ro_task, self + ) + if task["action"] == "DELETE": - new_status, db_vim_info_update = self._delete_task(ro_task, task_index, - task_depends, db_ro_task_update) - new_status = "FINISHED" if new_status == "DONE" else new_status + (new_status, db_vim_info_update,) = self._delete_task( + ro_task, task_index, task_depends, db_ro_task_update + ) + new_status = ( + "FINISHED" if new_status == "DONE" else new_status + ) # ^with FINISHED instead of DONE it will not be refreshing + if new_status in ("FINISHED", "SUPERSEDED"): target_update = "DELETE" elif task["action"] == "EXEC": - new_status, db_vim_info_update, db_task_update = self.item2class[task["item"]].exec( - ro_task, task_index, task_depends) - new_status = "FINISHED" if new_status == "DONE" else new_status + ( + new_status, + db_vim_info_update, + db_task_update, + ) = self.item2class[task["item"]].exec( + ro_task, task_index, task_depends + ) + new_status = ( + "FINISHED" if new_status == "DONE" else new_status + ) # ^with FINISHED instead of DONE it will not be refreshing + if db_task_update: # load into database the modified db_task_update "retries" and "next_retry" if db_task_update.get("retries"): - db_ro_task_update["tasks.{}.retries".format(task_index)] = db_task_update["retries"] - next_check_at = time.time() + db_task_update.get("next_retry", 60) + db_ro_task_update[ + "tasks.{}.retries".format(task_index) + ] = db_task_update["retries"] + + next_check_at = time.time() + db_task_update.get( + "next_retry", 60 + ) target_update = None elif task["action"] == "CREATE": if task["status"] == "SCHEDULED": @@ -1283,33 +1834,55 @@ class NsWorker(threading.Thread): new_status = task_status_create target_update = "COPY_VIM_INFO" else: - new_status, db_vim_info_update = \ - self.item2class[task["item"]].new(ro_task, task_index, task_depends) + new_status, db_vim_info_update = self.item2class[ + task["item"] + ].new(ro_task, task_index, task_depends) # self._create_task(ro_task, task_index, task_depends, db_ro_task_update) _update_refresh(new_status) else: - if ro_task["vim_info"]["refresh_at"] and now > ro_task["vim_info"]["refresh_at"]: - new_status, db_vim_info_update = self.item2class[task["item"]].refresh(ro_task) + if ( + ro_task["vim_info"]["refresh_at"] + and now > ro_task["vim_info"]["refresh_at"] + ): + new_status, db_vim_info_update = self.item2class[ + task["item"] + ].refresh(ro_task) _update_refresh(new_status) + except Exception as e: new_status = "FAILED" - db_vim_info_update = {"vim_status": "VIM_ERROR", "vim_details": str(e)} - if not isinstance(e, (NsWorkerException, vimconn.VimConnException)): - self.logger.error("Unexpected exception at _delete_task task={}: {}". - format(task["task_id"], e), exc_info=True) + db_vim_info_update = { + "vim_status": "VIM_ERROR", + "vim_details": str(e), + } + + if not isinstance( + e, (NsWorkerException, vimconn.VimConnException) + ): + self.logger.error( + "Unexpected exception at _delete_task task={}: {}".format( + task["task_id"], e + ), + exc_info=True, + ) try: if db_vim_info_update: db_vim_update = db_vim_info_update.copy() - db_ro_task_update.update({"vim_info." + k: v for k, v in db_vim_info_update.items()}) + db_ro_task_update.update( + { + "vim_info." 
+ k: v
+                                    for k, v in db_vim_info_update.items()
+                                }
+                            )
                             ro_task["vim_info"].update(db_vim_info_update)

                         if new_status:
                             if task_action == "CREATE":
                                 task_status_create = new_status
                             db_ro_task_update[task_path] = new_status

-                            if target_update or db_vim_update:
+                        if target_update or db_vim_update:
                             if target_update == "DELETE":
                                 self._update_target(task, None)
                             elif target_update == "COPY_VIM_INFO":
@@ -1318,21 +1891,39 @@ class NsWorker(threading.Thread):
                                 self._update_target(task, db_vim_update)
                     except Exception as e:
-                        if isinstance(e, DbException) and e.http_code == HTTPStatus.NOT_FOUND:
+                        if (
+                            isinstance(e, DbException)
+                            and e.http_code == HTTPStatus.NOT_FOUND
+                        ):
                             # if the vnfrs or nsrs has been removed from database, this task must be removed
-                            self.logger.debug("marking to delete task={}".format(task["task_id"]))
+                            self.logger.debug(
+                                "marking to delete task={}".format(task["task_id"])
+                            )
                             self.tasks_to_delete.append(task)
                         else:
-                            self.logger.error("Unexpected exception at _update_target task={}: {}".
-                                              format(task["task_id"], e), exc_info=True)
+                            self.logger.error(
+                                "Unexpected exception at _update_target task={}: {}".format(
+                                    task["task_id"], e
+                                ),
+                                exc_info=True,
+                            )

             locked_at = ro_task["locked_at"]
+
             if lock_object:
-                locked_at = [lock_object["locked_at"], lock_object["locked_at"] + self.task_locked_time]
+                locked_at = [
+                    lock_object["locked_at"],
+                    lock_object["locked_at"] + self.task_locked_time,
+                ]
                 # locked_at contains two times to avoid race condition. In case the lock has been renewed, it will
                 # contain exactly locked_at + self.task_locked_time
                 LockRenew.remove_lock_object(lock_object)
-            q_filter = {"_id": ro_task["_id"], "to_check_at": ro_task["to_check_at"], "locked_at": locked_at}
+
+            q_filter = {
+                "_id": ro_task["_id"],
+                "to_check_at": ro_task["to_check_at"],
+                "locked_at": locked_at,
+            }
             # modify own task. Try filtering by to_next_check. To avoid a race condition, if to_check_at has been
             # modified outside this task (by ro_nbi), do not update it
             db_ro_task_update["locked_by"] = None
             db_ro_task_update["locked_at"] = int(now - self.task_locked_time)
             db_ro_task_update["modified_at"] = now
             db_ro_task_update["to_check_at"] = next_check_at
-            if not self.db.set_one("ro_tasks",
-                                   update_dict=db_ro_task_update,
-                                   q_filter=q_filter,
-                                   fail_on_empty=False):
+
+            if not self.db.set_one(
+                "ro_tasks",
+                update_dict=db_ro_task_update,
+                q_filter=q_filter,
+                fail_on_empty=False,
+            ):
                 del db_ro_task_update["to_check_at"]
                 del q_filter["to_check_at"]
-                self.db.set_one("ro_tasks",
-                                q_filter=q_filter,
-                                update_dict=db_ro_task_update,
-                                fail_on_empty=True)
+
+                self.db.set_one(
+                    "ro_tasks",
+                    q_filter=q_filter,
+                    update_dict=db_ro_task_update,
+                    fail_on_empty=True,
+                )
         except DbException as e:
-            self.logger.error("ro_task={} Error updating database {}".format(ro_task_id, e))
+            self.logger.error(
+                "ro_task={} Error updating database {}".format(ro_task_id, e)
+            )
         except Exception as e:
-            self.logger.error("Error executing ro_task={}: {}".format(ro_task_id, e), exc_info=True)
+            self.logger.error(
+                "Error executing ro_task={}: {}".format(ro_task_id, e), exc_info=True
+            )

     def _update_target(self, task, ro_vim_item_update):
         table, _, temp = task["target_record"].partition(":")
         _id, _, path_vim_status = temp.partition(":")
-        path_item = path_vim_status[:path_vim_status.rfind(".")]
-        path_item = path_item[:path_item.rfind(".")]
+        path_item = path_vim_status[: path_vim_status.rfind(".")]
+        path_item = path_item[: path_item.rfind(".")]
         # path_vim_status: dot separated list targeting vim information, e.g. "vdur.10.vim_info.vim:id"
         # path_item: dot separated list targeting record information, e.g. "vdur.10"
+
         if ro_vim_item_update:
-            update_dict = {path_vim_status + "." + k: v for k, v in ro_vim_item_update.items() if k in
-                           ('vim_id', 'vim_details', 'vim_name', 'vim_status', 'interfaces')}
+            update_dict = {
+                path_vim_status + "." 
+ k: v + for k, v in ro_vim_item_update.items() + if k + in ("vim_id", "vim_details", "vim_name", "vim_status", "interfaces") + } + if path_vim_status.startswith("vdur."): # for backward compatibility, add vdur.name apart from vdur.vim_name if ro_vim_item_update.get("vim_name"): update_dict[path_item + ".name"] = ro_vim_item_update["vim_name"] + # for backward compatibility, add vdur.vim-id apart from vdur.vim_id if ro_vim_item_update.get("vim_id"): update_dict[path_item + ".vim-id"] = ro_vim_item_update["vim_id"] + # update general status if ro_vim_item_update.get("vim_status"): - update_dict[path_item + ".status"] = ro_vim_item_update["vim_status"] + update_dict[path_item + ".status"] = ro_vim_item_update[ + "vim_status" + ] + if ro_vim_item_update.get("interfaces"): path_interfaces = path_item + ".interfaces" + for i, iface in enumerate(ro_vim_item_update.get("interfaces")): if iface: - update_dict.update({path_interfaces + ".{}.".format(i) + k: v for k, v in iface.items() if - k in ('vlan', 'compute_node', 'pci')}) + update_dict.update( + { + path_interfaces + ".{}.".format(i) + k: v + for k, v in iface.items() + if k in ("vlan", "compute_node", "pci") + } + ) + # put ip_address and mac_address with ip-address and mac-address - if iface.get('ip_address'): - update_dict[path_interfaces + ".{}.".format(i) + "ip-address"] = iface['ip_address'] - if iface.get('mac_address'): - update_dict[path_interfaces + ".{}.".format(i) + "mac-address"] = iface['mac_address'] + if iface.get("ip_address"): + update_dict[ + path_interfaces + ".{}.".format(i) + "ip-address" + ] = iface["ip_address"] + + if iface.get("mac_address"): + update_dict[ + path_interfaces + ".{}.".format(i) + "mac-address" + ] = iface["mac_address"] + if iface.get("mgmt_vnf_interface") and iface.get("ip_address"): - update_dict["ip-address"] = iface.get("ip_address").split(";")[0] + update_dict["ip-address"] = iface.get("ip_address").split( + ";" + )[0] + if iface.get("mgmt_vdu_interface") and iface.get("ip_address"): - update_dict[path_item + ".ip-address"] = iface.get("ip_address").split(";")[0] + update_dict[path_item + ".ip-address"] = iface.get( + "ip_address" + ).split(";")[0] self.db.set_one(table, q_filter={"_id": _id}, update_dict=update_dict) else: update_dict = {path_item + ".status": "DELETED"} - self.db.set_one(table, q_filter={"_id": _id}, update_dict=update_dict, unset={path_vim_status: None}) + self.db.set_one( + table, + q_filter={"_id": _id}, + update_dict=update_dict, + unset={path_vim_status: None}, + ) def _process_delete_db_tasks(self): """ @@ -1405,14 +2039,18 @@ class NsWorker(threading.Thread): task = self.tasks_to_delete[0] vnfrs_deleted = None nsr_id = task["nsr_id"] + if task["target_record"].startswith("vnfrs:"): # check if nsrs is present if self.db.get_one("nsrs", {"_id": nsr_id}, fail_on_empty=False): vnfrs_deleted = task["target_record"].split(":")[1] + try: self.delete_db_tasks(self.db, nsr_id, vnfrs_deleted) except Exception as e: - self.logger.error("Error deleting task={}: {}".format(task["task_id"], e)) + self.logger.error( + "Error deleting task={}: {}".format(task["task_id"], e) + ) self.tasks_to_delete.pop(0) @staticmethod @@ -1429,29 +2067,45 @@ class NsWorker(threading.Thread): ro_tasks = db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id}) now = time.time() conflict = False + for ro_task in ro_tasks: db_update = {} to_delete_ro_task = True + for index, task in enumerate(ro_task["tasks"]): if not task: pass - elif (not vnfrs_deleted and task["nsr_id"] == nsr_id) or \ - (vnfrs_deleted and 
task["target_record"].startswith("vnfrs:"+vnfrs_deleted)): + elif (not vnfrs_deleted and task["nsr_id"] == nsr_id) or ( + vnfrs_deleted + and task["target_record"].startswith("vnfrs:" + vnfrs_deleted) + ): db_update["tasks.{}".format(index)] = None else: - to_delete_ro_task = False # used by other nsr, ro_task cannot be deleted + # used by other nsr, ro_task cannot be deleted + to_delete_ro_task = False + # delete or update if nobody has changed ro_task meanwhile. Used modified_at for known if changed if to_delete_ro_task: - if not db.del_one("ro_tasks", - q_filter={"_id": ro_task["_id"], "modified_at": ro_task["modified_at"]}, - fail_on_empty=False): + if not db.del_one( + "ro_tasks", + q_filter={ + "_id": ro_task["_id"], + "modified_at": ro_task["modified_at"], + }, + fail_on_empty=False, + ): conflict = True elif db_update: db_update["modified_at"] = now - if not db.set_one("ro_tasks", - q_filter={"_id": ro_task["_id"], "modified_at": ro_task["modified_at"]}, - update_dict=db_update, - fail_on_empty=False): + if not db.set_one( + "ro_tasks", + q_filter={ + "_id": ro_task["_id"], + "modified_at": ro_task["modified_at"], + }, + update_dict=db_update, + fail_on_empty=False, + ): conflict = True if not conflict: return @@ -1491,7 +2145,9 @@ class NsWorker(threading.Thread): if isinstance(e, queue.Empty): pass else: - self.logger.critical("Error processing task: {}".format(e), exc_info=True) + self.logger.critical( + "Error processing task: {}".format(e), exc_info=True + ) # step 2: process pending_tasks, delete not needed tasks try: @@ -1505,6 +2161,8 @@ class NsWorker(threading.Thread): if not busy: time.sleep(5) except Exception as e: - self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True) + self.logger.critical( + "Unexpected exception at run: " + str(e), exc_info=True + ) self.logger.info("Finishing") diff --git a/NG-RO/osm_ng_ro/ro_main.py b/NG-RO/osm_ng_ro/ro_main.py index 485f15db..c9cad857 100644 --- a/NG-RO/osm_ng_ro/ro_main.py +++ b/NG-RO/osm_ng_ro/ro_main.py @@ -41,13 +41,13 @@ from osm_ng_ro import version as ro_version, version_date as ro_version_date __author__ = "Alfonso Tierno " -__version__ = "0.1." # file version, not NBI version +__version__ = "0.1." 
# file version, not NBI version version_date = "May 2020" -database_version = '1.2' -auth_database_version = '1.0' -ro_server = None # instance of Server class -vim_admin_thread = None # instance of VimAdminThread class +database_version = "1.2" +auth_database_version = "1.0" +ro_server = None # instance of Server class +vim_admin_thread = None # instance of VimAdminThread class # vim_threads = None # instance of VimThread class @@ -76,10 +76,7 @@ valid_url_methods = { "tokens": { "METHODS": ("POST",), "ROLE_PERMISSION": "tokens:", - "": { - "METHODS": ("DELETE",), - "ROLE_PERMISSION": "tokens:id:" - } + "": {"METHODS": ("DELETE",), "ROLE_PERMISSION": "tokens:id:"}, }, } }, @@ -97,9 +94,9 @@ valid_url_methods = { "cancel": { "METHODS": ("POST",), "ROLE_PERMISSION": "deploy:id:id:cancel", - } - } - } + }, + }, + }, }, } }, @@ -107,7 +104,6 @@ valid_url_methods = { class RoException(Exception): - def __init__(self, message, http_code=HTTPStatus.METHOD_NOT_ALLOWED): Exception.__init__(self, message) self.http_code = http_code @@ -118,18 +114,15 @@ class AuthException(RoException): class Authenticator: - def __init__(self, valid_url_methods, valid_query_string): self.valid_url_methods = valid_url_methods self.valid_query_string = valid_query_string def authorize(self, *args, **kwargs): return {"token": "ok", "id": "ok"} - + def new_token(self, token_info, indata, remote): - return {"token": "ok", - "id": "ok", - "remote": remote} + return {"token": "ok", "id": "ok", "remote": remote} def del_token(self, token_id): pass @@ -161,6 +154,7 @@ class Server(object): def _format_in(self, kwargs): try: indata = None + if cherrypy.request.body.length: error_text = "Invalid input format " @@ -171,32 +165,50 @@ class Server(object): cherrypy.request.headers.pop("Content-File-MD5", None) elif "application/yaml" in cherrypy.request.headers["Content-Type"]: error_text = "Invalid yaml format " - indata = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader) + indata = yaml.load( + cherrypy.request.body, Loader=yaml.SafeLoader + ) cherrypy.request.headers.pop("Content-File-MD5", None) - elif "application/binary" in cherrypy.request.headers["Content-Type"] or \ - "application/gzip" in cherrypy.request.headers["Content-Type"] or \ - "application/zip" in cherrypy.request.headers["Content-Type"] or \ - "text/plain" in cherrypy.request.headers["Content-Type"]: + elif ( + "application/binary" in cherrypy.request.headers["Content-Type"] + or "application/gzip" + in cherrypy.request.headers["Content-Type"] + or "application/zip" in cherrypy.request.headers["Content-Type"] + or "text/plain" in cherrypy.request.headers["Content-Type"] + ): indata = cherrypy.request.body # .read() - elif "multipart/form-data" in cherrypy.request.headers["Content-Type"]: + elif ( + "multipart/form-data" + in cherrypy.request.headers["Content-Type"] + ): if "descriptor_file" in kwargs: filecontent = kwargs.pop("descriptor_file") + if not filecontent.file: - raise RoException("empty file or content", HTTPStatus.BAD_REQUEST) + raise RoException( + "empty file or content", HTTPStatus.BAD_REQUEST + ) + indata = filecontent.file # .read() + if filecontent.content_type.value: - cherrypy.request.headers["Content-Type"] = filecontent.content_type.value + cherrypy.request.headers[ + "Content-Type" + ] = filecontent.content_type.value else: # raise cherrypy.HTTPError(HTTPStatus.Not_Acceptable, # "Only 'Content-Type' of type 'application/json' or # 'application/yaml' for input format are available") error_text = "Invalid yaml format " - indata = 
yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader) + indata = yaml.load( + cherrypy.request.body, Loader=yaml.SafeLoader + ) cherrypy.request.headers.pop("Content-File-MD5", None) else: error_text = "Invalid yaml format " indata = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader) cherrypy.request.headers.pop("Content-File-MD5", None) + if not indata: indata = {} @@ -213,7 +225,12 @@ class Server(object): kwargs[k] = yaml.load(v, Loader=yaml.SafeLoader) except Exception: pass - elif k.endswith(".gt") or k.endswith(".lt") or k.endswith(".gte") or k.endswith(".lte"): + elif ( + k.endswith(".gt") + or k.endswith(".lt") + or k.endswith(".gte") + or k.endswith(".lte") + ): try: kwargs[k] = int(v) except Exception: @@ -251,55 +268,83 @@ class Server(object): :return: None """ accept = cherrypy.request.headers.get("Accept") + if data is None: if accept and "text/html" in accept: - return html.format(data, cherrypy.request, cherrypy.response, token_info) + return html.format( + data, cherrypy.request, cherrypy.response, token_info + ) + # cherrypy.response.status = HTTPStatus.NO_CONTENT.value return elif hasattr(data, "read"): # file object if _format: cherrypy.response.headers["Content-Type"] = _format elif "b" in data.mode: # binariy asssumig zip - cherrypy.response.headers["Content-Type"] = 'application/zip' + cherrypy.response.headers["Content-Type"] = "application/zip" else: - cherrypy.response.headers["Content-Type"] = 'text/plain' + cherrypy.response.headers["Content-Type"] = "text/plain" + # TODO check that cherrypy close file. If not implement pending things to close per thread next return data + if accept: if "application/json" in accept: - cherrypy.response.headers["Content-Type"] = 'application/json; charset=utf-8' + cherrypy.response.headers[ + "Content-Type" + ] = "application/json; charset=utf-8" a = json.dumps(data, indent=4) + "\n" + return a.encode("utf8") elif "text/html" in accept: - return html.format(data, cherrypy.request, cherrypy.response, token_info) - - elif "application/yaml" in accept or "*/*" in accept or "text/plain" in accept: + return html.format( + data, cherrypy.request, cherrypy.response, token_info + ) + elif ( + "application/yaml" in accept + or "*/*" in accept + or "text/plain" in accept + ): pass # if there is not any valid accept, raise an error. 
But if response is already an error, format in yaml elif cherrypy.response.status >= 400: - raise cherrypy.HTTPError(HTTPStatus.NOT_ACCEPTABLE.value, - "Only 'Accept' of type 'application/json' or 'application/yaml' " - "for output format are available") - cherrypy.response.headers["Content-Type"] = 'application/yaml' - return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False, - encoding='utf-8', allow_unicode=True) # , canonical=True, default_style='"' + raise cherrypy.HTTPError( + HTTPStatus.NOT_ACCEPTABLE.value, + "Only 'Accept' of type 'application/json' or 'application/yaml' " + "for output format are available", + ) + + cherrypy.response.headers["Content-Type"] = "application/yaml" + + return yaml.safe_dump( + data, + explicit_start=True, + indent=4, + default_flow_style=False, + tags=False, + encoding="utf-8", + allow_unicode=True, + ) # , canonical=True, default_style='"' @cherrypy.expose def index(self, *args, **kwargs): token_info = None + try: if cherrypy.request.method == "GET": token_info = self.authenticator.authorize() - outdata = token_info # Home page + outdata = token_info # Home page else: - raise cherrypy.HTTPError(HTTPStatus.METHOD_NOT_ALLOWED.value, - "Method {} not allowed for tokens".format(cherrypy.request.method)) + raise cherrypy.HTTPError( + HTTPStatus.METHOD_NOT_ALLOWED.value, + "Method {} not allowed for tokens".format(cherrypy.request.method), + ) return self._format_out(outdata, token_info) - except (NsException, AuthException) as e: # cherrypy.log("index Exception {}".format(e)) cherrypy.response.status = e.http_code.value + return self._format_out("Welcome to OSM!", token_info) @cherrypy.expose @@ -307,11 +352,19 @@ class Server(object): # TODO consider to remove and provide version using the static version file try: if cherrypy.request.method != "GET": - raise RoException("Only method GET is allowed", HTTPStatus.METHOD_NOT_ALLOWED) + raise RoException( + "Only method GET is allowed", + HTTPStatus.METHOD_NOT_ALLOWED, + ) elif args or kwargs: - raise RoException("Invalid URL or query string for version", HTTPStatus.METHOD_NOT_ALLOWED) + raise RoException( + "Invalid URL or query string for version", + HTTPStatus.METHOD_NOT_ALLOWED, + ) + # TODO include version of other modules, pick up from some kafka admin message osm_ng_ro_version = {"version": ro_version, "date": ro_version_date} + return self._format_out(osm_ng_ro_version) except RoException as e: cherrypy.response.status = e.http_code.value @@ -320,6 +373,7 @@ class Server(object): "status": e.http_code.value, "detail": str(e), } + return self._format_out(problem_details, None) def new_token(self, engine_session, indata, *args, **kwargs): @@ -329,58 +383,77 @@ class Server(object): token_info = self.authenticator.authorize() except Exception: token_info = None + if kwargs: indata.update(kwargs) + # This is needed to log the user when authentication fails cherrypy.request.login = "{}".format(indata.get("username", "-")) - token_info = self.authenticator.new_token(token_info, indata, cherrypy.request.remote) - cherrypy.session['Authorization'] = token_info["id"] + token_info = self.authenticator.new_token( + token_info, indata, cherrypy.request.remote + ) + cherrypy.session["Authorization"] = token_info["id"] self._set_location_header("admin", "v1", "tokens", token_info["id"]) # for logging # cherrypy.response.cookie["Authorization"] = outdata["id"] # cherrypy.response.cookie["Authorization"]['expires'] = 3600 + return token_info, token_info["id"], True def 
del_token(self, engine_session, indata, version, _id, *args, **kwargs): token_id = _id + if not token_id and "id" in kwargs: token_id = kwargs["id"] elif not token_id: token_info = self.authenticator.authorize() # for logging token_id = token_info["id"] + self.authenticator.del_token(token_id) token_info = None - cherrypy.session['Authorization'] = "logout" + cherrypy.session["Authorization"] = "logout" # cherrypy.response.cookie["Authorization"] = token_id # cherrypy.response.cookie["Authorization"]['expires'] = 0 + return None, None, True - + @cherrypy.expose def test(self, *args, **kwargs): - if not cherrypy.config.get("server.enable_test") or (isinstance(cherrypy.config["server.enable_test"], str) and - cherrypy.config["server.enable_test"].lower() == "false"): + if not cherrypy.config.get("server.enable_test") or ( + isinstance(cherrypy.config["server.enable_test"], str) + and cherrypy.config["server.enable_test"].lower() == "false" + ): cherrypy.response.status = HTTPStatus.METHOD_NOT_ALLOWED.value + return "test URL is disabled" + thread_info = None - if args and args[0] == "help": - return "
\ninit\nfile/  download file\ndb-clear/table\nfs-clear[/folder]\nlogin\nlogin2\n"\
-                   "sleep/
" + if args and args[0] == "help": + return ( + "
\ninit\nfile/  download file\ndb-clear/table\nfs-clear[/folder]\nlogin\nlogin2\n"
+                "sleep/
" + ) elif args and args[0] == "init": try: # self.ns.load_dbase(cherrypy.request.app.config) self.ns.create_admin() + return "Done. User 'admin', password 'admin' created" except Exception: cherrypy.response.status = HTTPStatus.FORBIDDEN.value + return self._format_out("Database already initialized") elif args and args[0] == "file": - return cherrypy.lib.static.serve_file(cherrypy.tree.apps['/ro'].config["storage"]["path"] + "/" + args[1], - "text/plain", "attachment") + return cherrypy.lib.static.serve_file( + cherrypy.tree.apps["/ro"].config["storage"]["path"] + "/" + args[1], + "text/plain", + "attachment", + ) elif args and args[0] == "file2": - f_path = cherrypy.tree.apps['/ro'].config["storage"]["path"] + "/" + args[1] + f_path = cherrypy.tree.apps["/ro"].config["storage"]["path"] + "/" + args[1] f = open(f_path, "r") cherrypy.response.headers["Content-type"] = "text/plain" return f @@ -393,24 +466,32 @@ class Server(object): folders = (args[1],) else: folders = self.ns.fs.dir_ls(".") + for folder in folders: self.ns.fs.file_delete(folder) + return ",".join(folders) + " folders deleted\n" elif args and args[0] == "login": if not cherrypy.request.headers.get("Authorization"): - cherrypy.response.headers["WWW-Authenticate"] = 'Basic realm="Access to OSM site", charset="UTF-8"' + cherrypy.response.headers[ + "WWW-Authenticate" + ] = 'Basic realm="Access to OSM site", charset="UTF-8"' cherrypy.response.status = HTTPStatus.UNAUTHORIZED.value elif args and args[0] == "login2": if not cherrypy.request.headers.get("Authorization"): - cherrypy.response.headers["WWW-Authenticate"] = 'Bearer realm="Access to OSM site"' + cherrypy.response.headers[ + "WWW-Authenticate" + ] = 'Bearer realm="Access to OSM site"' cherrypy.response.status = HTTPStatus.UNAUTHORIZED.value elif args and args[0] == "sleep": sleep_time = 5 + try: sleep_time = int(args[1]) except Exception: cherrypy.response.status = HTTPStatus.FORBIDDEN.value return self._format_out("Database already initialized") + thread_info = cherrypy.thread_data print(thread_info) time.sleep(sleep_time) @@ -418,53 +499,76 @@ class Server(object): elif len(args) >= 2 and args[0] == "message": main_topic = args[1] return_text = "
{} ->\n".format(main_topic)
+
             try:
-                if cherrypy.request.method == 'POST':
+                if cherrypy.request.method == "POST":
                     to_send = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader)
                     for k, v in to_send.items():
                         self.ns.msg.write(main_topic, k, v)
                         return_text += "  {}: {}\n".format(k, v)
-                elif cherrypy.request.method == 'GET':
+                elif cherrypy.request.method == "GET":
                     for k, v in kwargs.items():
-                        self.ns.msg.write(main_topic, k, yaml.load(v, Loader=yaml.SafeLoader))
-                        return_text += "  {}: {}\n".format(k, yaml.load(v, Loader=yaml.SafeLoader))
+                        self.ns.msg.write(
+                            main_topic, k, yaml.load(v, Loader=yaml.SafeLoader)
+                        )
+                        return_text += "  {}: {}\n".format(
+                            k, yaml.load(v, Loader=yaml.SafeLoader)
+                        )
             except Exception as e:
                 return_text += "Error: " + str(e)
+
             return_text += "
\n" + return return_text return_text = ( - "
\nheaders:\n  args: {}\n".format(args) +
-            "  kwargs: {}\n".format(kwargs) +
-            "  headers: {}\n".format(cherrypy.request.headers) +
-            "  path_info: {}\n".format(cherrypy.request.path_info) +
-            "  query_string: {}\n".format(cherrypy.request.query_string) +
-            "  session: {}\n".format(cherrypy.session) +
-            "  cookie: {}\n".format(cherrypy.request.cookie) +
-            "  method: {}\n".format(cherrypy.request.method) +
-            "  session: {}\n".format(cherrypy.session.get('fieldname')) +
-            "  body:\n")
+            "
\nheaders:\n  args: {}\n".format(args)
+            + "  kwargs: {}\n".format(kwargs)
+            + "  headers: {}\n".format(cherrypy.request.headers)
+            + "  path_info: {}\n".format(cherrypy.request.path_info)
+            + "  query_string: {}\n".format(cherrypy.request.query_string)
+            + "  session: {}\n".format(cherrypy.session)
+            + "  cookie: {}\n".format(cherrypy.request.cookie)
+            + "  method: {}\n".format(cherrypy.request.method)
+            + "  session: {}\n".format(cherrypy.session.get("fieldname"))
+            + "  body:\n"
+        )
         return_text += "    length: {}\n".format(cherrypy.request.body.length)
+
         if cherrypy.request.body.length:
             return_text += "    content: {}\n".format(
-                str(cherrypy.request.body.read(int(cherrypy.request.headers.get('Content-Length', 0)))))
+                str(
+                    cherrypy.request.body.read(
+                        int(cherrypy.request.headers.get("Content-Length", 0))
+                    )
+                )
+            )
+
         if thread_info:
             return_text += "thread: {}\n".format(thread_info)
+
         return_text += "
" + return return_text @staticmethod def _check_valid_url_method(method, *args): if len(args) < 3: - raise RoException("URL must contain at least 'main_topic/version/topic'", HTTPStatus.METHOD_NOT_ALLOWED) + raise RoException( + "URL must contain at least 'main_topic/version/topic'", + HTTPStatus.METHOD_NOT_ALLOWED, + ) reference = valid_url_methods for arg in args: if arg is None: break + if not isinstance(reference, dict): - raise RoException("URL contains unexpected extra items '{}'".format(arg), - HTTPStatus.METHOD_NOT_ALLOWED) + raise RoException( + "URL contains unexpected extra items '{}'".format(arg), + HTTPStatus.METHOD_NOT_ALLOWED, + ) if arg in reference: reference = reference[arg] @@ -474,11 +578,22 @@ class Server(object): # reference = reference["*"] break else: - raise RoException("Unexpected URL item {}".format(arg), HTTPStatus.METHOD_NOT_ALLOWED) + raise RoException( + "Unexpected URL item {}".format(arg), + HTTPStatus.METHOD_NOT_ALLOWED, + ) + if "TODO" in reference and method in reference["TODO"]: - raise RoException("Method {} not supported yet for this URL".format(method), HTTPStatus.NOT_IMPLEMENTED) + raise RoException( + "Method {} not supported yet for this URL".format(method), + HTTPStatus.NOT_IMPLEMENTED, + ) elif "METHODS" not in reference or method not in reference["METHODS"]: - raise RoException("Method {} not supported for this URL".format(method), HTTPStatus.METHOD_NOT_ALLOWED) + raise RoException( + "Method {} not supported for this URL".format(method), + HTTPStatus.METHOD_NOT_ALLOWED, + ) + return reference["ROLE_PERMISSION"] + method.lower() @staticmethod @@ -492,71 +607,137 @@ class Server(object): :return: None """ # Use cherrypy.request.base for absoluted path and make use of request.header HOST just in case behind aNAT - cherrypy.response.headers["Location"] = "/ro/{}/{}/{}/{}".format(main_topic, version, topic, id) + cherrypy.response.headers["Location"] = "/ro/{}/{}/{}/{}".format( + main_topic, version, topic, id + ) + return @cherrypy.expose - def default(self, main_topic=None, version=None, topic=None, _id=None, _id2=None, *args, **kwargs): + def default( + self, + main_topic=None, + version=None, + topic=None, + _id=None, + _id2=None, + *args, + **kwargs, + ): token_info = None outdata = None _format = None method = "DONE" rollback = [] engine_session = None + try: if not main_topic or not version or not topic: - raise RoException("URL must contain at least 'main_topic/version/topic'", - HTTPStatus.METHOD_NOT_ALLOWED) - if main_topic not in ("admin", "ns",): - raise RoException("URL main_topic '{}' not supported".format(main_topic), - HTTPStatus.METHOD_NOT_ALLOWED) - if version != 'v1': - raise RoException("URL version '{}' not supported".format(version), HTTPStatus.METHOD_NOT_ALLOWED) - - if kwargs and "METHOD" in kwargs and kwargs["METHOD"] in ("PUT", "POST", "DELETE", "GET", "PATCH"): + raise RoException( + "URL must contain at least 'main_topic/version/topic'", + HTTPStatus.METHOD_NOT_ALLOWED, + ) + + if main_topic not in ( + "admin", + "ns", + ): + raise RoException( + "URL main_topic '{}' not supported".format(main_topic), + HTTPStatus.METHOD_NOT_ALLOWED, + ) + + if version != "v1": + raise RoException( + "URL version '{}' not supported".format(version), + HTTPStatus.METHOD_NOT_ALLOWED, + ) + + if ( + kwargs + and "METHOD" in kwargs + and kwargs["METHOD"] in ("PUT", "POST", "DELETE", "GET", "PATCH") + ): method = kwargs.pop("METHOD") else: method = cherrypy.request.method - role_permission = self._check_valid_url_method(method, main_topic, 
version, topic, _id, _id2, *args, - **kwargs) + role_permission = self._check_valid_url_method( + method, main_topic, version, topic, _id, _id2, *args, **kwargs + ) # skip token validation if requesting a token indata = self._format_in(kwargs) + if main_topic != "admin" or topic != "tokens": token_info = self.authenticator.authorize(role_permission, _id) + outdata, created_id, done = self.map_operation[role_permission]( - engine_session, indata, version, _id, _id2, *args, *kwargs) + engine_session, indata, version, _id, _id2, *args, *kwargs + ) + if created_id: self._set_location_header(main_topic, version, topic, _id) - cherrypy.response.status = HTTPStatus.ACCEPTED.value if not done else HTTPStatus.OK.value if \ - outdata is not None else HTTPStatus.NO_CONTENT.value + + cherrypy.response.status = ( + HTTPStatus.ACCEPTED.value + if not done + else HTTPStatus.OK.value + if outdata is not None + else HTTPStatus.NO_CONTENT.value + ) + return self._format_out(outdata, token_info, _format) except Exception as e: - if isinstance(e, (RoException, NsException, DbException, FsException, MsgException, AuthException, - ValidationError)): + if isinstance( + e, + ( + RoException, + NsException, + DbException, + FsException, + MsgException, + AuthException, + ValidationError, + ), + ): http_code_value = cherrypy.response.status = e.http_code.value http_code_name = e.http_code.name cherrypy.log("Exception {}".format(e)) else: - http_code_value = cherrypy.response.status = HTTPStatus.BAD_REQUEST.value # INTERNAL_SERVER_ERROR + http_code_value = ( + cherrypy.response.status + ) = HTTPStatus.BAD_REQUEST.value # INTERNAL_SERVER_ERROR cherrypy.log("CRITICAL: Exception {}".format(e), traceback=True) http_code_name = HTTPStatus.BAD_REQUEST.name + if hasattr(outdata, "close"): # is an open file outdata.close() + error_text = str(e) rollback.reverse() + for rollback_item in rollback: try: if rollback_item.get("operation") == "set": - self.ns.db.set_one(rollback_item["topic"], {"_id": rollback_item["_id"]}, - rollback_item["content"], fail_on_empty=False) + self.ns.db.set_one( + rollback_item["topic"], + {"_id": rollback_item["_id"]}, + rollback_item["content"], + fail_on_empty=False, + ) else: - self.ns.db.del_one(rollback_item["topic"], {"_id": rollback_item["_id"]}, - fail_on_empty=False) + self.ns.db.del_one( + rollback_item["topic"], + {"_id": rollback_item["_id"]}, + fail_on_empty=False, + ) except Exception as e2: - rollback_error_text = "Rollback Exception {}: {}".format(rollback_item, e2) + rollback_error_text = "Rollback Exception {}: {}".format( + rollback_item, e2 + ) cherrypy.log(rollback_error_text) error_text += ". 
" + rollback_error_text + # if isinstance(e, MsgException): # error_text = "{} has been '{}' but other modules cannot be informed because an error on bus".format( # engine_topic[:-1], method, error_text) @@ -565,6 +746,7 @@ class Server(object): "status": http_code_value, "detail": error_text, } + return self._format_out(problem_details, token_info) # raise cherrypy.HTTPError(e.http_code.value, str(e)) finally: @@ -572,7 +754,9 @@ class Server(object): if method in ("PUT", "PATCH", "POST") and isinstance(outdata, dict): for logging_id in ("id", "op_id", "nsilcmop_id", "nslcmop_id"): if outdata.get(logging_id): - cherrypy.request.login += ";{}={}".format(logging_id, outdata[logging_id][:36]) + cherrypy.request.login += ";{}={}".format( + logging_id, outdata[logging_id][:36] + ) def _start_service(): @@ -587,24 +771,27 @@ def _start_service(): cherrypy.log.error("Starting osm_ng_ro") # update general cherrypy configuration update_dict = {} + engine_config = cherrypy.tree.apps["/ro"].config - engine_config = cherrypy.tree.apps['/ro'].config for k, v in environ.items(): if not k.startswith("OSMRO_"): continue + k1, _, k2 = k[6:].lower().partition("_") + if not k2: continue + try: if k1 in ("server", "test", "auth", "log"): # update [global] configuration - update_dict[k1 + '.' + k2] = yaml.safe_load(v) + update_dict[k1 + "." + k2] = yaml.safe_load(v) elif k1 == "static": # update [/static] configuration engine_config["/static"]["tools.staticdir." + k2] = yaml.safe_load(v) elif k1 == "tools": # update [/] configuration - engine_config["/"]["tools." + k2.replace('_', '.')] = yaml.safe_load(v) + engine_config["/"]["tools." + k2.replace("_", ".")] = yaml.safe_load(v) elif k1 in ("message", "database", "storage", "authentication"): engine_config[k1][k2] = yaml.safe_load(v) @@ -616,26 +803,35 @@ def _start_service(): engine_config["global"].update(update_dict) # logging cherrypy - log_format_simple = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s" - log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S') + log_format_simple = ( + "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s" + ) + log_formatter_simple = logging.Formatter( + log_format_simple, datefmt="%Y-%m-%dT%H:%M:%S" + ) logger_server = logging.getLogger("cherrypy.error") logger_access = logging.getLogger("cherrypy.access") logger_cherry = logging.getLogger("cherrypy") logger = logging.getLogger("ro") if "log.file" in engine_config["global"]: - file_handler = logging.handlers.RotatingFileHandler(engine_config["global"]["log.file"], - maxBytes=100e6, backupCount=9, delay=0) + file_handler = logging.handlers.RotatingFileHandler( + engine_config["global"]["log.file"], maxBytes=100e6, backupCount=9, delay=0 + ) file_handler.setFormatter(log_formatter_simple) logger_cherry.addHandler(file_handler) logger.addHandler(file_handler) + # log always to standard output - for format_, logger in {"ro.server %(filename)s:%(lineno)s": logger_server, - "ro.access %(filename)s:%(lineno)s": logger_access, - "%(name)s %(filename)s:%(lineno)s": logger - }.items(): + for format_, logger in { + "ro.server %(filename)s:%(lineno)s": logger_server, + "ro.access %(filename)s:%(lineno)s": logger_access, + "%(name)s %(filename)s:%(lineno)s": logger, + }.items(): log_format_cherry = "%(asctime)s %(levelname)s {} %(message)s".format(format_) - log_formatter_cherry = logging.Formatter(log_format_cherry, datefmt='%Y-%m-%dT%H:%M:%S') + log_formatter_cherry = logging.Formatter( + 
log_format_cherry, datefmt="%Y-%m-%dT%H:%M:%S" + ) str_handler = logging.StreamHandler() str_handler.setFormatter(log_formatter_cherry) logger.addHandler(str_handler) @@ -643,24 +839,32 @@ def _start_service(): if engine_config["global"].get("log.level"): logger_cherry.setLevel(engine_config["global"]["log.level"]) logger.setLevel(engine_config["global"]["log.level"]) + # logging other modules - for k1, logname in {"message": "ro.msg", "database": "ro.db", "storage": "ro.fs"}.items(): + for k1, logname in { + "message": "ro.msg", + "database": "ro.db", + "storage": "ro.fs", + }.items(): engine_config[k1]["logger_name"] = logname logger_module = logging.getLogger(logname) + if "logfile" in engine_config[k1]: - file_handler = logging.handlers.RotatingFileHandler(engine_config[k1]["logfile"], - maxBytes=100e6, backupCount=9, delay=0) + file_handler = logging.handlers.RotatingFileHandler( + engine_config[k1]["logfile"], maxBytes=100e6, backupCount=9, delay=0 + ) file_handler.setFormatter(log_formatter_simple) logger_module.addHandler(file_handler) + if "loglevel" in engine_config[k1]: logger_module.setLevel(engine_config[k1]["loglevel"]) # TODO add more entries, e.g.: storage engine_config["assignment"] = {} # ^ each VIM, SDNc will be assigned one worker id. Ns class will add items and VimThread will auto-assign - cherrypy.tree.apps['/ro'].root.ns.start(engine_config) - cherrypy.tree.apps['/ro'].root.authenticator.start(engine_config) - cherrypy.tree.apps['/ro'].root.ns.init_db(target_version=database_version) + cherrypy.tree.apps["/ro"].root.ns.start(engine_config) + cherrypy.tree.apps["/ro"].root.authenticator.start(engine_config) + cherrypy.tree.apps["/ro"].root.ns.init_db(target_version=database_version) # # start subscriptions thread: vim_admin_thread = VimAdminThread(config=engine_config, engine=ro_server.ns) @@ -678,37 +882,45 @@ def _stop_service(): TODO: Ending database connections. 
""" global vim_admin_thread + # terminate vim_admin_thread if vim_admin_thread: vim_admin_thread.terminate() + vim_admin_thread = None - cherrypy.tree.apps['/ro'].root.ns.stop() + cherrypy.tree.apps["/ro"].root.ns.stop() cherrypy.log.error("Stopping osm_ng_ro") def ro_main(config_file): global ro_server + ro_server = Server() - cherrypy.engine.subscribe('start', _start_service) - cherrypy.engine.subscribe('stop', _stop_service) - cherrypy.quickstart(ro_server, '/ro', config_file) + cherrypy.engine.subscribe("start", _start_service) + cherrypy.engine.subscribe("stop", _stop_service) + cherrypy.quickstart(ro_server, "/ro", config_file) def usage(): - print("""Usage: {} [options] + print( + """Usage: {} [options] -c|--config [configuration_file]: loads the configuration file (default: ./ro.cfg) -h|--help: shows this help - """.format(sys.argv[0])) + """.format( + sys.argv[0] + ) + ) # --log-socket-host HOST: send logs to this host") # --log-socket-port PORT: send logs using this port (default: 9022)") -if __name__ == '__main__': +if __name__ == "__main__": try: # load parameters and configuration opts, args = getopt.getopt(sys.argv[1:], "hvc:", ["config=", "help"]) # TODO add "log-socket-host=", "log-socket-port=", "log-file=" config_file = None + for o, a in opts: if o in ("-h", "--help"): usage() @@ -717,17 +929,29 @@ if __name__ == '__main__': config_file = a else: assert False, "Unhandled option" + if config_file: if not path.isfile(config_file): - print("configuration file '{}' that not exist".format(config_file), file=sys.stderr) + print( + "configuration file '{}' that not exist".format(config_file), + file=sys.stderr, + ) exit(1) else: - for config_file in (path.dirname(__file__) + "/ro.cfg", "./ro.cfg", "/etc/osm/ro.cfg"): + for config_file in ( + path.dirname(__file__) + "/ro.cfg", + "./ro.cfg", + "/etc/osm/ro.cfg", + ): if path.isfile(config_file): break else: - print("No configuration file 'ro.cfg' found neither at local folder nor at /etc/osm/", file=sys.stderr) + print( + "No configuration file 'ro.cfg' found neither at local folder nor at /etc/osm/", + file=sys.stderr, + ) exit(1) + ro_main(config_file) except KeyboardInterrupt: print("KeyboardInterrupt. 
Finishing", file=sys.stderr) diff --git a/NG-RO/osm_ng_ro/validation.py b/NG-RO/osm_ng_ro/validation.py index 54d8eedc..efd940cb 100644 --- a/NG-RO/osm_ng_ro/validation.py +++ b/NG-RO/osm_ng_ro/validation.py @@ -25,10 +25,18 @@ Validator of input data using JSON schemas """ # Basis schemas -name_schema = {"type": "string", "minLength": 1, "maxLength": 255, "pattern": "^[^,;()'\"]+$"} +name_schema = { + "type": "string", + "minLength": 1, + "maxLength": 255, + "pattern": "^[^,;()'\"]+$", +} string_schema = {"type": "string", "minLength": 1, "maxLength": 255} ssh_key_schema = {"type": "string", "minLength": 1} -id_schema = {"type": "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"} +id_schema = { + "type": "string", + "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$", +} bool_schema = {"type": "boolean"} null_schema = {"type": "null"} object_schema = {"type": "object"} @@ -42,7 +50,7 @@ deploy_item_schema = { "vim_info": object_schema, "common_id": string_schema, }, - "additionalProperties": True + "additionalProperties": True, } deploy_item_list = { @@ -96,10 +104,10 @@ deploy_schema = { "type": "object", "properties": { "vld": deploy_item_list, - } + }, }, }, - "additionalProperties": False + "additionalProperties": False, } @@ -119,12 +127,17 @@ def validate_input(indata, schema_to_use): try: if schema_to_use: js_v(indata, schema_to_use) + return None except js_e.ValidationError as e: if e.path: error_pos = "at '" + ":".join(map(str, e.path)) + "'" else: error_pos = "" + raise ValidationError("Format error {} '{}' ".format(error_pos, e.message)) except js_e.SchemaError: - raise ValidationError("Bad json schema {}".format(schema_to_use), http_code=HTTPStatus.INTERNAL_SERVER_ERROR) + raise ValidationError( + "Bad json schema {}".format(schema_to_use), + http_code=HTTPStatus.INTERNAL_SERVER_ERROR, + ) diff --git a/NG-RO/osm_ng_ro/vim_admin.py b/NG-RO/osm_ng_ro/vim_admin.py index e843c80a..17bfb202 100644 --- a/NG-RO/osm_ng_ro/vim_admin.py +++ b/NG-RO/osm_ng_ro/vim_admin.py @@ -33,7 +33,6 @@ __author__ = "Alfonso Tierno " class VimAdminException(Exception): - def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST): self.http_code = http_code Exception.__init__(self, message) @@ -78,9 +77,10 @@ class LockRenew: "initial_lock_time": database_object["locked_at"], "locked_at": database_object["locked_at"], "thread": thread_object, - "unlocked": False # True when it is not needed any more + "unlocked": False, # True when it is not needed any more } LockRenew.renew_list.append(lock_object) + return lock_object @staticmethod @@ -90,36 +90,66 @@ class LockRenew: async def renew_locks(self): while not self.to_terminate: if not self.renew_list: - await asyncio.sleep(self.task_locked_time - self.task_relock_time, loop=self.loop) + await asyncio.sleep( + self.task_locked_time - self.task_relock_time, loop=self.loop + ) continue + lock_object = self.renew_list[0] - if lock_object["unlocked"] or not lock_object["thread"] or not lock_object["thread"].is_alive(): + + if ( + lock_object["unlocked"] + or not lock_object["thread"] + or not lock_object["thread"].is_alive() + ): # task has been finished or locker thread is dead, not needed to re-locked. 
self.renew_list.pop(0) continue locked_at = lock_object["locked_at"] now = time() - time_to_relock = locked_at + self.task_locked_time - self.task_relock_time - now + time_to_relock = ( + locked_at + self.task_locked_time - self.task_relock_time - now + ) + if time_to_relock < 1: if lock_object["initial_lock_time"] + self.task_max_locked_time < now: self.renew_list.pop(0) # re-lock new_locked_at = locked_at + self.task_locked_time + try: - if self.db.set_one(lock_object["table"], - update_dict={"locked_at": new_locked_at, "modified_at": now}, - q_filter={"_id": lock_object["_id"], "locked_at": locked_at}, - fail_on_empty=False): - self.logger.debug("Renew lock for {}.{}".format(lock_object["table"], lock_object["_id"])) + if self.db.set_one( + lock_object["table"], + update_dict={ + "locked_at": new_locked_at, + "modified_at": now, + }, + q_filter={ + "_id": lock_object["_id"], + "locked_at": locked_at, + }, + fail_on_empty=False, + ): + self.logger.debug( + "Renew lock for {}.{}".format( + lock_object["table"], lock_object["_id"] + ) + ) lock_object["locked_at"] = new_locked_at self.renew_list.append(lock_object) else: - self.logger.info("Cannot renew lock for {}.{}".format(lock_object["table"], - lock_object["_id"])) + self.logger.info( + "Cannot renew lock for {}.{}".format( + lock_object["table"], lock_object["_id"] + ) + ) except Exception as e: - self.logger.error("Exception when trying to renew lock for {}.{}: {}".format( - lock_object["table"], lock_object["_id"], e)) + self.logger.error( + "Exception when trying to renew lock for {}.{}: {}".format( + lock_object["table"], lock_object["_id"], e + ) + ) else: # wait until it is time to re-lock it await asyncio.sleep(time_to_relock, loop=self.loop) @@ -127,12 +157,17 @@ class LockRenew: def stop(self): # unlock all locked items now = time() + for lock_object in self.renew_list: locked_at = lock_object["locked_at"] + if not lock_object["unlocked"] or locked_at + self.task_locked_time >= now: - self.db.set_one(lock_object["table"], update_dict={"locked_at": 0}, - q_filter={"_id": lock_object["_id"], "locked_at": locked_at}, - fail_on_empty=False) + self.db.set_one( + lock_object["table"], + update_dict={"locked_at": 0}, + q_filter={"_id": lock_object["_id"], "locked_at": locked_at}, + fail_on_empty=False, + ) class VimAdminThread(threading.Thread): @@ -156,20 +191,25 @@ class VimAdminThread(threading.Thread): self.last_rotask_time = 0 self.next_check_unused_vim = time() + self.TIME_CHECK_UNUSED_VIM self.logger = logging.getLogger("ro.vimadmin") - self.aiomain_task_kafka = None # asyncio task for receiving vim actions from kafka bus - self.aiomain_task_vim = None # asyncio task for watching ro_tasks not processed by nobody + # asyncio task for receiving vim actions from kafka bus + self.aiomain_task_kafka = None + # asyncio task for watching ro_tasks not processed by nobody + self.aiomain_task_vim = None self.aiomain_task_renew_lock = None # ^asyncio task for maintain an ro_task locked when VIM plugin takes too much time processing an order self.lock_renew = LockRenew(config, self.logger) self.task_locked_time = config["global"]["task_locked_time"] async def vim_watcher(self): - """ Reads database periodically looking for tasks not processed by nobody because of a reboot + """Reads database periodically looking for tasks not processed by nobody because of a reboot in order to load this vim""" # firstly read VIMS not processed for target_database in ("vim_accounts", "wim_accounts", "sdns"): - unattended_targets = 
self.db.get_list(target_database, - q_filter={"_admin.operations.operationState": "PROCESSING"}) + unattended_targets = self.db.get_list( + target_database, + q_filter={"_admin.operations.operationState": "PROCESSING"}, + ) + for target in unattended_targets: target_id = "{}:{}".format(target_database[:3], target["_id"]) self.logger.info("ordered to check {}".format(target_id)) @@ -178,37 +218,57 @@ class VimAdminThread(threading.Thread): while not self.to_terminate: now = time() processed_vims = [] + if not self.last_rotask_time: self.last_rotask_time = 0 - ro_tasks = self.db.get_list("ro_tasks", - q_filter={"target_id.ncont": self.engine.get_assigned_vims(), - "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'], - "locked_at.lt": now - self.task_locked_time, - "to_check_at.gt": self.last_rotask_time, - "to_check_at.lte": now - self.MAX_TIME_UNATTENDED}) + + ro_tasks = self.db.get_list( + "ro_tasks", + q_filter={ + "target_id.ncont": self.engine.get_assigned_vims(), + "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"], + "locked_at.lt": now - self.task_locked_time, + "to_check_at.gt": self.last_rotask_time, + "to_check_at.lte": now - self.MAX_TIME_UNATTENDED, + }, + ) self.last_rotask_time = now - self.MAX_TIME_UNATTENDED + for ro_task in ro_tasks: # if already checked ignore if ro_task["target_id"] in processed_vims: continue + processed_vims.append(ro_task["target_id"]) + # if already assigned ignore if ro_task["target_id"] in self.engine.get_assigned_vims(): continue + # if there is some task locked on this VIM, there is an RO working on it, so ignore - if self.db.get_list("ro_tasks", - q_filter={"target_id": ro_task["target_id"], - "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'], - "locked_at.gt": now - self.task_locked_time}): + if self.db.get_list( + "ro_tasks", + q_filter={ + "target_id": ro_task["target_id"], + "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"], + "locked_at.gt": now - self.task_locked_time, + }, + ): continue + # unattended, assign vim self.engine.assign_vim(ro_task["target_id"]) - self.logger.debug("ordered to load {}. Inactivity detected".format(ro_task["target_id"])) + self.logger.debug( + "ordered to load {}. 
Inactivity detected".format( + ro_task["target_id"] + ) + ) # every 2 hours check if there are vims without any ro_task and unload it if now > self.next_check_unused_vim: self.next_check_unused_vim = now + self.TIME_CHECK_UNUSED_VIM self.engine.unload_unused_vims() + await asyncio.sleep(self.MAX_TIME_UNATTENDED, loop=self.loop) async def aiomain(self): @@ -217,32 +277,57 @@ class VimAdminThread(threading.Thread): try: if not self.aiomain_task_kafka: # await self.msg.aiowrite("admin", "echo", "dummy message", loop=self.loop) - await self.msg.aiowrite("vim_account", "echo", "dummy message", loop=self.loop) + await self.msg.aiowrite( + "vim_account", "echo", "dummy message", loop=self.loop + ) kafka_working = True self.logger.debug("Starting vim_account subscription task") self.aiomain_task_kafka = asyncio.ensure_future( - self.msg.aioread(self.kafka_topics, loop=self.loop, group_id=False, - aiocallback=self._msg_callback), - loop=self.loop) + self.msg.aioread( + self.kafka_topics, + loop=self.loop, + group_id=False, + aiocallback=self._msg_callback, + ), + loop=self.loop, + ) + if not self.aiomain_task_vim: self.aiomain_task_vim = asyncio.ensure_future( - self.vim_watcher(), - loop=self.loop) + self.vim_watcher(), loop=self.loop + ) + if not self.aiomain_task_renew_lock: - self.aiomain_task_renew_lock = asyncio.ensure_future(self.lock_renew.renew_locks(), loop=self.loop) + self.aiomain_task_renew_lock = asyncio.ensure_future( + self.lock_renew.renew_locks(), loop=self.loop + ) done, _ = await asyncio.wait( - [self.aiomain_task_kafka, self.aiomain_task_vim, self.aiomain_task_renew_lock], - timeout=None, loop=self.loop, return_when=asyncio.FIRST_COMPLETED) + [ + self.aiomain_task_kafka, + self.aiomain_task_vim, + self.aiomain_task_renew_lock, + ], + timeout=None, + loop=self.loop, + return_when=asyncio.FIRST_COMPLETED, + ) + try: if self.aiomain_task_kafka in done: exc = self.aiomain_task_kafka.exception() - self.logger.error("kafka subscription task exception: {}".format(exc)) + self.logger.error( + "kafka subscription task exception: {}".format(exc) + ) self.aiomain_task_kafka = None + if self.aiomain_task_vim in done: exc = self.aiomain_task_vim.exception() - self.logger.error("vim_account watcher task exception: {}".format(exc)) + self.logger.error( + "vim_account watcher task exception: {}".format(exc) + ) self.aiomain_task_vim = None + if self.aiomain_task_renew_lock in done: exc = self.aiomain_task_renew_lock.exception() self.logger.error("renew_locks task exception: {}".format(exc)) @@ -253,10 +338,14 @@ class VimAdminThread(threading.Thread): except Exception as e: if self.to_terminate: return + if kafka_working: # logging only first time - self.logger.critical("Error accessing kafka '{}'. Retrying ...".format(e)) + self.logger.critical( + "Error accessing kafka '{}'. 
Retrying ...".format(e) + ) kafka_working = False + await asyncio.sleep(10, loop=self.loop) def run(self): @@ -274,13 +363,18 @@ class VimAdminThread(threading.Thread): self.db = dbmemory.DbMemory() self.db.db_connect(self.config["database"]) else: - raise VimAdminException("Invalid configuration param '{}' at '[database]':'driver'".format( - self.config["database"]["driver"])) + raise VimAdminException( + "Invalid configuration param '{}' at '[database]':'driver'".format( + self.config["database"]["driver"] + ) + ) + self.lock_renew.start(self.db, self.loop) if not self.msg: config_msg = self.config["message"].copy() config_msg["loop"] = self.loop + if config_msg["driver"] == "local": self.msg = msglocal.MsgLocal() self.msg.connect(config_msg) @@ -288,20 +382,27 @@ class VimAdminThread(threading.Thread): self.msg = msgkafka.MsgKafka() self.msg.connect(config_msg) else: - raise VimAdminException("Invalid configuration param '{}' at '[message]':'driver'".format( - config_msg["driver"])) + raise VimAdminException( + "Invalid configuration param '{}' at '[message]':'driver'".format( + config_msg["driver"] + ) + ) except (DbException, MsgException) as e: raise VimAdminException(str(e), http_code=e.http_code) self.logger.info("Starting") while not self.to_terminate: try: - self.loop.run_until_complete(asyncio.ensure_future(self.aiomain(), loop=self.loop)) + self.loop.run_until_complete( + asyncio.ensure_future(self.aiomain(), loop=self.loop) + ) # except asyncio.CancelledError: # break # if cancelled it should end, breaking loop except Exception as e: if not self.to_terminate: - self.logger.exception("Exception '{}' at messaging read loop".format(e), exc_info=True) + self.logger.exception( + "Exception '{}' at messaging read loop".format(e), exc_info=True + ) self.logger.info("Finishing") self._stop() @@ -318,9 +419,11 @@ class VimAdminThread(threading.Thread): try: if command == "echo": return + if topic in self.kafka_topics: - target = topic[0:3] # vim, wim or sdn + target = topic[0:3] # vim, wim or sdn target_id = target + ":" + params["_id"] + if command in ("edited", "edit"): self.engine.reload_vim(target_id) self.logger.debug("ordered to reload {}".format(target_id)) @@ -330,12 +433,19 @@ class VimAdminThread(threading.Thread): elif command in ("create", "created"): self.engine.check_vim(target_id) self.logger.debug("ordered to check {}".format(target_id)) - except (DbException, MsgException) as e: - self.logger.error("Error while processing topic={} command={}: {}".format(topic, command, e)) + self.logger.error( + "Error while processing topic={} command={}: {}".format( + topic, command, e + ) + ) except Exception as e: - self.logger.exception("Exception while processing topic={} command={}: {}".format(topic, command, e), - exc_info=True) + self.logger.exception( + "Exception while processing topic={} command={}: {}".format( + topic, command, e + ), + exc_info=True, + ) def _stop(self): """ @@ -345,6 +455,7 @@ class VimAdminThread(threading.Thread): try: if self.db: self.db.db_disconnect() + if self.msg: self.msg.disconnect() except (DbException, MsgException) as e: @@ -358,10 +469,14 @@ class VimAdminThread(threading.Thread): """ self.to_terminate = True self.lock_renew.to_terminate = True + if self.aiomain_task_kafka: self.loop.call_soon_threadsafe(self.aiomain_task_kafka.cancel) + if self.aiomain_task_vim: self.loop.call_soon_threadsafe(self.aiomain_task_vim.cancel) + if self.aiomain_task_renew_lock: self.loop.call_soon_threadsafe(self.aiomain_task_renew_lock.cancel) + 
self.lock_renew.stop() diff --git a/NG-RO/setup.py b/NG-RO/setup.py index 4e8dabaa..31c3b358 100644 --- a/NG-RO/setup.py +++ b/NG-RO/setup.py @@ -22,26 +22,28 @@ _name = "osm_ng_ro" _readme = "osm-ng-ro is the New Generation Resource Orchestrator for OSM" setup( name=_name, - description='OSM Resource Orchestrator', + description="OSM Resource Orchestrator", long_description=_readme, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), - author='ETSI OSM', - author_email='alfonso.tiernosepulveda@telefonica.com', - maintainer='Alfonso Tierno', - maintainer_email='alfonso.tiernosepulveda@telefonica.com', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), + author="ETSI OSM", + author_email="alfonso.tiernosepulveda@telefonica.com", + maintainer="Alfonso Tierno", + maintainer_email="alfonso.tiernosepulveda@telefonica.com", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=find_packages(exclude=["temp", "local"]), include_package_data=True, install_requires=[ - 'CherryPy==18.1.2', - 'osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git#egg=osm-common', - 'jsonschema', - 'PyYAML', - 'requests', - 'cryptography', # >=2.5 installed right version with the debian post-install script + "CherryPy==18.1.2", + "osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git#egg=osm-common", + "jsonschema", + "PyYAML", + "requests", + "cryptography", # >=2.5 installed right version with the debian post-install script "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], ) diff --git a/NG-RO/tox.ini b/NG-RO/tox.ini index 081bc1c8..fe012728 100644 --- a/NG-RO/tox.ini +++ b/NG-RO/tox.ini @@ -24,7 +24,7 @@ install_command = python3 -m pip install -r requirements.txt -U {opts} {packag basepython = python3 deps = flake8 commands = flake8 osm_ng_ro --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,E226,E402,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:build] basepython = python3 diff --git a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaConfigLet.py b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaConfigLet.py index f340f413..f45ec758 100644 --- a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaConfigLet.py +++ b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaConfigLet.py @@ -54,10 +54,10 @@ interface {interface} switchport_def = self._int_SRIOV.format(service=s_type, vlan_id=vlan_id) else: switchport_def = self._int_PASSTROUGH.format(vlan_id=vlan_id) - return self._basic_int.format(uuid=uuid, - interface=interface, - type=i_type, - switchport_def=switchport_def) + + return self._basic_int.format( + uuid=uuid, interface=interface, type=i_type, switchport_def=switchport_def + ) def getElan_sriov(self, uuid, interface, vlan_id, index): return self._get_interface(uuid, interface, vlan_id, "ELAN", index, "trunk") @@ -66,10 +66,14 @@ interface {interface} return self._get_interface(uuid, interface, vlan_id, "ELINE", index, "trunk") def getElan_passthrough(self, uuid, interface, vlan_id, index): - return self._get_interface(uuid, interface, vlan_id, "ELAN", 
index, "dot1q-tunnel") + return self._get_interface( + uuid, interface, vlan_id, "ELAN", index, "dot1q-tunnel" + ) def getEline_passthrough(self, uuid, interface, vlan_id, index): - return self._get_interface(uuid, interface, vlan_id, "ELINE", index, "dot1q-tunnel") + return self._get_interface( + uuid, interface, vlan_id, "ELINE", index, "dot1q-tunnel" + ) _basic_vlan = """ vlan {vlan} @@ -92,12 +96,21 @@ vlan {vlan} def _get_vlan(self, uuid, vlan_id, vni_id, s_type): if self.topology == self._VLAN: return self._configLet_VLAN.format(service=s_type, vlan=vlan_id, uuid=uuid) + if self.topology == self._VLAN_MLAG: - return self._configLet_VLAN_MLAG.format(service=s_type, vlan=vlan_id, uuid=uuid) + return self._configLet_VLAN_MLAG.format( + service=s_type, vlan=vlan_id, uuid=uuid + ) + if self.topology == self._VXLAN: - return self._configLet_VXLAN.format(service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id) + return self._configLet_VXLAN.format( + service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id + ) + if self.topology == self._VXLAN_MLAG: - return self._configLet_VXLAN_MLAG.format(service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id) + return self._configLet_VXLAN_MLAG.format( + service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id + ) def getElan_vlan(self, uuid, vlan_id, vni_id): return self._get_vlan(uuid, vlan_id, vni_id, "ELAN") @@ -117,11 +130,9 @@ router bgp {bgp} def _get_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp, s_type): if self.topology == self._VXLAN or self.topology == self._VXLAN_MLAG: - return self._configLet_BGP.format(uuid=uuid, - bgp=bgp, - vlan=vlan_id, - loopback=loopback0, - vni=vni_id) + return self._configLet_BGP.format( + uuid=uuid, bgp=bgp, vlan=vlan_id, loopback=loopback0, vni=vni_id + ) def getElan_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp): return self._get_bgp(uuid, vlan_id, vni_id, loopback0, bgp, "ELAN") diff --git a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaTask.py b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaTask.py index a338afd4..6af7c433 100644 --- a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaTask.py +++ b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaTask.py @@ -48,6 +48,7 @@ class AristaCVPTask: def __apply_state(self, task, state): t_id = self.__get_id(task) self.cvpClientApi.add_note_to_task(t_id, "Executed by OSM") + if state == "executed": return self.__execute_task(t_id) elif state == "cancelled": @@ -64,33 +65,39 @@ class AristaCVPTask: def update_all_tasks(self, data): new_data = dict() + for task_id in data.keys(): res = self.cvpClientApi.get_task_by_id(task_id) new_data[task_id] = res + return new_data def get_pending_tasks(self): - return self.cvpClientApi.get_tasks_by_status('Pending') + return self.cvpClientApi.get_tasks_by_status("Pending") def get_pending_tasks_old(self): taskList = [] - tasksField = {'workOrderId': 'workOrderId', - 'workOrderState': 'workOrderState', - 'currentTaskName': 'currentTaskName', - 'description': 'description', - 'workOrderUserDefinedStatus': - 'workOrderUserDefinedStatus', - 'note': 'note', - 'taskStatus': 'taskStatus', - 'workOrderDetails': 'workOrderDetails'} - tasks = self.cvpClientApi.get_tasks_by_status('Pending') + tasksField = { + "workOrderId": "workOrderId", + "workOrderState": "workOrderState", + "currentTaskName": "currentTaskName", + "description": "description", + "workOrderUserDefinedStatus": "workOrderUserDefinedStatus", + "note": "note", + "taskStatus": "taskStatus", + "workOrderDetails": "workOrderDetails", + } + 
tasks = self.cvpClientApi.get_tasks_by_status("Pending") + # Reduce task data to required fields for task in tasks: taskFacts = {} for field in task.keys(): if field in tasksField: taskFacts[tasksField[field]] = task[field] + taskList.append(taskFacts) + return taskList def task_action(self, tasks, wait, state): @@ -118,15 +125,18 @@ class AristaCVPTask: now = time.time() while (now - start) < wait: data = self.update_all_tasks(data) + if all([self.__terminal(self.__get_state(t)) for t in data.values()]): break + time.sleep(1) now = time.time() if wait: for i, task in data.items(): if not self.__terminal(self.__get_state(task)): - warnings.append("Task {} has not completed in {} seconds". - format(i, wait)) + warnings.append( + "Task {} has not completed in {} seconds".format(i, wait) + ) return changed, data, warnings diff --git a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/wimconn_arista.py b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/wimconn_arista.py index e72a0822..314c6733 100644 --- a/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/wimconn_arista.py +++ b/RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/wimconn_arista.py @@ -26,11 +26,14 @@ # # This work has been performed in the context of Arista Telefonica OSM PoC. ## + from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError import re import socket + # Required by compare function import difflib + # Library that uses Levenshtein Distance to calculate the differences # between strings. # from fuzzywuzzy import fuzz @@ -49,24 +52,22 @@ from osm_rosdn_arista_cloudvision.aristaTask import AristaCVPTask class SdnError(Enum): - UNREACHABLE = 'Unable to reach the WIM url, connect error.', - TIMEOUT = 'Unable to reach the WIM url, timeout.', - VLAN_INCONSISTENT = \ - 'VLAN value inconsistent between the connection points', - VLAN_NOT_PROVIDED = 'VLAN value not provided', - CONNECTION_POINTS_SIZE = \ - 'Unexpected number of connection points: 2 expected.', - ENCAPSULATION_TYPE = \ - 'Unexpected service_endpoint_encapsulation_type. ' \ - 'Only "dotq1" is accepted.', - BANDWIDTH = 'Unable to get the bandwidth.', - STATUS = 'Unable to get the status for the service.', - DELETE = 'Unable to delete service.', - CLEAR_ALL = 'Unable to clear all the services', - UNKNOWN_ACTION = 'Unknown action invoked.', - BACKUP = 'Unable to get the backup parameter.', - UNSUPPORTED_FEATURE = "Unsupported feature", - UNAUTHORIZED = "Failed while authenticating", + UNREACHABLE = "Unable to reach the WIM url, connect error." + TIMEOUT = "Unable to reach the WIM url, timeout." + VLAN_INCONSISTENT = "VLAN value inconsistent between the connection points" + VLAN_NOT_PROVIDED = "VLAN value not provided" + CONNECTION_POINTS_SIZE = "Unexpected number of connection points: 2 expected." + ENCAPSULATION_TYPE = ( + 'Unexpected service_endpoint_encapsulation_type. Only "dotq1" is accepted.' + ) + BANDWIDTH = "Unable to get the bandwidth." + STATUS = "Unable to get the status for the service." + DELETE = "Unable to delete service." + CLEAR_ALL = "Unable to clear all the services" + UNKNOWN_ACTION = "Unknown action invoked." + BACKUP = "Unable to get the backup parameter." + UNSUPPORTED_FEATURE = "Unsupported feature" + UNAUTHORIZED = "Failed while authenticating" INTERNAL_ERROR = "Internal error" @@ -97,14 +98,15 @@ class AristaSdnConnector(SdnConnectorBase): -- All created services identification is stored in a generic ConfigLet 'OSM_metadata' to keep track of the managed resources by OSM in the Arista deployment. 
""" + __supported_service_types = ["ELINE (L2)", "ELINE", "ELAN"] __service_types_ELAN = "ELAN" __service_types_ELINE = "ELINE" __ELINE_num_connection_points = 2 __supported_service_types = ["ELINE", "ELAN"] __supported_encapsulation_types = ["dot1q"] - __WIM_LOGGER = 'ro.sdn.arista' - __SERVICE_ENDPOINT_MAPPING = 'service_endpoint_mapping' + __WIM_LOGGER = "ro.sdn.arista" + __SERVICE_ENDPOINT_MAPPING = "service_endpoint_mapping" __ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type" __ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info" __BACKUP_PARAM = "backup" @@ -119,16 +121,16 @@ class AristaSdnConnector(SdnConnectorBase): __SW_PORT_PARAM = "switch_port" __VLAN_PARAM = "vlan" __VNI_PARAM = "vni" - __SEPARATOR = '_' - __MANAGED_BY_OSM = '## Managed by OSM ' + __SEPARATOR = "_" + __MANAGED_BY_OSM = "## Managed by OSM " __OSM_PREFIX = "osm_" __OSM_METADATA = "OSM_metadata" - __METADATA_PREFIX = '!## Service' + __METADATA_PREFIX = "!## Service" __EXC_TASK_EXEC_WAIT = 10 __ROLLB_TASK_EXEC_WAIT = 10 __API_REQUEST_TOUT = 60 - __SWITCH_TAG_NAME = 'topology_type' - __SWITCH_TAG_VALUE = 'leaf' + __SWITCH_TAG_NAME = "topology_type" + __SWITCH_TAG_VALUE = "leaf" __LOOPBACK_INTF = "Loopback0" _VLAN = "VLAN" _VXLAN = "VXLAN" @@ -159,55 +161,74 @@ class AristaSdnConnector(SdnConnectorBase): :param logger (logging.Logger): optional logger object. If none is passed 'ro.sdn.sdnconn' is used. """ self.__regex = re.compile( - r'^(?:http|ftp)s?://' # http:// or https:// - r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain... - r'localhost|' # localhost... - r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip - r'(?::\d+)?', re.IGNORECASE) # optional port + r"^(?:http|ftp)s?://" # http:// or https:// + r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|" # domain... + r"localhost|" # localhost... + r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})" # ...or ip + r"(?::\d+)?", + re.IGNORECASE, + ) # optional port self.raiseException = True self.logger = logger or logging.getLogger(self.__WIM_LOGGER) super().__init__(wim, wim_account, config, self.logger) self.__wim = wim self.__wim_account = wim_account self.__config = config + if self.is_valid_destination(self.__wim.get("wim_url")): self.__wim_url = self.__wim.get("wim_url") else: - raise SdnConnectorError(message='Invalid wim_url value', - http_code=500) + raise SdnConnectorError(message="Invalid wim_url value", http_code=500) + self.__user = wim_account.get("user") self.__passwd = wim_account.get("password") self.client = None self.cvp_inventory = None self.cvp_tags = None - self.logger.debug("Arista SDN plugin {}, cvprac version {}, user:{} and config:{}". 
- format(wim, cvprac_version, self.__user, - self.delete_keys_from_dict(config, ('passwd',)))) + self.logger.debug( + "Arista SDN plugin {}, cvprac version {}, user:{} and config:{}".format( + wim, + cvprac_version, + self.__user, + self.delete_keys_from_dict(config, ("passwd",)), + ) + ) self.allDeviceFacts = [] self.taskC = None + try: self.__load_topology() self.__load_switches() except (ConnectTimeout, Timeout) as ct: - raise SdnConnectorError(message=SdnError.TIMEOUT + " " + str(ct), http_code=408) + raise SdnConnectorError( + message=SdnError.TIMEOUT + " " + str(ct), http_code=408 + ) except ConnectionError as ce: - raise SdnConnectorError(message=SdnError.UNREACHABLE + " " + str(ce), http_code=404) + raise SdnConnectorError( + message=SdnError.UNREACHABLE + " " + str(ce), http_code=404 + ) except SdnConnectorError as sc: raise sc except CvpLoginError as le: raise SdnConnectorError(message=le.msg, http_code=500) from le except Exception as e: - raise SdnConnectorError(message="Unable to load switches from CVP" + " " + str(e), - http_code=500) from e - self.logger.debug("Using topology {} in Arista Leaf switches: {}".format( - self.topology, - self.delete_keys_from_dict(self.switches, ('passwd',)))) + raise SdnConnectorError( + message="Unable to load switches from CVP " + str(e), http_code=500 + ) from e + + self.logger.debug( + "Using topology {} in Arista Leaf switches: {}".format( + self.topology, self.delete_keys_from_dict(self.switches, ("passwd",)) + ) + ) self.clC = AristaSDNConfigLet(self.topology) def __load_topology(self): self.topology = self._VXLAN_MLAG - if self.__config and self.__config.get('topology'): - topology = self.__config.get('topology') + + if self.__config and self.__config.get("topology"): + topology = self.__config.get("topology") + if topology == "VLAN": self.topology = self._VLAN elif topology == "VXLAN": @@ -218,7 +239,7 @@ class AristaSdnConnector(SdnConnectorBase): self.topology = self._VXLAN_MLAG def __load_switches(self): - """ Retrieves the switches to configure in the following order + """Retrieves the switches to configure in the following order 1. 
from incoming configuration: 1.1 using port mapping using user and password from WIM @@ -236,47 +257,58 @@ class AristaSdnConnector(SdnConnectorBase): for port in self.__config.get(self.__SERVICE_ENDPOINT_MAPPING): switch_dpid = port.get(self.__SW_ID_PARAM) if switch_dpid and switch_dpid not in self.switches: - self.switches[switch_dpid] = {'passwd': self.__passwd, - 'ip': None, - 'usr': self.__user, - 'lo0': None, - 'AS': None, - 'serialNumber': None, - 'mlagPeerDevice': None} - - if self.__config and self.__config.get('switches'): + self.switches[switch_dpid] = { + "passwd": self.__passwd, + "ip": None, + "usr": self.__user, + "lo0": None, + "AS": None, + "serialNumber": None, + "mlagPeerDevice": None, + } + + if self.__config and self.__config.get("switches"): # Not directly from json, complete one by one - config_switches = self.__config.get('switches') + config_switches = self.__config.get("switches") for cs, cs_content in config_switches.items(): if cs not in self.switches: - self.switches[cs] = {'passwd': self.__passwd, - 'ip': None, - 'usr': self.__user, - 'lo0': None, - 'AS': None, - 'serialNumber': None, - 'mlagPeerDevice': None} + self.switches[cs] = { + "passwd": self.__passwd, + "ip": None, + "usr": self.__user, + "lo0": None, + "AS": None, + "serialNumber": None, + "mlagPeerDevice": None, + } + if cs_content: self.switches[cs].update(cs_content) # Load the rest of the data if self.client is None: self.client = self.__connect() + self.__load_inventory() + if not self.switches: self.__get_tags(self.__SWITCH_TAG_NAME, self.__SWITCH_TAG_VALUE) + for device in self.allDeviceFacts: # get the switches whose topology_tag is 'leaf' - if device['serialNumber'] in self.cvp_tags: - if not self.switches.get(device['hostname']): - switch_data = {'passwd': self.__passwd, - 'ip': device['ipAddress'], - 'usr': self.__user, - 'lo0': None, - 'AS': None, - 'serialNumber': None, - 'mlagPeerDevice': None} - self.switches[device['hostname']] = switch_data + if device["serialNumber"] in self.cvp_tags: + if not self.switches.get(device["hostname"]): + switch_data = { + "passwd": self.__passwd, + "ip": device["ipAddress"], + "usr": self.__user, + "lo0": None, + "AS": None, + "serialNumber": None, + "mlagPeerDevice": None, + } + self.switches[device["hostname"]] = switch_data + if len(self.switches) == 0: self.logger.error("Unable to load Leaf switches from CVP") return @@ -285,68 +317,93 @@ class AristaSdnConnector(SdnConnectorBase): # used to make eAPI calls by using switch.py module for s in self.switches: for device in self.allDeviceFacts: - if device['hostname'] == s: - if not self.switches[s].get('ip'): - self.switches[s]['ip'] = device['ipAddress'] - self.switches[s]['serialNumber'] = device['serialNumber'] + if device["hostname"] == s: + if not self.switches[s].get("ip"): + self.switches[s]["ip"] = device["ipAddress"] + self.switches[s]["serialNumber"] = device["serialNumber"] break # Each switch has a different loopback address, # so it's a different configLet - if not self.switches[s].get('lo0'): - inf = self.__get_interface_ip(self.switches[s]['serialNumber'], self.__LOOPBACK_INTF) - self.switches[s]["lo0"] = inf.split('/')[0] - if not self.switches[s].get('AS'): - self.switches[s]["AS"] = self.__get_device_ASN(self.switches[s]['serialNumber']) + if not self.switches[s].get("lo0"): + inf = self.__get_interface_ip( + self.switches[s]["serialNumber"], self.__LOOPBACK_INTF + ) + self.switches[s]["lo0"] = inf.split("/")[0] + + if not self.switches[s].get("AS"): + self.switches[s]["AS"] = 
self.__get_device_ASN( + self.switches[s]["serialNumber"] + ) + if self.topology in (self._VXLAN_MLAG, self._VLAN_MLAG): for s in self.switches: - if not self.switches[s].get('mlagPeerDevice'): - self.switches[s]['mlagPeerDevice'] = self.__get_peer_MLAG(self.switches[s]['serialNumber']) - - def __check_service(self, service_type, connection_points, - check_vlan=True, check_num_cp=True, kwargs=None): - """ Reviews the connection points elements looking for semantic errors in the incoming data - """ + if not self.switches[s].get("mlagPeerDevice"): + self.switches[s]["mlagPeerDevice"] = self.__get_peer_MLAG( + self.switches[s]["serialNumber"] + ) + + def __check_service( + self, + service_type, + connection_points, + check_vlan=True, + check_num_cp=True, + kwargs=None, + ): + """Reviews the connection points elements looking for semantic errors in the incoming data""" if service_type not in self.__supported_service_types: - raise Exception("The service '{}' is not supported. Only '{}' are accepted".format( - service_type, - self.__supported_service_types)) + raise Exception( + "The service '{}' is not supported. Only '{}' are accepted".format( + service_type, self.__supported_service_types + ) + ) if check_num_cp: if len(connection_points) < 2: raise Exception(SdnError.CONNECTION_POINTS_SIZE) - if (len(connection_points) != self.__ELINE_num_connection_points and - service_type == self.__service_types_ELINE): + + if ( + len(connection_points) != self.__ELINE_num_connection_points + and service_type == self.__service_types_ELINE + ): raise Exception(SdnError.CONNECTION_POINTS_SIZE) if check_vlan: - vlan_id = '' + vlan_id = "" + for cp in connection_points: enc_type = cp.get(self.__ENCAPSULATION_TYPE_PARAM) - if (enc_type and - enc_type not in self.__supported_encapsulation_types): + + if enc_type and enc_type not in self.__supported_encapsulation_types: raise Exception(SdnError.ENCAPSULATION_TYPE) + encap_info = cp.get(self.__ENCAPSULATION_INFO_PARAM) cp_vlan_id = str(encap_info.get(self.__VLAN_PARAM)) + if cp_vlan_id: if not vlan_id: vlan_id = cp_vlan_id elif vlan_id != cp_vlan_id: raise Exception(SdnError.VLAN_INCONSISTENT) + if not vlan_id: raise Exception(SdnError.VLAN_NOT_PROVIDED) + if vlan_id in self.__get_srvVLANs(): - raise Exception('VLAN {} already assigned to a connectivity service'.format(vlan_id)) + raise Exception( + "VLAN {} already assigned to a connectivity service".format(vlan_id) + ) # Commented out for as long as parameter isn't implemented # bandwidth = kwargs.get(self.__BANDWIDTH_PARAM) # if not isinstance(bandwidth, int): - # self.__exception(SdnError.BANDWIDTH, http_code=400) + # self.__exception(SdnError.BANDWIDTH, http_code=400) # Commented out for as long as parameter isn't implemented # backup = kwargs.get(self.__BACKUP_PARAM) # if not isinstance(backup, bool): - # self.__exception(SdnError.BACKUP, http_code=400) + # self.__exception(SdnError.BACKUP, http_code=400) def check_credentials(self): """Retrieves the CloudVision version information, as the easiest way @@ -355,18 +412,23 @@ class AristaSdnConnector(SdnConnectorBase): try: if self.client is None: self.client = self.__connect() + result = self.client.api.get_cvp_info() self.logger.debug(result) except CvpLoginError as e: self.logger.info(str(e)) self.client = None - raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e), - http_code=401) from e + + raise SdnConnectorError( + message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401 + ) from e except Exception as ex: self.client = None 
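+            # The cached client is rebuilt lazily: the next call that needs the
+            # API (e.g. __get_Connection()) will reconnect via __connect()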
self.logger.error(str(ex)) - raise SdnConnectorError(message=SdnError.INTERNAL_ERROR + " " + str(ex), - http_code=500) from ex + + raise SdnConnectorError( + message=SdnError.INTERNAL_ERROR + " " + str(ex), http_code=500 + ) from ex def get_connectivity_service_status(self, service_uuid, conn_info=None): """Monitor the status of the connectivity service established @@ -405,81 +467,112 @@ class AristaSdnConnector(SdnConnectorBase): new information available for the connectivity service. """ try: - self.logger.debug("invoked get_connectivity_service_status '{}'".format(service_uuid)) + self.logger.debug( + "invoked get_connectivity_service_status '{}'".format(service_uuid) + ) + if not service_uuid: - raise SdnConnectorError(message='No connection service UUID', - http_code=500) + raise SdnConnectorError( + message="No connection service UUID", http_code=500 + ) self.__get_Connection() - if conn_info is None: - raise SdnConnectorError(message='No connection information for service UUID {}'.format(service_uuid), - http_code=500) - if 'configLetPerSwitch' in conn_info.keys(): + if conn_info is None: + raise SdnConnectorError( + message="No connection information for service UUID {}".format( + service_uuid + ), + http_code=500, + ) + + if "configLetPerSwitch" in conn_info.keys(): c_info = conn_info else: c_info = None - cls_perSw = self.__get_serviceData(service_uuid, - conn_info['service_type'], - conn_info['vlan_id'], - c_info) + + cls_perSw = self.__get_serviceData( + service_uuid, conn_info["service_type"], conn_info["vlan_id"], c_info + ) t_isCancelled = False t_isFailed = False t_isPending = False failed_switches = [] + for s in self.switches: if len(cls_perSw[s]) > 0: for cl in cls_perSw[s]: # Fix 1030 SDN-ARISTA Key error note when deploy a NS # Added protection to check that 'note' exists and additionally # verify that it is managed by OSM - if (not cls_perSw[s][0]['config'] or - not cl.get('note') or - self.__MANAGED_BY_OSM not in cl['note']): + if ( + not cls_perSw[s][0]["config"] + or not cl.get("note") + or self.__MANAGED_BY_OSM not in cl["note"] + ): continue - note = cl['note'] + + note = cl["note"] t_id = note.split(self.__SEPARATOR)[1] result = self.client.api.get_task_by_id(t_id) - if result['workOrderUserDefinedStatus'] == 'Completed': + + if result["workOrderUserDefinedStatus"] == "Completed": continue - elif result['workOrderUserDefinedStatus'] == 'Cancelled': + elif result["workOrderUserDefinedStatus"] == "Cancelled": t_isCancelled = True - elif result['workOrderUserDefinedStatus'] == 'Failed': + elif result["workOrderUserDefinedStatus"] == "Failed": t_isFailed = True else: t_isPending = True + failed_switches.append(s) + if t_isCancelled: - error_msg = 'Some works were cancelled in switches: {}'.format(str(failed_switches)) - sdn_status = 'DOWN' + error_msg = "Some works were cancelled in switches: {}".format( + str(failed_switches) + ) + sdn_status = "DOWN" elif t_isFailed: - error_msg = 'Some works failed in switches: {}'.format(str(failed_switches)) - sdn_status = 'ERROR' + error_msg = "Some works failed in switches: {}".format( + str(failed_switches) + ) + sdn_status = "ERROR" elif t_isPending: - error_msg = 'Some works are still under execution in switches: {}'.format(str(failed_switches)) - sdn_status = 'BUILD' + error_msg = ( + "Some works are still under execution in switches: {}".format( + str(failed_switches) + ) + ) + sdn_status = "BUILD" else: - error_msg = '' - sdn_status = 'ACTIVE' - sdn_info = '' - return {'sdn_status': sdn_status, - 'error_msg': error_msg, 
- 'sdn_info': sdn_info} + error_msg = "" + sdn_status = "ACTIVE" + + sdn_info = "" + + return { + "sdn_status": sdn_status, + "error_msg": error_msg, + "sdn_info": sdn_info, + } except CvpLoginError as e: self.logger.info(str(e)) self.client = None - raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e), - http_code=401) from e + + raise SdnConnectorError( + message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401 + ) from e except Exception as ex: self.client = None self.logger.error(str(ex), exc_info=True) - raise SdnConnectorError(message=str(ex) + " " + str(ex), - http_code=500) from ex - def create_connectivity_service(self, service_type, connection_points, - **kwargs): - """Stablish SDN/WAN connectivity between the endpoints + raise SdnConnectorError( + message=str(ex) + " " + str(ex), http_code=500 + ) from ex + + def create_connectivity_service(self, service_type, connection_points, **kwargs): + """Establish SDN/WAN connectivity between the endpoints :param service_type: (str): ``ELINE`` (L2), ``ELAN`` (L2), ``ETREE`` (L2), ``L3``. :param connection_points: (list): each point corresponds to @@ -528,24 +621,24 @@ class AristaSdnConnector(SdnConnectorBase): Provide the parameter http_code """ try: - self.logger.debug("invoked create_connectivity_service '{}' ports: {}". - format(service_type, connection_points)) + self.logger.debug( + "invoked create_connectivity_service '{}' ports: {}".format( + service_type, connection_points + ) + ) self.__get_Connection() - self.__check_service(service_type, - connection_points, - check_vlan=True, - kwargs=kwargs) + self.__check_service( + service_type, connection_points, check_vlan=True, kwargs=kwargs + ) service_uuid = str(uuid.uuid4()) - self.logger.info("Service with uuid {} created.". 
- format(service_uuid)) + self.logger.info("Service with uuid {} created.".format(service_uuid)) s_uid, s_connInf = self.__processConnection( - service_uuid, - service_type, - connection_points, - kwargs) + service_uuid, service_type, connection_points, kwargs + ) + try: - self.__addMetadata(s_uid, service_type, s_connInf['vlan_id']) + self.__addMetadata(s_uid, service_type, s_connInf["vlan_id"]) except Exception: pass @@ -553,28 +646,29 @@ class AristaSdnConnector(SdnConnectorBase): except CvpLoginError as e: self.logger.info(str(e)) self.client = None - raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e), - http_code=401) from e + + raise SdnConnectorError( + message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401 + ) from e except SdnConnectorError as sde: raise sde except ValueError as err: self.client = None self.logger.error(str(err), exc_info=True) - raise SdnConnectorError(message=str(err), - http_code=500) from err + + raise SdnConnectorError(message=str(err), http_code=500) from err except Exception as ex: self.client = None self.logger.error(str(ex), exc_info=True) + if self.raiseException: raise ex - raise SdnConnectorError(message=str(ex), - http_code=500) from ex - def __processConnection(self, - service_uuid, - service_type, - connection_points, - kwargs): + raise SdnConnectorError(message=str(ex), http_code=500) from ex + + def __processConnection( + self, service_uuid, service_type, connection_points, kwargs + ): """ Invoked from creation and edit methods @@ -590,107 +684,133 @@ class AristaSdnConnector(SdnConnectorBase): cls_perSw = {} cls_cp = {} cl_bgp = {} + for s in self.switches: cls_perSw[s] = [] cls_cp[s] = [] + vlan_processed = False - vlan_id = '' + vlan_id = "" i = 0 processed_connection_points = [] + for cp in connection_points: i += 1 encap_info = cp.get(self.__ENCAPSULATION_INFO_PARAM) + if not vlan_processed: vlan_id = str(encap_info.get(self.__VLAN_PARAM)) + if not vlan_id: continue + vni_id = encap_info.get(self.__VNI_PARAM) + if not vni_id: vni_id = str(10000 + int(vlan_id)) if service_type == self.__service_types_ELAN: - cl_vlan = self.clC.getElan_vlan(service_uuid, - vlan_id, - vni_id) + cl_vlan = self.clC.getElan_vlan(service_uuid, vlan_id, vni_id) else: - cl_vlan = self.clC.getEline_vlan(service_uuid, - vlan_id, - vni_id) + cl_vlan = self.clC.getEline_vlan(service_uuid, vlan_id, vni_id) + vlan_processed = True encap_type = cp.get(self.__ENCAPSULATION_TYPE_PARAM) switch_id = encap_info.get(self.__SW_ID_PARAM) interface = encap_info.get(self.__SW_PORT_PARAM) - switches = [{'name': switch_id, 'interface': interface}] + switches = [{"name": switch_id, "interface": interface}] # remove those connections that are equal. 
This happens when several sriovs are located in the same # compute node interface, that is, in the same switch and interface switches = [x for x in switches if x not in processed_connection_points] + if not switches: continue + processed_connection_points += switches + for switch in switches: if not interface: raise SdnConnectorError( - message="Connection point switch port empty for switch_dpid {}".format(switch_id), - http_code=406) + message="Connection point switch port empty for switch_dpid {}".format( + switch_id + ), + http_code=406, + ) # it should be only one switch where the mac is attached - if encap_type == 'dot1q': + if encap_type == "dot1q": # SRIOV configLet for Leaf switch mac's attached to if service_type == self.__service_types_ELAN: - cl_encap = self.clC.getElan_sriov(service_uuid, interface, vlan_id, i) + cl_encap = self.clC.getElan_sriov( + service_uuid, interface, vlan_id, i + ) else: - cl_encap = self.clC.getEline_sriov(service_uuid, interface, vlan_id, i) + cl_encap = self.clC.getEline_sriov( + service_uuid, interface, vlan_id, i + ) elif not encap_type: # PT configLet for Leaf switch attached to the mac if service_type == self.__service_types_ELAN: - cl_encap = self.clC.getElan_passthrough(service_uuid, - interface, - vlan_id, i) + cl_encap = self.clC.getElan_passthrough( + service_uuid, interface, vlan_id, i + ) else: - cl_encap = self.clC.getEline_passthrough(service_uuid, - interface, - vlan_id, i) - if cls_cp.get(switch['name']): - cls_cp[switch['name']] = str(cls_cp[switch['name']]) + cl_encap + cl_encap = self.clC.getEline_passthrough( + service_uuid, interface, vlan_id, i + ) + + if cls_cp.get(switch["name"]): + cls_cp[switch["name"]] = str(cls_cp[switch["name"]]) + cl_encap else: - cls_cp[switch['name']] = cl_encap + cls_cp[switch["name"]] = cl_encap # at least 1 connection point has to be received if not vlan_processed: - raise SdnConnectorError(message=SdnError.UNSUPPORTED_FEATURE, - http_code=406) + raise SdnConnectorError( + message=SdnError.UNSUPPORTED_FEATURE, http_code=406 + ) for s in self.switches: # for cl in cp_configLets: - cl_name = (self.__OSM_PREFIX + - s + - self.__SEPARATOR + service_type + str(vlan_id) + - self.__SEPARATOR + service_uuid) - cl_config = '' + cl_name = ( + self.__OSM_PREFIX + + s + + self.__SEPARATOR + + service_type + + str(vlan_id) + + self.__SEPARATOR + + service_uuid + ) + cl_config = "" + # Apply BGP configuration only for VXLAN topologies if self.topology in (self._VXLAN_MLAG, self._VXLAN): if service_type == self.__service_types_ELAN: - cl_bgp[s] = self.clC.getElan_bgp(service_uuid, - vlan_id, - vni_id, - self.switches[s]['lo0'], - self.switches[s]['AS']) + cl_bgp[s] = self.clC.getElan_bgp( + service_uuid, + vlan_id, + vni_id, + self.switches[s]["lo0"], + self.switches[s]["AS"], + ) else: - cl_bgp[s] = self.clC.getEline_bgp(service_uuid, - vlan_id, - vni_id, - self.switches[s]['lo0'], - self.switches[s]['AS']) + cl_bgp[s] = self.clC.getEline_bgp( + service_uuid, + vlan_id, + vni_id, + self.switches[s]["lo0"], + self.switches[s]["AS"], + ) else: - cl_bgp[s] = '' + cl_bgp[s] = "" if not cls_cp.get(s): # Apply VLAN configuration to peer MLAG switch, # only necessary when there are no connection points in the switch if self.topology in (self._VXLAN_MLAG, self._VLAN_MLAG): for p in self.switches: - if self.switches[p]['mlagPeerDevice'] == s: + if self.switches[p]["mlagPeerDevice"] == s: if cls_cp.get(p): if self.topology == self._VXLAN_MLAG: cl_config = str(cl_vlan) + str(cl_bgp[s]) @@ -699,7 +819,7 @@ class 
AristaSdnConnector(SdnConnectorBase): else: cl_config = str(cl_vlan) + str(cl_bgp[s]) + str(cls_cp[s]) - cls_perSw[s] = [{'name': cl_name, 'config': cl_config}] + cls_perSw[s] = [{"name": cl_name, "config": cl_config}] allLeafConfigured, allLeafModified = self.__updateConnection(cls_perSw) @@ -710,17 +830,19 @@ class AristaSdnConnector(SdnConnectorBase): "vlan_id": vlan_id, "connection_points": connection_points, "configLetPerSwitch": cls_perSw, - 'allLeafConfigured': allLeafConfigured, - 'allLeafModified': allLeafModified} + "allLeafConfigured": allLeafConfigured, + "allLeafModified": allLeafModified, + } return service_uuid, conn_info except Exception as ex: - self.logger.debug("Exception processing connection {}: {}". - format(service_uuid, str(ex))) + self.logger.debug( + "Exception processing connection {}: {}".format(service_uuid, str(ex)) + ) raise ex def __updateConnection(self, cls_perSw): - """ Invoked in the creation and modification + """Invoked in the creation and modification checks if the new connection points config is: - already in the Cloud Vision, the configLet is modified, and applied to the switch, @@ -740,18 +862,21 @@ class AristaSdnConnector(SdnConnectorBase): for s in self.switches: allLeafConfigured[s] = False allLeafModified[s] = False + cl_toDelete = [] + for s in self.switches: toDelete_in_cvp = False - if not (cls_perSw.get(s) and cls_perSw[s][0].get('config')): + if not (cls_perSw.get(s) and cls_perSw[s][0].get("config")): # when there is no configuration, means that there is no interface # in the switch to be connected, so the configLet has to be removed from CloudVision # after removing the ConfigLet from the switch if it was already there # get config let name and key cl = cls_perSw[s] + try: - cvp_cl = self.client.api.get_configlet_by_name(cl[0]['name']) + cvp_cl = self.client.api.get_configlet_by_name(cl[0]["name"]) # remove configLet cl_toDelete.append(cvp_cl) cl[0] = cvp_cl @@ -765,69 +890,84 @@ class AristaSdnConnector(SdnConnectorBase): else: res = self.__configlet_modify(cls_perSw[s]) allLeafConfigured[s] = res[0] + if not allLeafConfigured[s]: continue + cl = cls_perSw[s] + res = self.__device_modify( - device_to_update=s, - new_configlets=cl, - delete=toDelete_in_cvp) + device_to_update=s, new_configlets=cl, delete=toDelete_in_cvp + ) + if "errorMessage" in str(res): raise Exception(str(res)) + self.logger.info("Device {} modify result {}".format(s, res)) - for t_id in res[1]['tasks']: + + for t_id in res[1]["tasks"]: if not toDelete_in_cvp: - note_msg = "{}{}{}{}##".format(self.__MANAGED_BY_OSM, - self.__SEPARATOR, - t_id, - self.__SEPARATOR) + note_msg = "{}{}{}{}##".format( + self.__MANAGED_BY_OSM, + self.__SEPARATOR, + t_id, + self.__SEPARATOR, + ) self.client.api.add_note_to_configlet( - cls_perSw[s][0]['key'], - note_msg) - cls_perSw[s][0]['note'] = note_msg - tasks = {t_id: {'workOrderId': t_id}} + cls_perSw[s][0]["key"], note_msg + ) + cls_perSw[s][0]["note"] = note_msg + + tasks = {t_id: {"workOrderId": t_id}} self.__exec_task(tasks, self.__EXC_TASK_EXEC_WAIT) + # with just one configLet assigned to a device, # delete all if there are errors in next loops if not toDelete_in_cvp: allLeafModified[s] = True + if len(cl_toDelete) > 0: self.__configlet_modify(cl_toDelete, delete=True) return allLeafConfigured, allLeafModified except Exception as ex: try: - self.__rollbackConnection(cls_perSw, - allLeafConfigured, - allLeafModified) + self.__rollbackConnection(cls_perSw, allLeafConfigured, allLeafModified) except Exception as e: - 
self.logger.error("Exception rolling back in updating connection: {}". - format(e), exc_info=True) + self.logger.error( + "Exception rolling back in updating connection: {}".format(e), + exc_info=True, + ) + raise ex - def __rollbackConnection(self, - cls_perSw, - allLeafConfigured, - allLeafModified): - """ Removes the given configLet from the devices and then remove the configLets - """ + def __rollbackConnection(self, cls_perSw, allLeafConfigured, allLeafModified): + """Removes the given configLet from the devices and then remove the configLets""" for s in self.switches: if allLeafModified[s]: try: res = self.__device_modify( device_to_update=s, new_configlets=cls_perSw[s], - delete=True) + delete=True, + ) + if "errorMessage" in str(res): raise Exception(str(res)) + tasks = dict() - for t_id in res[1]['tasks']: - tasks[t_id] = {'workOrderId': t_id} + + for t_id in res[1]["tasks"]: + tasks[t_id] = {"workOrderId": t_id} + self.__exec_task(tasks) self.logger.info("Device {} modify result {}".format(s, res)) except Exception as e: - self.logger.error('Error removing configlets from device {}: {}'.format(s, e)) + self.logger.error( + "Error removing configlets from device {}: {}".format(s, e) + ) pass + for s in self.switches: if allLeafConfigured[s]: self.__configlet_modify(cls_perSw[s], delete=True) @@ -835,23 +975,27 @@ class AristaSdnConnector(SdnConnectorBase): def __exec_task(self, tasks, tout=10): if self.taskC is None: self.__connect() + data = self.taskC.update_all_tasks(tasks).values() - self.taskC.task_action(data, tout, 'executed') + self.taskC.task_action(data, tout, "executed") def __device_modify(self, device_to_update, new_configlets, delete): - """ Updates the devices (switches) adding or removing the configLet, + """Updates the devices (switches) adding or removing the configLet, the tasks Id's associated to the change are returned """ - self.logger.info('Enter in __device_modify delete: {}'.format(delete)) + self.logger.info("Enter in __device_modify delete: {}".format(delete)) updated = [] changed = False # Task Ids that have been identified during device actions newTasks = [] - if (len(new_configlets) == 0 or - device_to_update is None or - len(device_to_update) == 0): - data = {'updated': updated, 'tasks': newTasks} + if ( + len(new_configlets) == 0 + or device_to_update is None + or len(device_to_update) == 0 + ): + data = {"updated": updated, "tasks": newTasks} + return [changed, data] self.__load_inventory() @@ -862,17 +1006,21 @@ class AristaSdnConnector(SdnConnectorBase): for try_device in allDeviceFacts: # Add Device Specific Configlets # self.logger.debug(device) - if try_device['hostname'] not in device_to_update: + if try_device["hostname"] not in device_to_update: continue + dev_cvp_configlets = self.client.api.get_configlets_by_device_id( - try_device['systemMacAddress']) + try_device["systemMacAddress"] + ) # self.logger.debug(dev_cvp_configlets) - try_device['deviceSpecificConfiglets'] = [] + try_device["deviceSpecificConfiglets"] = [] + for cvp_configlet in dev_cvp_configlets: - if int(cvp_configlet['containerCount']) == 0: - try_device['deviceSpecificConfiglets'].append( - {'name': cvp_configlet['name'], - 'key': cvp_configlet['key']}) + if int(cvp_configlet["containerCount"]) == 0: + try_device["deviceSpecificConfiglets"].append( + {"name": cvp_configlet["name"], "key": cvp_configlet["key"]} + ) + # self.logger.debug(device) device = try_device break @@ -884,81 +1032,95 @@ class AristaSdnConnector(SdnConnectorBase): update_devices = [] if delete: - for 
cvp_configlet in device['deviceSpecificConfiglets']: + for cvp_configlet in device["deviceSpecificConfiglets"]: for cl in new_configlets: - if cvp_configlet['name'] == cl['name']: + if cvp_configlet["name"] == cl["name"]: remove_configlets.append(cvp_configlet) device_update = True else: for configlet in new_configlets: - if configlet not in device['deviceSpecificConfiglets']: + if configlet not in device["deviceSpecificConfiglets"]: add_configlets.append(configlet) device_update = True + if device_update: - update_devices.append({'hostname': device['hostname'], - 'configlets': [add_configlets, - remove_configlets], - 'device': device}) + update_devices.append( + { + "hostname": device["hostname"], + "configlets": [add_configlets, remove_configlets], + "device": device, + } + ) + self.logger.info("Device to modify: {}".format(update_devices)) up_device = update_devices[0] - cl_toAdd = up_device['configlets'][0] - cl_toDel = up_device['configlets'][1] + cl_toAdd = up_device["configlets"][0] + cl_toDel = up_device["configlets"][1] + # Update Configlets try: if delete and len(cl_toDel) > 0: r = self.client.api.remove_configlets_from_device( - 'OSM', - up_device['device'], - cl_toDel, - create_task=True) + "OSM", up_device["device"], cl_toDel, create_task=True + ) dev_action = r - self.logger.debug("remove_configlets_from_device {} {}".format(dev_action, cl_toDel)) + self.logger.debug( + "remove_configlets_from_device {} {}".format(dev_action, cl_toDel) + ) elif len(cl_toAdd) > 0: r = self.client.api.apply_configlets_to_device( - 'OSM', - up_device['device'], - cl_toAdd, - create_task=True) + "OSM", up_device["device"], cl_toAdd, create_task=True + ) dev_action = r - self.logger.debug("apply_configlets_to_device {} {}".format(dev_action, cl_toAdd)) - + self.logger.debug( + "apply_configlets_to_device {} {}".format(dev_action, cl_toAdd) + ) except Exception as error: errorMessage = str(error) - msg = "errorMessage: Device {} Configlets couldnot be updated: {}".format( - up_device['hostname'], errorMessage) + msg = "errorMessage: Device {} Configlets could not be updated: {}".format( + up_device["hostname"], errorMessage + ) raise SdnConnectorError(msg) from error else: if "errorMessage" in str(dev_action): m = "Device {} Configlets update fail: {}".format( - up_device['name'], dev_action['errorMessage']) + up_device["name"], dev_action["errorMessage"] + ) raise SdnConnectorError(m) else: changed = True - if 'taskIds' in str(dev_action): + if "taskIds" in str(dev_action): # Fix 1030 SDN-ARISTA Key error note when deploy a NS - if not dev_action['data']['taskIds']: - raise SdnConnectorError("No taskIds found: Device {} Configlets could not be updated".format( - up_device['hostname'])) - for taskId in dev_action['data']['taskIds']: - updated.append({ - up_device['hostname']: "Configlets-{}".format(taskId)}) + if not dev_action["data"]["taskIds"]: + raise SdnConnectorError( + "No taskIds found: Device {} Configlets could not be updated".format( + up_device["hostname"] + ) + ) + + for taskId in dev_action["data"]["taskIds"]: + updated.append( + {up_device["hostname"]: "Configlets-{}".format(taskId)} + ) newTasks.append(taskId) else: - updated.append({up_device['hostname']: - "Configlets-No_Specific_Tasks"}) - data = {'updated': updated, 'tasks': newTasks} + updated.append( + {up_device["hostname"]: "Configlets-No_Specific_Tasks"} + ) + + data = {"updated": updated, "tasks": newTasks} + return [changed, data] def __configlet_modify(self, configletsToApply, delete=False): - ''' adds/update or delete 
the provided configLets + """Adds/update or delete the provided configLets :param configletsToApply: list of configLets to apply :param delete: flag to indicate if the configLets have to be deleted from Cloud Vision Portal :return: data: dict of module actions and taskIDs - ''' - self.logger.info('Enter in __configlet_modify delete:{}'.format( - delete)) + """ + self.logger.info("Enter in __configlet_modify delete:{}".format(delete)) # Compare configlets against cvp_facts-configlets changed = False @@ -973,10 +1135,11 @@ class AristaSdnConnector(SdnConnectorBase): to_update = False to_create = False to_check = False + try: - cvp_cl = self.client.api.get_configlet_by_name(cl['name']) - cl['key'] = cvp_cl['key'] - cl['note'] = cvp_cl['note'] + cvp_cl = self.client.api.get_configlet_by_name(cl["name"]) + cl["key"] = cvp_cl["key"] + cl["note"] = cvp_cl["note"] found_in_cvp = True except CvpApiError as error: if "Entity does not exist" in error.msg: @@ -987,119 +1150,130 @@ class AristaSdnConnector(SdnConnectorBase): if delete: if found_in_cvp: to_delete = True - configlet = {'name': cvp_cl['name'], - 'data': cvp_cl} + configlet = {"name": cvp_cl["name"], "data": cvp_cl} else: if found_in_cvp: - cl_compare = self.__compare(cl['config'], - cvp_cl['config']) + cl_compare = self.__compare(cl["config"], cvp_cl["config"]) + # compare function returns a floating point number if cl_compare[0] != 100.0: to_update = True - configlet = {'name': cl['name'], - 'data': cvp_cl, - 'config': cl['config']} + configlet = { + "name": cl["name"], + "data": cvp_cl, + "config": cl["config"], + } else: to_check = True - configlet = {'name': cl['name'], - 'key': cvp_cl['key'], - 'data': cvp_cl, - 'config': cl['config']} + configlet = { + "name": cl["name"], + "key": cvp_cl["key"], + "data": cvp_cl, + "config": cl["config"], + } else: to_create = True - configlet = {'name': cl['name'], - 'config': cl['config']} + configlet = {"name": cl["name"], "config": cl["config"]} try: if to_delete: - operation = 'delete' + operation = "delete" resp = self.client.api.delete_configlet( - configlet['data']['name'], - configlet['data']['key']) + configlet["data"]["name"], configlet["data"]["key"] + ) elif to_update: - operation = 'update' + operation = "update" resp = self.client.api.update_configlet( - configlet['config'], - configlet['data']['key'], - configlet['data']['name'], - wait_task_ids=True) + configlet["config"], + configlet["data"]["key"], + configlet["data"]["name"], + wait_task_ids=True, + ) elif to_create: - operation = 'create' + operation = "create" resp = self.client.api.add_configlet( - configlet['name'], - configlet['config']) + configlet["name"], configlet["config"] + ) else: - operation = 'checked' - resp = 'checked' + operation = "checked" + resp = "checked" except Exception as error: - errorMessage = str(error).split(':')[-1] + errorMessage = str(error).split(":")[-1] message = "Configlet {} cannot be {}: {}".format( - cl['name'], operation, errorMessage) + cl["name"], operation, errorMessage + ) + if to_delete: - deleted.append({configlet['name']: message}) + deleted.append({configlet["name"]: message}) elif to_update: - updated.append({configlet['name']: message}) + updated.append({configlet["name"]: message}) elif to_create: - new.append({configlet['name']: message}) + new.append({configlet["name"]: message}) elif to_check: - checked.append({configlet['name']: message}) - + checked.append({configlet["name"]: message}) else: if "error" in str(resp).lower(): message = "Configlet {} cannot be deleted: 
{}".format( - cl['name'], resp['errorMessage']) + cl["name"], resp["errorMessage"] + ) + if to_delete: - deleted.append({configlet['name']: message}) + deleted.append({configlet["name"]: message}) elif to_update: - updated.append({configlet['name']: message}) + updated.append({configlet["name"]: message}) elif to_create: - new.append({configlet['name']: message}) + new.append({configlet["name"]: message}) elif to_check: - checked.append({configlet['name']: message}) + checked.append({configlet["name"]: message}) else: if to_delete: changed = True - deleted.append({configlet['name']: "success"}) + deleted.append({configlet["name"]: "success"}) elif to_update: changed = True - updated.append({configlet['name']: "success"}) + updated.append({configlet["name"]: "success"}) elif to_create: changed = True - cl['key'] = resp # This key is used in API call deviceApplyConfigLet FGA - new.append({configlet['name']: "success"}) + # This key is used in API call deviceApplyConfigLet FGA + cl["key"] = resp + new.append({configlet["name"]: "success"}) elif to_check: changed = False - checked.append({configlet['name']: "success"}) + checked.append({configlet["name"]: "success"}) + + data = {"new": new, "updated": updated, "deleted": deleted, "checked": checked} - data = {'new': new, 'updated': updated, 'deleted': deleted, 'checked': checked} return [changed, data] def __get_configletsDevices(self, configlets): for s in self.switches: configlet = configlets[s] + # Add applied Devices if len(configlet) > 0: - configlet['devices'] = [] - applied_devices = self.client.api.get_applied_devices( - configlet['name']) - for device in applied_devices['data']: - configlet['devices'].append(device['hostName']) + configlet["devices"] = [] + applied_devices = self.client.api.get_applied_devices(configlet["name"]) + + for device in applied_devices["data"]: + configlet["devices"].append(device["hostName"]) def __get_serviceData(self, service_uuid, service_type, vlan_id, conn_info=None): cls_perSw = {} + for s in self.switches: cls_perSw[s] = [] + if not conn_info: - srv_cls = self.__get_serviceConfigLets(service_uuid, - service_type, - vlan_id) + srv_cls = self.__get_serviceConfigLets(service_uuid, service_type, vlan_id) self.__get_configletsDevices(srv_cls) + for s in self.switches: cl = srv_cls[s] if len(cl) > 0: - for dev in cl['devices']: + for dev in cl["devices"]: cls_perSw[dev].append(cl) else: - cls_perSw = conn_info['configLetPerSwitch'] + cls_perSw = conn_info["configLetPerSwitch"] + return cls_perSw def delete_connectivity_service(self, service_uuid, conn_info=None): @@ -1113,59 +1287,77 @@ class AristaSdnConnector(SdnConnectorBase): :raises: SdnConnectorException: In case of error. The parameter http_code must be filled """ try: - self.logger.debug('invoked delete_connectivity_service {}'. 
- format(service_uuid)) + self.logger.debug( + "invoked delete_connectivity_service {}".format(service_uuid) + ) + if not service_uuid: - raise SdnConnectorError(message='No connection service UUID', - http_code=500) + raise SdnConnectorError( + message="No connection service UUID", http_code=500 + ) self.__get_Connection() + if conn_info is None: - raise SdnConnectorError(message='No connection information for service UUID {}'.format(service_uuid), - http_code=500) + raise SdnConnectorError( + message="No connection information for service UUID {}".format( + service_uuid + ), + http_code=500, + ) + c_info = None - cls_perSw = self.__get_serviceData(service_uuid, - conn_info['service_type'], - conn_info['vlan_id'], - c_info) + cls_perSw = self.__get_serviceData( + service_uuid, conn_info["service_type"], conn_info["vlan_id"], c_info + ) allLeafConfigured = {} allLeafModified = {} + for s in self.switches: allLeafConfigured[s] = True allLeafModified[s] = True + found_in_cvp = False + for s in self.switches: if cls_perSw[s]: found_in_cvp = True + if found_in_cvp: - self.__rollbackConnection(cls_perSw, - allLeafConfigured, - allLeafModified) + self.__rollbackConnection(cls_perSw, allLeafConfigured, allLeafModified) else: # if the service is not defined in Cloud Vision, return a 404 - NotFound error - raise SdnConnectorError(message='Service {} was not found in Arista Cloud Vision {}'. - format(service_uuid, self.__wim_url), - http_code=404) + raise SdnConnectorError( + message="Service {} was not found in Arista Cloud Vision {}".format( + service_uuid, self.__wim_url + ), + http_code=404, + ) + self.__removeMetadata(service_uuid) except CvpLoginError as e: self.logger.info(str(e)) self.client = None - raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e), - http_code=401) from e + raise SdnConnectorError( + message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401 + ) from e except SdnConnectorError as sde: raise sde except Exception as ex: self.client = None self.logger.error(ex) + if self.raiseException: raise ex - raise SdnConnectorError(message=SdnError.INTERNAL_ERROR + " " + str(ex), - http_code=500) from ex + + raise SdnConnectorError( + message=SdnError.INTERNAL_ERROR + " " + str(ex), http_code=500 + ) from ex def __addMetadata(self, service_uuid, service_type, vlan_id): - """ Adds the connectivity service from 'OSM_metadata' configLet - """ + """Adds the connectivity service from 'OSM_metadata' configLet""" found_in_cvp = False + try: cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA) found_in_cvp = True @@ -1174,24 +1366,31 @@ class AristaSdnConnector(SdnConnectorBase): pass else: raise error + try: - new_serv = '{} {} {} {}\n'.format(self.__METADATA_PREFIX, service_type, vlan_id, service_uuid) + new_serv = "{} {} {} {}\n".format( + self.__METADATA_PREFIX, service_type, vlan_id, service_uuid + ) if found_in_cvp: - cl_config = cvp_cl['config'] + new_serv + cl_config = cvp_cl["config"] + new_serv else: cl_config = new_serv - cl_meta = [{'name': self.__OSM_METADATA, 'config': cl_config}] + + cl_meta = [{"name": self.__OSM_METADATA, "config": cl_config}] self.__configlet_modify(cl_meta) except Exception as e: - self.logger.error('Error in setting metadata in CloudVision from OSM for service {}: {}'. 
- format(service_uuid, str(e))) + self.logger.error( + "Error in setting metadata in CloudVision from OSM for service {}: {}".format( + service_uuid, str(e) + ) + ) pass def __removeMetadata(self, service_uuid): - """ Removes the connectivity service from 'OSM_metadata' configLet - """ + """Removes the connectivity service from 'OSM_metadata' configLet""" found_in_cvp = False + try: cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA) found_in_cvp = True @@ -1200,28 +1399,32 @@ class AristaSdnConnector(SdnConnectorBase): pass else: raise error + try: if found_in_cvp: - if service_uuid in cvp_cl['config']: - cl_config = '' - for line in cvp_cl['config'].split('\n'): + if service_uuid in cvp_cl["config"]: + cl_config = "" + + for line in cvp_cl["config"].split("\n"): if service_uuid in line: continue else: cl_config = cl_config + line - cl_meta = [{'name': self.__OSM_METADATA, 'config': cl_config}] + + cl_meta = [{"name": self.__OSM_METADATA, "config": cl_config}] self.__configlet_modify(cl_meta) except Exception as e: - self.logger.error('Error in removing metadata in CloudVision from OSM for service {}: {}'. - format(service_uuid, str(e))) + self.logger.error( + "Error in removing metadata in CloudVision from OSM for service {}: {}".format( + service_uuid, str(e) + ) + ) pass - def edit_connectivity_service(self, - service_uuid, - conn_info=None, - connection_points=None, - **kwargs): - """ Change an existing connectivity service. + def edit_connectivity_service( + self, service_uuid, conn_info=None, connection_points=None, **kwargs + ): + """Change an existing connectivity service. This method's arguments and return value follow the same convention as :meth:`~.create_connectivity_service`. @@ -1243,43 +1446,52 @@ class AristaSdnConnector(SdnConnectorBase): SdnConnectorError: In case of error. """ try: - self.logger.debug('invoked edit_connectivity_service for service {}. ports: {}'.format(service_uuid, - connection_points)) + self.logger.debug( + "invoked edit_connectivity_service for service {}. ports: {}".format( + service_uuid, connection_points + ) + ) if not service_uuid: - raise SdnConnectorError(message='Unable to perform operation, missing or empty uuid', - http_code=500) + raise SdnConnectorError( + message="Unable to perform operation, missing or empty uuid", + http_code=500, + ) + if not conn_info: - raise SdnConnectorError(message='Unable to perform operation, missing or empty connection information', - http_code=500) + raise SdnConnectorError( + message="Unable to perform operation, missing or empty connection information", + http_code=500, + ) if connection_points is None: return None self.__get_Connection() - cls_currentPerSw = conn_info['configLetPerSwitch'] - service_type = conn_info['service_type'] - - self.__check_service(service_type, - connection_points, - check_vlan=False, - check_num_cp=False, - kwargs=kwargs) + cls_currentPerSw = conn_info["configLetPerSwitch"] + service_type = conn_info["service_type"] - s_uid, s_connInf = self.__processConnection( - service_uuid, + self.__check_service( service_type, connection_points, - kwargs) - self.logger.info("Service with uuid {} configuration updated". 
- format(s_uid)) + check_vlan=False, + check_num_cp=False, + kwargs=kwargs, + ) + + s_uid, s_connInf = self.__processConnection( + service_uuid, service_type, connection_points, kwargs + ) + self.logger.info("Service with uuid {} configuration updated".format(s_uid)) + return s_connInf except CvpLoginError as e: self.logger.info(str(e)) self.client = None - raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e), - http_code=401) from e + raise SdnConnectorError( + message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401 + ) from e except SdnConnectorError as sde: raise sde except Exception as ex: @@ -1288,92 +1500,113 @@ class AristaSdnConnector(SdnConnectorBase): # TODO check if there are pending task, and cancel them before restoring self.__updateConnection(cls_currentPerSw) except Exception as e: - self.logger.error("Unable to restore configuration in service {} after an error in the configuration" - " updated: {}".format(service_uuid, str(e))) + self.logger.error( + "Unable to restore configuration in service {} after an error in the configuration" + " updated: {}".format(service_uuid, str(e)) + ) + if self.raiseException: raise ex - raise SdnConnectorError(message=str(ex), - http_code=500) from ex + + raise SdnConnectorError(message=str(ex), http_code=500) from ex def clear_all_connectivity_services(self): - """ Removes all connectivity services from Arista CloudVision with two steps: - - retrives all the services from Arista CloudVision + """Removes all connectivity services from Arista CloudVision with two steps: + - retrieves all the services from Arista CloudVision - removes each service """ try: - self.logger.debug('invoked AristaImpl ' + - 'clear_all_connectivity_services') + self.logger.debug("invoked AristaImpl clear_all_connectivity_services") self.__get_Connection() s_list = self.__get_srvUUIDs() + for serv in s_list: conn_info = {} - conn_info['service_type'] = serv['type'] - conn_info['vlan_id'] = serv['vlan'] - - self.delete_connectivity_service(serv['uuid'], conn_info) + conn_info["service_type"] = serv["type"] + conn_info["vlan_id"] = serv["vlan"] + self.delete_connectivity_service(serv["uuid"], conn_info) except CvpLoginError as e: self.logger.info(str(e)) self.client = None - raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e), - http_code=401) from e + + raise SdnConnectorError( + message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401 + ) from e except SdnConnectorError as sde: raise sde except Exception as ex: self.client = None self.logger.error(ex) + if self.raiseException: raise ex - raise SdnConnectorError(message=SdnError.INTERNAL_ERROR + " " + str(ex), - http_code=500) from ex + + raise SdnConnectorError( + message=SdnError.INTERNAL_ERROR + " " + str(ex), http_code=500 + ) from ex def get_all_active_connectivity_services(self): - """ Return the uuid of all the active connectivity services with two steps: + """Return the uuid of all the active connectivity services with two steps: - retrives all the services from Arista CloudVision - retrives the status of each server """ try: - self.logger.debug('invoked AristaImpl {}'.format( - 'get_all_active_connectivity_services')) + self.logger.debug( + "invoked AristaImpl {}".format("get_all_active_connectivity_services") + ) self.__get_Connection() s_list = self.__get_srvUUIDs() result = [] + for serv in s_list: conn_info = {} - conn_info['service_type'] = serv['type'] - conn_info['vlan_id'] = serv['vlan'] + conn_info["service_type"] = serv["type"] + conn_info["vlan_id"] = 
serv["vlan"] + status = self.get_connectivity_service_status(serv["uuid"], conn_info) + + if status["sdn_status"] == "ACTIVE": + result.append(serv["uuid"]) - status = self.get_connectivity_service_status(serv['uuid'], conn_info) - if status['sdn_status'] == 'ACTIVE': - result.append(serv['uuid']) return result except CvpLoginError as e: self.logger.info(str(e)) self.client = None - raise SdnConnectorError(message=SdnError.UNAUTHORIZED + " " + str(e), - http_code=401) from e + raise SdnConnectorError( + message=SdnError.UNAUTHORIZED + " " + str(e), http_code=401 + ) from e except SdnConnectorError as sde: raise sde except Exception as ex: self.client = None self.logger.error(ex) + if self.raiseException: raise ex - raise SdnConnectorError(message=SdnError.INTERNAL_ERROR, - http_code=500) from ex + + raise SdnConnectorError( + message=SdnError.INTERNAL_ERROR, http_code=500 + ) from ex def __get_serviceConfigLets(self, service_uuid, service_type, vlan_id): - """ Return the configLet's associated with a connectivity service, + """Return the configLet's associated with a connectivity service, There should be one, as maximum, per device (switch) for a given connectivity service """ srv_cls = {} + for s in self.switches: srv_cls[s] = [] found_in_cvp = False - name = (self.__OSM_PREFIX + - s + - self.__SEPARATOR + service_type + str(vlan_id) + - self.__SEPARATOR + service_uuid) + name = ( + self.__OSM_PREFIX + + s + + self.__SEPARATOR + + service_type + + str(vlan_id) + + self.__SEPARATOR + + service_uuid + ) + try: cvp_cl = self.client.api.get_configlet_by_name(name) found_in_cvp = True @@ -1382,16 +1615,19 @@ class AristaSdnConnector(SdnConnectorBase): pass else: raise error + if found_in_cvp: srv_cls[s] = cvp_cl + return srv_cls def __get_srvVLANs(self): - """ Returns a list with all the VLAN id's used in the connectivity services managed + """Returns a list with all the VLAN id's used in the connectivity services managed in tha Arista CloudVision by checking the 'OSM_metadata' configLet where this information is stored """ found_in_cvp = False + try: cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA) found_in_cvp = True @@ -1400,26 +1636,28 @@ class AristaSdnConnector(SdnConnectorBase): pass else: raise error + s_vlan_list = [] if found_in_cvp: - lines = cvp_cl['config'].split('\n') + lines = cvp_cl["config"].split("\n") + for line in lines: if self.__METADATA_PREFIX in line: - s_vlan = line.split(' ')[3] + s_vlan = line.split(" ")[3] else: continue - if (s_vlan is not None and - len(s_vlan) > 0 and - s_vlan not in s_vlan_list): + + if s_vlan is not None and len(s_vlan) > 0 and s_vlan not in s_vlan_list: s_vlan_list.append(s_vlan) return s_vlan_list def __get_srvUUIDs(self): - """ Retrieves all the connectivity services, managed in tha Arista CloudVision + """Retrieves all the connectivity services, managed in tha Arista CloudVision by checking the 'OSM_metadata' configLet where this information is stored """ found_in_cvp = False + try: cvp_cl = self.client.api.get_configlet_by_name(self.__OSM_METADATA) found_in_cvp = True @@ -1428,29 +1666,31 @@ class AristaSdnConnector(SdnConnectorBase): pass else: raise error + serv_list = [] if found_in_cvp: - lines = cvp_cl['config'].split('\n') + lines = cvp_cl["config"].split("\n") + for line in lines: if self.__METADATA_PREFIX in line: - line = line.split(' ') - serv = {'uuid': line[4], 'type': line[2], 'vlan': line[3]} + line = line.split(" ") + serv = {"uuid": line[4], "type": line[2], "vlan": line[3]} else: continue - if (serv is 
not None and
-                    len(serv) > 0 and
-                    serv not in serv_list):
+
+                if serv is not None and len(serv) > 0 and serv not in serv_list:
                     serv_list.append(serv)
 
         return serv_list
 
     def __get_Connection(self):
-        """ Open a connection with Arista CloudVision,
-            invoking the version retrival as test
+        """Open a connection with Arista CloudVision,
+        invoking the version retrieval as a test
         """
         try:
             if self.client is None:
                 self.client = self.__connect()
+
             self.client.api.get_cvp_info()
         except (CvpSessionLogOutError, RequestException) as e:
             self.logger.debug("Connection error '{}'. Reconnecting".format(e))
@@ -1458,12 +1698,13 @@ class AristaSdnConnector(SdnConnectorBase):
             self.client.api.get_cvp_info()
 
     def __connect(self):
-        ''' Connects to CVP device using user provided credentials from initialization.
+        """Connects to CVP device using user provided credentials from initialization.
         :return: CvpClient object with connection instantiated.
-        '''
+        """
         client = CvpClient()
         protocol, _, rest_url = self.__wim_url.rpartition("://")
         host, _, port = rest_url.partition(":")
+
         if port and port.endswith("/"):
             port = int(port[:-1])
         elif port:
@@ -1471,18 +1712,21 @@ class AristaSdnConnector(SdnConnectorBase):
         else:
             port = 443
 
-        client.connect([host],
-                       self.__user,
-                       self.__passwd,
-                       protocol=protocol or "https",
-                       port=port,
-                       connect_timeout=2)
+        client.connect(
+            [host],
+            self.__user,
+            self.__passwd,
+            protocol=protocol or "https",
+            port=port,
+            connect_timeout=2,
+        )
         client.api = CvpApi(client, request_timeout=self.__API_REQUEST_TOUT)
         self.taskC = AristaCVPTask(client.api)
+
        return client
 
     def __compare(self, fromText, toText, lines=10):
-        """ Compare text string in 'fromText' with 'toText' and produce
+        """Compare text string in 'fromText' with 'toText' and produce
         diffRatio - a score as a float in the range [0, 1]
         2.0*M / T
          T is the total number of elements in both sequences,
          M is the number of matches.
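
For reference, the comparison above reduces to difflib's SequenceMatcher plus
a unified diff that is kept for logging. A minimal standalone sketch of the
same idea (names are illustrative, not the plugin's API):

    import difflib

    def configlet_ratio(from_text, to_text, context_lines=10):
        """Return (ratio, diff) in the same shape as __compare()."""
        # unified diff retained only for logging/debugging
        diff = list(
            difflib.unified_diff(
                from_text.splitlines(1), to_text.splitlines(1), n=context_lines
            )
        )
        # quick_ratio() returns 0.0-1.0; the plugin scales it to 0-100
        ratio = round(
            difflib.SequenceMatcher(None, from_text, to_text).quick_ratio() * 100, 2
        )
        return ratio, diff

    # __configlet_modify() rewrites a configlet only when the score is not 100.0
    ratio, _ = configlet_ratio("interface Ethernet1\n", "interface Ethernet2\n")
    assert ratio < 100.0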
@@ -1499,108 +1743,162 @@ class AristaSdnConnector(SdnConnectorBase): tolines = toText.splitlines(1) diff = list(difflib.unified_diff(fromlines, tolines, n=lines)) textComp = difflib.SequenceMatcher(None, fromText, toText) - diffRatio = round(textComp.quick_ratio()*100, 2) + diffRatio = round(textComp.quick_ratio() * 100, 2) + return [diffRatio, diff] def __load_inventory(self): - """ Get Inventory Data for All Devices (aka switches) from the Arista CloudVision - """ + """Get Inventory Data for All Devices (aka switches) from the Arista CloudVision""" if not self.cvp_inventory: self.cvp_inventory = self.client.api.get_inventory() + self.allDeviceFacts = [] + for device in self.cvp_inventory: self.allDeviceFacts.append(device) def __get_tags(self, name, value): if not self.cvp_tags: self.cvp_tags = [] - url = '/api/v1/rest/analytics/tags/labels/devices/{}/value/{}/elements'.format(name, value) - self.logger.debug('get_tags: URL {}'.format(url)) + url = "/api/v1/rest/analytics/tags/labels/devices/{}/value/{}/elements".format( + name, value + ) + self.logger.debug("get_tags: URL {}".format(url)) data = self.client.get(url, timeout=self.__API_REQUEST_TOUT) - for dev in data['notifications']: - for elem in dev['updates']: + + for dev in data["notifications"]: + for elem in dev["updates"]: self.cvp_tags.append(elem) - self.logger.debug('Available devices with tag_name {} - value {}: {} '.format(name, value, self.cvp_tags)) + + self.logger.debug( + "Available devices with tag_name {} - value {}: {}".format( + name, value, self.cvp_tags + ) + ) def __get_interface_ip(self, device_id, interface): - url = '/api/v1/rest/{}/Sysdb/ip/config/ipIntfConfig/{}/'.format(device_id, interface) - self.logger.debug('get_interface_ip: URL {}'.format(url)) + url = "/api/v1/rest/{}/Sysdb/ip/config/ipIntfConfig/{}/".format( + device_id, interface + ) + self.logger.debug("get_interface_ip: URL {}".format(url)) data = None + try: data = self.client.get(url, timeout=self.__API_REQUEST_TOUT) - if data['notifications']: - for notification in data['notifications']: - for update in notification['updates']: - if update == 'addrWithMask': - return notification['updates'][update]['value'] + + if data["notifications"]: + for notification in data["notifications"]: + for update in notification["updates"]: + if update == "addrWithMask": + return notification["updates"][update]["value"] except Exception as e: - raise SdnConnectorError("Invalid response from url {}: data {} - {}".format(url, data, str(e))) - raise SdnConnectorError("Unable to get ip for interface {} in device {}, data {}". 
- format(interface, device_id, data)) + raise SdnConnectorError( + "Invalid response from url {}: data {} - {}".format(url, data, str(e)) + ) + + raise SdnConnectorError( + "Unable to get ip for interface {} in device {}, data {}".format( + interface, device_id, data + ) + ) def __get_device_ASN(self, device_id): - url = '/api/v1/rest/{}/Sysdb/routing/bgp/config/'.format(device_id) - self.logger.debug('get_device_ASN: URL {}'.format(url)) + url = "/api/v1/rest/{}/Sysdb/routing/bgp/config/".format(device_id) + self.logger.debug("get_device_ASN: URL {}".format(url)) data = None + try: data = self.client.get(url, timeout=self.__API_REQUEST_TOUT) - if data['notifications']: - for notification in data['notifications']: - for update in notification['updates']: - if update == 'asNumber': - return notification['updates'][update]['value']['value']['int'] + if data["notifications"]: + for notification in data["notifications"]: + for update in notification["updates"]: + if update == "asNumber": + return notification["updates"][update]["value"]["value"][ + "int" + ] except Exception as e: - raise SdnConnectorError("Invalid response from url {}: data {} - {}".format(url, data, str(e))) - raise SdnConnectorError("Unable to get AS in device {}, data {}".format(device_id, data)) + raise SdnConnectorError( + "Invalid response from url {}: data {} - {}".format(url, data, str(e)) + ) + + raise SdnConnectorError( + "Unable to get AS in device {}, data {}".format(device_id, data) + ) def __get_peer_MLAG(self, device_id): peer = None - url = '/api/v1/rest/{}/Sysdb/mlag/status/'.format(device_id) - self.logger.debug('get_MLAG_status: URL {}'.format(url)) + url = "/api/v1/rest/{}/Sysdb/mlag/status/".format(device_id) + self.logger.debug("get_MLAG_status: URL {}".format(url)) + try: data = self.client.get(url, timeout=self.__API_REQUEST_TOUT) - if data['notifications']: + + if data["notifications"]: found = False - for notification in data['notifications']: - for update in notification['updates']: - if update == 'systemId': - mlagSystemId = notification['updates'][update]['value'] + + for notification in data["notifications"]: + for update in notification["updates"]: + if update == "systemId": + mlagSystemId = notification["updates"][update]["value"] found = True break + if found: break + # search the MLAG System Id if found: for s in self.switches: - if self.switches[s]['serialNumber'] == device_id: + if self.switches[s]["serialNumber"] == device_id: continue - url = '/api/v1/rest/{}/Sysdb/mlag/status/'.format(self.switches[s]['serialNumber']) - self.logger.debug('Searching for MLAG system id {} in switch {}'.format(mlagSystemId, s)) + + url = "/api/v1/rest/{}/Sysdb/mlag/status/".format( + self.switches[s]["serialNumber"] + ) + self.logger.debug( + "Searching for MLAG system id {} in switch {}".format( + mlagSystemId, s + ) + ) data = self.client.get(url, timeout=self.__API_REQUEST_TOUT) found = False - for notification in data['notifications']: - for update in notification['updates']: - if update == 'systemId': - if mlagSystemId == notification['updates'][update]['value']: + + for notification in data["notifications"]: + for update in notification["updates"]: + if update == "systemId": + if ( + mlagSystemId + == notification["updates"][update]["value"] + ): peer = s found = True break + if found: break + if found: break + if peer is None: - self.logger.error('No Peer device found for device {} with MLAG address {}'.format(device_id, - mlagSystemId)) + self.logger.error( + "No Peer device found for device {} with 
MLAG address {}".format( + device_id, mlagSystemId + ) + ) else: - self.logger.debug('Peer MLAG for device {} - value {}'.format(device_id, peer)) + self.logger.debug( + "Peer MLAG for device {} - value {}".format(device_id, peer) + ) + return peer except Exception: - raise SdnConnectorError("Invalid response from url {}: data {}".format(url, data)) + raise SdnConnectorError( + "Invalid response from url {}: data {}".format(url, data) + ) def is_valid_destination(self, url): - """ Check that the provided WIM URL is correct - """ + """Check that the provided WIM URL is correct""" if re.match(self.__regex, url): return True elif self.is_valid_ipv4_address(url): @@ -1609,8 +1907,7 @@ class AristaSdnConnector(SdnConnectorBase): return self.is_valid_ipv6_address(url) def is_valid_ipv4_address(self, address): - """ Checks that the given IP is IPv4 valid - """ + """Checks that the given IP is IPv4 valid""" try: socket.inet_pton(socket.AF_INET, address) except AttributeError: # no inet_pton here, sorry @@ -1618,25 +1915,30 @@ class AristaSdnConnector(SdnConnectorBase): socket.inet_aton(address) except socket.error: return False - return address.count('.') == 3 + + return address.count(".") == 3 except socket.error: # not a valid address return False + return True def is_valid_ipv6_address(self, address): - """ Checks that the given IP is IPv6 valid - """ + """Checks that the given IP is IPv6 valid""" try: socket.inet_pton(socket.AF_INET6, address) except socket.error: # not a valid address return False + return True def delete_keys_from_dict(self, dict_del, lst_keys): if dict_del is None: return dict_del + dict_copy = {k: v for k, v in dict_del.items() if k not in lst_keys} + for k, v in dict_copy.items(): if isinstance(v, dict): dict_copy[k] = self.delete_keys_from_dict(v, lst_keys) + return dict_copy diff --git a/RO-SDN-arista_cloudvision/setup.py b/RO-SDN-arista_cloudvision/setup.py index 33bd29fd..cf1257e2 100644 --- a/RO-SDN-arista_cloudvision/setup.py +++ b/RO-SDN-arista_cloudvision/setup.py @@ -30,29 +30,32 @@ osm-ro pluging for arista_cloudvision SDN setup( name=_name, - description='OSM ro sdn plugin for arista with CloudVision', + description="OSM ro sdn plugin for arista with CloudVision", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='OSM_TECH@LIST.ETSI.ORG', - maintainer='Oscar Luis Peral', - maintainer_email='oscarluis.peral@atos.net', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="OSM_TECH@LIST.ETSI.ORG", + maintainer="Oscar Luis Peral", + maintainer_email="oscarluis.peral@atos.net", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ "requests", "uuid", "cvprac", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rosdn.plugins': ['rosdn_arista_cloudvision = osm_rosdn_arista_cloudvision.' 
- 'wimconn_arista:AristaSdnConnector'] + "osm_rosdn.plugins": [ + "rosdn_arista_cloudvision = osm_rosdn_arista_cloudvision.wimconn_arista:AristaSdnConnector" + ] }, ) diff --git a/RO-SDN-arista_cloudvision/tox.ini b/RO-SDN-arista_cloudvision/tox.ini index 564d2919..cfad8e99 100644 --- a/RO-SDN-arista_cloudvision/tox.ini +++ b/RO-SDN-arista_cloudvision/tox.ini @@ -27,7 +27,7 @@ install_command = python3 -m pip install -r requirements.txt -U {opts} {packages basepython = python3 deps = flake8 commands = flake8 osm_rosdn_arista_cloudvision --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-SDN-dpb/osm_rosdn_dpb/wimconn_dpb.py b/RO-SDN-dpb/osm_rosdn_dpb/wimconn_dpb.py index dc717e57..e08cdfdf 100755 --- a/RO-SDN-dpb/osm_rosdn_dpb/wimconn_dpb.py +++ b/RO-SDN-dpb/osm_rosdn_dpb/wimconn_dpb.py @@ -30,17 +30,20 @@ import logging import paramiko import requests import struct + # import sys from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError -class DpbSshInterface(): +class DpbSshInterface: """ Communicate with the DPB via SSH """ __LOGGER_NAME_EXT = ".ssh" __FUNCTION_MAP_POS = 1 - def __init__(self, username, password, wim_url, wim_port, network, auth_data, logger_name): + def __init__( + self, username, password, wim_url, wim_port, network, auth_data, logger_name + ): self.logger = logging.getLogger(logger_name + self.__LOGGER_NAME_EXT) self.__username = username self.__password = password @@ -62,41 +65,44 @@ class DpbSshInterface(): """post request to dpb via ssh notes: - - session_id need only be unique per ssh session, thus is currently safe if + - session_id need only be unique per ssh session, thus is currently safe if ro is restarted """ self._check_connection() + if data is None: data = {} - url_ext_info = url_params.split('/') + + url_ext_info = url_params.split("/") + for i in range(0, len(url_ext_info)): if url_ext_info[i] == "service": - data["service-id"] = int(url_ext_info[i+1]) + data["service-id"] = int(url_ext_info[i + 1]) + data["type"] = function[self.__FUNCTION_MAP_POS] data = { "session": self.__session_id, - "content": data + "content": data, } self.__session_id += 1 try: data = json.dumps(data).encode("utf-8") - data_packed = struct.pack( - ">I" + str(len(data)) + "s", len(data), data) + data_packed = struct.pack(">I" + str(len(data)) + "s", len(data), data) self.__stdin.write(data_packed) self.logger.debug("Data sent to DPB via SSH") except Exception as e: - raise SdnConnectorError( - "Failed to write via SSH | text: {}".format(e), 500) + raise SdnConnectorError("Failed to write via SSH | text: {}".format(e), 500) try: data_len = struct.unpack(">I", self.__stdout.read(4))[0] - data = struct.unpack(str(data_len) + "s", - self.__stdout.read(data_len))[0] + data = struct.unpack(str(data_len) + "s", self.__stdout.read(data_len))[0] + return json.loads(data).get("content", {}) except Exception as e: raise SdnConnectorError( - "Could not get response from WIM | text: {}".format(e), 500) + "Could not get response from WIM | text: {}".format(e), 500 + ) def get(self, function, url_params=""): raise SdnConnectorError("SSH Get not implemented", 500) @@ -104,69 +110,87 @@ class DpbSshInterface(): def __create_client(self): ssh_client = paramiko.SSHClient() ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + return ssh_client def 
__connect(self): private_key = None password = None + if self.__auth_data.get("auth_type", "PASS") == "KEY": private_key = self.__build_private_key_obj() + if self.__auth_data.get("auth_type", "PASS") == "PASS": password = self.__password try: - self.__ssh_client.connect(hostname=self.__url, - port=self.__port, - username=self.__username, - password=password, - pkey=private_key, - look_for_keys=False, - compress=False) + self.__ssh_client.connect( + hostname=self.__url, + port=self.__port, + username=self.__username, + password=password, + pkey=private_key, + look_for_keys=False, + compress=False, + ) stdin, stdout, stderr = self.__ssh_client.exec_command( - command=self.__network) + command=self.__network + ) except paramiko.BadHostKeyException as e: raise SdnConnectorError( - "Could not add SSH host key | text: {}".format(e), 500) + "Could not add SSH host key | text: {}".format(e), 500 + ) except paramiko.AuthenticationException as e: raise SdnConnectorError( - "Could not authorize SSH connection | text: {}".format(e), 400) + "Could not authorize SSH connection | text: {}".format(e), 400 + ) except paramiko.SSHException as e: raise SdnConnectorError( - "Could not establish the SSH connection | text: {}".format(e), 500) + "Could not establish the SSH connection | text: {}".format(e), 500 + ) except Exception as e: raise SdnConnectorError( - "Unknown error occurred when connecting via SSH | text: {}".format(e), 500) + "Unknown error occurred when connecting via SSH | text: {}".format(e), + 500, + ) try: data_len = struct.unpack(">I", stdout.read(4))[0] - data = json.loads(struct.unpack( - str(data_len) + "s", stdout.read(data_len))[0]) + data = json.loads( + struct.unpack(str(data_len) + "s", stdout.read(data_len))[0] + ) except Exception as e: raise SdnConnectorError( - "Failed to get response from DPB | text: {}".format(e), 500) + "Failed to get response from DPB | text: {}".format(e), 500 + ) + if "error" in data: - raise SdnConnectorError( - data.get("msg", data.get("error", "ERROR")), 500) + raise SdnConnectorError(data.get("msg", data.get("error", "ERROR")), 500) + self.logger.info("SSH connection to DPB established OK") + return stdin, stdout def __build_private_key_obj(self): try: - with open(self.__auth_data.get("key_file"), 'r') as key_file: + with open(self.__auth_data.get("key_file"), "r") as key_file: if self.__auth_data.get("key_type") == "RSA": - return paramiko.RSAKey.from_private_key(key_file, - password=self.__auth_data.get("key_pass", None)) + return paramiko.RSAKey.from_private_key( + key_file, password=self.__auth_data.get("key_pass", None) + ) elif self.__auth_data.get("key_type") == "ECDSA": - return paramiko.ECDSAKey.from_private_key(key_file, - password=self.__auth_data.get("key_pass", None)) + return paramiko.ECDSAKey.from_private_key( + key_file, password=self.__auth_data.get("key_pass", None) + ) else: raise SdnConnectorError("Key type not supported", 400) except Exception as e: raise SdnConnectorError( - "Could not load private SSH key | text: {}".format(e), 500) + "Could not load private SSH key | text: {}".format(e), 500 + ) -class DpbRestInterface(): +class DpbRestInterface: """ Communicate with the DPB via the REST API """ __LOGGER_NAME_EXT = ".rest" @@ -175,31 +199,34 @@ class DpbRestInterface(): def __init__(self, wim_url, wim_port, network, logger_name): self.logger = logging.getLogger(logger_name + self.__LOGGER_NAME_EXT) self.__base_url = "http://{}:{}/network/{}".format( - wim_url, str(wim_port), network) + wim_url, str(wim_port), network + ) 
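(The DpbSshInterface above exchanges length-prefixed JSON messages with the DPB: each request is serialized to UTF-8 JSON and prefixed with a 4-byte big-endian length, the struct format ">I", and responses are read back the same way. A minimal sketch of that framing outside any paramiko session; the helper names here are illustrative only, not part of the plugin:

    import json
    import struct

    def frame(payload):
        # Serialize to UTF-8 JSON and prepend a 4-byte big-endian length;
        # equivalent to struct.pack(">I" + str(len(data)) + "s", len(data), data).
        data = json.dumps(payload).encode("utf-8")
        return struct.pack(">I", len(data)) + data

    def unframe(stream):
        # Read the 4-byte length header, then exactly that many bytes of JSON.
        (length,) = struct.unpack(">I", stream.read(4))
        return json.loads(stream.read(length))

    # Round trip, e.g. unframe(io.BytesIO(frame({"session": 0, "content": {}}))).
)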
self.logger.info("REST defined OK") def post(self, function, url_params="", data=None, get_response=True): - url = self.__base_url + url_params + \ - "/" + function[self.__FUNCTION_MAP_POS] + url = self.__base_url + url_params + "/" + function[self.__FUNCTION_MAP_POS] + try: self.logger.info(data) response = requests.post(url, json=data) + if response.status_code != 200: raise SdnConnectorError( - "REST request failed (status code: {})".format(response.status_code)) + "REST request failed (status code: {})".format(response.status_code) + ) + if get_response: return response.json() except Exception as e: - raise SdnConnectorError( - "REST request failed | text: {}".format(e), 500) + raise SdnConnectorError("REST request failed | text: {}".format(e), 500) def get(self, function, url_params=""): url = self.__base_url + url_params + function[self.__FUNCTION_MAP_POS] + try: return requests.get(url) except Exception as e: - raise SdnConnectorError( - "REST request failed | text: {}".format(e), 500) + raise SdnConnectorError("REST request failed | text: {}".format(e), 500) class DpbConnector(SdnConnectorBase): @@ -210,10 +237,7 @@ class DpbConnector(SdnConnectorBase): __SUPPORTED_CONNECTION_TYPES = ["REST", "SSH"] __SUPPORTED_SSH_AUTH_TYPES = ["KEY", "PASS"] __SUPPORTED_SSH_KEY_TYPES = ["ECDSA", "RSA"] - __STATUS_MAP = { - "ACTIVE": "ACTIVE", - "ACTIVATING": "BUILD", - "FAILED": "ERROR"} + __STATUS_MAP = {"ACTIVE": "ACTIVE", "ACTIVATING": "BUILD", "FAILED": "ERROR"} __ACTIONS_MAP = { "CREATE": ("create-service", "new-service"), "DEFINE": ("define", "define-service"), @@ -222,7 +246,7 @@ class DpbConnector(SdnConnectorBase): "DEACTIVATE": ("deactivate", "deactivate-service"), "CHECK": ("await-status", "await-service-status"), "GET": ("services", "NOT IMPLEMENTED"), - "RESET": ("reset", "NOT IMPLEMENTED") + "RESET": ("reset", "NOT IMPLEMENTED"), } def __init__(self, wim, wim_account, config): @@ -237,137 +261,201 @@ class DpbConnector(SdnConnectorBase): self.__password = self.__account.get("passwd", "") self.__username = self.__account.get("user", "") self.__network = self.__cli_config.get("network", "") - self.__connection_type = self.__cli_config.get( - "connection_type", "REST") + self.__connection_type = self.__cli_config.get("connection_type", "REST") self.__port = self.__cli_config.get( - "port", (80 if self.__connection_type == "REST" else 22)) + "port", (80 if self.__connection_type == "REST" else 22) + ) self.__ssh_auth = self.__cli_config.get("ssh_auth", None) if self.__connection_type == "SSH": - interface = DpbSshInterface(self.__username, - self.__password, - self.__url, - self.__port, - self.__network, - self.__ssh_auth, - self.__LOGGER_NAME) + interface = DpbSshInterface( + self.__username, + self.__password, + self.__url, + self.__port, + self.__network, + self.__ssh_auth, + self.__LOGGER_NAME, + ) elif self.__connection_type == "REST": - interface = DpbRestInterface(self.__url, - self.__port, - self.__network, - self.__LOGGER_NAME) + interface = DpbRestInterface( + self.__url, self.__port, self.__network, self.__LOGGER_NAME + ) else: raise SdnConnectorError( - "Connection type not supported (must be SSH or REST)", 400) + "Connection type not supported (must be SSH or REST)", 400 + ) + self.__post = interface.post self.__get = interface.get self.logger.info("DPB WimConn Init OK") def create_connectivity_service(self, service_type, connection_points, **kwargs): self.logger.info("Creating a connectivity service") + try: response = self.__post(self.__ACTIONS_MAP.get("CREATE")) + if 
"service-id" in response: service_id = int(response.get("service-id")) self.logger.debug("created service id {}".format(service_id)) else: raise SdnConnectorError( - "Invalid create service response (could be an issue with the DPB)", 500) + "Invalid create service response (could be an issue with the DPB)", + 500, + ) + data = {"segment": []} + for point in connection_points: - data["segment"].append({ - "terminal-name": point.get("service_endpoint_id"), - "label": int((point.get("service_endpoint_encapsulation_info")).get("vlan")), - "ingress-bw": 10.0, - "egress-bw": 10.0}) + data["segment"].append( + { + "terminal-name": point.get("service_endpoint_id"), + "label": int( + (point.get("service_endpoint_encapsulation_info")).get( + "vlan" + ) + ), + "ingress-bw": 10.0, + "egress-bw": 10.0, + } + ) # "ingress-bw": (bandwidth.get(point.get("service_endpoint_id"))).get("ingress"), # "egress-bw": (bandwidth.get(point.get("service_endpoint_id"))).get("egress")} - self.__post(self.__ACTIONS_MAP.get("DEFINE"), - "/service/"+str(service_id), data, get_response=False) - self.__post(self.__ACTIONS_MAP.get("ACTIVATE"), - "/service/"+str(service_id), get_response=False) - self.logger.debug( - "Created connectivity service id:{}".format(service_id)) + self.__post( + self.__ACTIONS_MAP.get("DEFINE"), + "/service/" + str(service_id), + data, + get_response=False, + ) + self.__post( + self.__ACTIONS_MAP.get("ACTIVATE"), + "/service/" + str(service_id), + get_response=False, + ) + self.logger.debug("Created connectivity service id:{}".format(service_id)) + return (str(service_id), None) except Exception as e: raise SdnConnectorError( - "Connectivity service could not be made | text: {}".format(e), 500) + "Connectivity service could not be made | text: {}".format(e), 500 + ) def get_connectivity_service_status(self, service_uuid, conn_info=None): self.logger.info( - "Checking connectivity service status id:{}".format(service_uuid)) - data = { - "timeout-millis": 10000, - "acceptable": ["ACTIVE", "FAILED"] - } + "Checking connectivity service status id:{}".format(service_uuid) + ) + data = {"timeout-millis": 10000, "acceptable": ["ACTIVE", "FAILED"]} + try: - response = self.__post(self.__ACTIONS_MAP.get( - "CHECK"), "/service/"+service_uuid, data) + response = self.__post( + self.__ACTIONS_MAP.get("CHECK"), + "/service/" + service_uuid, + data, + ) + if "status" in response: status = response.get("status", None) self.logger.info("CHECKED CONNECTIVITY SERVICE STATUS") + return {"wim_status": self.__STATUS_MAP.get(status)} else: raise SdnConnectorError( - "Invalid status check response (could be an issue with the DPB)", 500) + "Invalid status check response (could be an issue with the DPB)", + 500, + ) except Exception as e: raise SdnConnectorError( - "Failed to check service status | text: {}".format(e), 500) + "Failed to check service status | text: {}".format(e), 500 + ) def delete_connectivity_service(self, service_uuid, conn_info=None): - self.logger.info( - "Deleting connectivity service id: {}".format(service_uuid)) + self.logger.info("Deleting connectivity service id: {}".format(service_uuid)) + try: - self.__post(self.__ACTIONS_MAP.get("RELEASE"), - "/service/"+service_uuid, get_response=False) + self.__post( + self.__ACTIONS_MAP.get("RELEASE"), + "/service/" + service_uuid, + get_response=False, + ) except Exception as e: raise SdnConnectorError( - "Could not delete service id:{} (could be an issue with the DPB): {}".format(service_uuid, e), 500) - self.logger.debug( - "Deleted connectivity service 
id:{}".format(service_uuid)) + "Could not delete service id:{} (could be an issue with the DPB): {}".format( + service_uuid, e + ), + 500, + ) + + self.logger.debug("Deleted connectivity service id:{}".format(service_uuid)) + return None - def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs): - self.logger.info( - "Editing connectivity service id: {}".format(service_uuid)) - data = { - "timeout-millis": 10000, - "acceptable": ["DORMANT"] - } + def edit_connectivity_service( + self, service_uuid, conn_info=None, connection_points=None, **kwargs + ): + self.logger.info("Editing connectivity service id: {}".format(service_uuid)) + data = {"timeout-millis": 10000, "acceptable": ["DORMANT"]} + try: - self.__post(self.__ACTIONS_MAP.get("RESET"), - "/service/"+service_uuid, get_response=False) - response = self.__post(self.__ACTIONS_MAP.get( - "CHECK"), "/service/"+service_uuid, data) + self.__post( + self.__ACTIONS_MAP.get("RESET"), + "/service/" + service_uuid, + get_response=False, + ) + response = self.__post( + self.__ACTIONS_MAP.get("CHECK"), + "/service/" + service_uuid, + data, + ) + if "status" in response: - self.logger.debug( - "Connectivity service {} reset".format(service_uuid)) + self.logger.debug("Connectivity service {} reset".format(service_uuid)) else: raise SdnConnectorError( - "Invalid status check response (could be an issue with the DPB)", 500) + "Invalid status check response (could be an issue with the DPB)", + 500, + ) except Exception as e: - raise SdnConnectorError( - "Failed to reset service | text: {}".format(e), 500) + raise SdnConnectorError("Failed to reset service | text: {}".format(e), 500) + try: data = {"segment": []} + for point in connection_points: - data["segment"].append({ - "terminal-name": point.get("service_endpoint_id"), - "label": int((point.get("service_endpoint_encapsulation_info")).get("vlan")), - "ingress-bw": 10.0, - "egress-bw": 10.0}) + data["segment"].append( + { + "terminal-name": point.get("service_endpoint_id"), + "label": int( + (point.get("service_endpoint_encapsulation_info")).get( + "vlan" + ) + ), + "ingress-bw": 10.0, + "egress-bw": 10.0, + } + ) # "ingress-bw": (bandwidth.get(point.get("service_endpoint_id"))).get("ingress"), # "egress-bw": (bandwidth.get(point.get("service_endpoint_id"))).get("egress")} - self.__post(self.__ACTIONS_MAP.get("DEFINE"), "/service/" + - str(service_uuid), data, get_response=False) - self.__post(self.__ACTIONS_MAP.get("ACTIVATE"), - "/service/"+str(service_uuid), get_response=False) + + self.__post( + self.__ACTIONS_MAP.get("DEFINE"), + "/service/" + str(service_uuid), + data, + get_response=False, + ) + self.__post( + self.__ACTIONS_MAP.get("ACTIVATE"), + "/service/" + str(service_uuid), + get_response=False, + ) except Exception as e: raise SdnConnectorError( - "Failed to edit connectivity service | text: {}".format(e), 500) - self.logger.debug( - "Edited connectivity service {}".format(service_uuid)) + "Failed to edit connectivity service | text: {}".format(e), 500 + ) + + self.logger.debug("Edited connectivity service {}".format(service_uuid)) + return conn_info def __check_service(self, serv_type, points, kwargs): diff --git a/RO-SDN-dpb/setup.py b/RO-SDN-dpb/setup.py index a1b31d7a..e2afbc24 100644 --- a/RO-SDN-dpb/setup.py +++ b/RO-SDN-dpb/setup.py @@ -30,18 +30,20 @@ osm-ro plugin for dpb SDN setup( name=_name, - description='OSM ro sdn plugin for dpb', + description="OSM ro sdn plugin for dpb", long_description=README, - version_command=('git 
describe --match v* --tags --long --dirty', 'pep440-git-full'),
+    version_command=(
+        "git describe --match v* --tags --long --dirty",
+        "pep440-git-full",
+    ),
     # version=VERSION,
     # python_requires='>3.5.0',
-    author='ETSI OSM',
-    author_email='OSM_TECH@LIST.ETSI.ORG',
-    maintainer='ETSI OSM',
-    maintainer_email='OSM_TECH@LIST.ETSI.ORG',
-    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
-    license='Apache 2.0',
-
+    author="ETSI OSM",
+    author_email="OSM_TECH@LIST.ETSI.ORG",
+    maintainer="ETSI OSM",
+    maintainer_email="OSM_TECH@LIST.ETSI.ORG",
+    url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+    license="Apache 2.0",
     packages=[_name],
     include_package_data=True,
     install_requires=[
@@ -49,8 +51,8 @@ setup(
         "requests",
         "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
     ],
-    setup_requires=['setuptools-version-command'],
+    setup_requires=["setuptools-version-command"],
     entry_points={
-        'osm_rosdn.plugins': ['rosdn_dpb = osm_rosdn_dpb.wimconn_dpb:DpbConnector'],
+        "osm_rosdn.plugins": ["rosdn_dpb = osm_rosdn_dpb.wimconn_dpb:DpbConnector"],
     },
 )
diff --git a/RO-SDN-dpb/tox.ini b/RO-SDN-dpb/tox.ini
index bae20e2e..e223da48 100644
--- a/RO-SDN-dpb/tox.ini
+++ b/RO-SDN-dpb/tox.ini
@@ -27,7 +27,7 @@ commands=python3 -m unittest discover -v
 basepython = python3
 deps = flake8
 commands = flake8 osm_rosdn_dpb --max-line-length 120 \
-    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241

[testenv:unittest]
basepython = python3
diff --git a/RO-SDN-dynpac/osm_rosdn_dynpac/wimconn_dynpac.py b/RO-SDN-dynpac/osm_rosdn_dynpac/wimconn_dynpac.py
index 0e16cfcd..7a84f222 100644
--- a/RO-SDN-dynpac/osm_rosdn_dynpac/wimconn_dynpac.py
+++ b/RO-SDN-dynpac/osm_rosdn_dynpac/wimconn_dynpac.py
@@ -30,35 +30,34 @@ from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError


 class SdnError(Enum):
-    UNREACHABLE = 'Unable to reach the WIM.',
-    SERVICE_TYPE_ERROR = 'Unexpected service_type. Only "L2" is accepted.',
-    CONNECTION_POINTS_SIZE = \
-        'Unexpected number of connection points: 2 expected.',
-    ENCAPSULATION_TYPE = \
-        'Unexpected service_endpoint_encapsulation_type. \
-        Only "dotq1" is accepted.',
-    BANDWIDTH = 'Unable to get the bandwidth.',
-    STATUS = 'Unable to get the status for the service.',
-    DELETE = 'Unable to delete service.',
-    CLEAR_ALL = 'Unable to clear all the services',
-    UNKNOWN_ACTION = 'Unknown action invoked.',
-    BACKUP = 'Unable to get the backup parameter.',
-    UNSUPPORTED_FEATURE = "Unsupported feature",
+    UNREACHABLE = "Unable to reach the WIM."
+    SERVICE_TYPE_ERROR = 'Unexpected service_type. Only "L2" is accepted.'
+    CONNECTION_POINTS_SIZE = "Unexpected number of connection points: 2 expected."
+    ENCAPSULATION_TYPE = (
+        'Unexpected service_endpoint_encapsulation_type. Only "dotq1" is accepted.'
+    )
+    BANDWIDTH = "Unable to get the bandwidth."
+    STATUS = "Unable to get the status for the service."
+    DELETE = "Unable to delete service."
+    CLEAR_ALL = "Unable to clear all the services"
+    UNKNOWN_ACTION = "Unknown action invoked."
+    BACKUP = "Unable to get the backup parameter."
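(Worth noting about the SdnError hunk above: the removed members all ended in trailing commas, which in Python makes each enum value a one-element tuple; the added lines drop the commas, so the values become plain strings. A quick illustration of that pitfall; the enum names here are hypothetical, not part of the patch:

    from enum import Enum

    class WithComma(Enum):
        UNREACHABLE = "Unable to reach the WIM.",  # trailing comma: value is a tuple

    class WithoutComma(Enum):
        UNREACHABLE = "Unable to reach the WIM."  # no comma: value is a plain str

    assert WithComma.UNREACHABLE.value == ("Unable to reach the WIM.",)
    assert WithoutComma.UNREACHABLE.value == "Unable to reach the WIM."
)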
+ UNSUPPORTED_FEATURE = "Unsupported feature" UNAUTHORIZED = "Failed while authenticating" class SdnAPIActions(Enum): - CHECK_CONNECTIVITY = "CHECK_CONNECTIVITY", - CREATE_SERVICE = "CREATE_SERVICE", - DELETE_SERVICE = "DELETE_SERVICE", - CLEAR_ALL = "CLEAR_ALL", - SERVICE_STATUS = "SERVICE_STATUS", + CHECK_CONNECTIVITY = "CHECK_CONNECTIVITY" + CREATE_SERVICE = "CREATE_SERVICE" + DELETE_SERVICE = "DELETE_SERVICE" + CLEAR_ALL = "CLEAR_ALL" + SERVICE_STATUS = "SERVICE_STATUS" class DynpacConnector(SdnConnectorBase): __supported_service_types = ["ELINE (L2)", "ELINE"] __supported_encapsulation_types = ["dot1q"] - __WIM_LOGGER = 'ro.sdn.dynpac' + __WIM_LOGGER = "ro.sdn.dynpac" __ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type" __ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info" __BACKUP_PARAM = "backup" @@ -87,7 +86,7 @@ class DynpacConnector(SdnConnectorBase): body = self.__get_body(service_type, connection_points, kwargs) - headers = {'Content-type': 'application/x-www-form-urlencoded'} + headers = {"Content-type": "application/x-www-form-urlencoded"} endpoint = "{}/service/create".format(self.__wim_url) try: @@ -101,17 +100,20 @@ class DynpacConnector(SdnConnectorBase): description = "Description: {}.".format(error.get("description")) exception = reason + description self.__exception(exception, http_code=response.status_code) + uuid = response.content self.logger.info("Service with uuid {} created.".format(uuid)) + return (uuid, None) - def edit_connectivity_service(self, service_uuid, - conn_info, connection_points, - **kwargs): + def edit_connectivity_service( + self, service_uuid, conn_info, connection_points, **kwargs + ): self.__exception(SdnError.UNSUPPORTED_FEATURE, http_code=501) def get_connectivity_service_status(self, service_uuid): endpoint = "{}/service/status/{}".format(self.__wim_url, service_uuid) + try: response = requests.get(endpoint) except requests.exceptions.RequestException as e: @@ -119,16 +121,21 @@ class DynpacConnector(SdnConnectorBase): if response.status_code != 200: self.__exception(SdnError.STATUS, http_code=response.status_code) - self.logger.info("Status for service with uuid {}: {}" - .format(service_uuid, response.content)) + + self.logger.info( + "Status for service with uuid {}: {}".format(service_uuid, response.content) + ) + return response.content def delete_connectivity_service(self, service_uuid, conn_info): endpoint = "{}/service/delete/{}".format(self.__wim_url, service_uuid) + try: response = requests.delete(endpoint) except requests.exceptions.RequestException as e: self.__exception(e.message, http_code=503) + if response.status_code != 200: self.__exception(SdnError.DELETE, http_code=response.status_code) @@ -136,15 +143,18 @@ class DynpacConnector(SdnConnectorBase): def clear_all_connectivity_services(self): endpoint = "{}/service/clearAll".format(self.__wim_url) + try: response = requests.delete(endpoint) http_code = response.status_code except requests.exceptions.RequestException as e: self.__exception(e.message, http_code=503) + if http_code != 200: self.__exception(SdnError.CLEAR_ALL, http_code=http_code) self.logger.info("{} services deleted".format(response.content)) + return "{} services deleted".format(response.content) def check_connectivity(self): @@ -158,6 +168,7 @@ class DynpacConnector(SdnConnectorBase): if http_code != 200: self.__exception(SdnError.UNREACHABLE, http_code=http_code) + self.logger.info("Connectivity checked") def check_credentials(self): @@ -172,16 +183,20 @@ class 
DynpacConnector(SdnConnectorBase): if http_code != 200: self.__exception(SdnError.UNAUTHORIZED, http_code=http_code) + self.logger.info("Credentials checked") # Private functions def __exception(self, x, **kwargs): http_code = kwargs.get("http_code") + if hasattr(x, "value"): error = x.value else: error = x + self.logger.error(error) + raise SdnConnectorError(error, http_code=http_code) def __check_service(self, service_type, connection_points, kwargs): @@ -193,41 +208,56 @@ class DynpacConnector(SdnConnectorBase): for connection_point in connection_points: enc_type = connection_point.get(self.__ENCAPSULATION_TYPE_PARAM) + if enc_type not in self.__supported_encapsulation_types: self.__exception(SdnError.ENCAPSULATION_TYPE, http_code=400) # Commented out for as long as parameter isn't implemented # bandwidth = kwargs.get(self.__BANDWIDTH_PARAM) # if not isinstance(bandwidth, int): - # self.__exception(SdnError.BANDWIDTH, http_code=400) + # self.__exception(SdnError.BANDWIDTH, http_code=400) # Commented out for as long as parameter isn't implemented # backup = kwargs.get(self.__BACKUP_PARAM) # if not isinstance(backup, bool): - # self.__exception(SdnError.BACKUP, http_code=400) + # self.__exception(SdnError.BACKUP, http_code=400) def __get_body(self, service_type, connection_points, kwargs): port_mapping = self.__config.get("service_endpoint_mapping") selected_ports = [] + for connection_point in connection_points: endpoint_id = connection_point.get(self.__SERVICE_ENDPOINT_PARAM) - port = filter(lambda x: x.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id, port_mapping)[0] + port = filter( + lambda x: x.get(self.__WAN_SERVICE_ENDPOINT_PARAM) == endpoint_id, + port_mapping, + )[0] port_info = port.get(self.__WAN_MAPPING_INFO_PARAM) selected_ports.append(port_info) + if service_type == "ELINE (L2)" or service_type == "ELINE": service_type = "L2" + body = { - "connection_points": [{ - "wan_switch_dpid": selected_ports[0].get(self.__SW_ID_PARAM), - "wan_switch_port": selected_ports[0].get(self.__SW_PORT_PARAM), - "wan_vlan": connection_points[0].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM) - }, { - "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM), - "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM), - "wan_vlan": connection_points[1].get(self.__ENCAPSULATION_INFO_PARAM).get(self.__VLAN_PARAM) - }], + "connection_points": [ + { + "wan_switch_dpid": selected_ports[0].get(self.__SW_ID_PARAM), + "wan_switch_port": selected_ports[0].get(self.__SW_PORT_PARAM), + "wan_vlan": connection_points[0] + .get(self.__ENCAPSULATION_INFO_PARAM) + .get(self.__VLAN_PARAM), + }, + { + "wan_switch_dpid": selected_ports[1].get(self.__SW_ID_PARAM), + "wan_switch_port": selected_ports[1].get(self.__SW_PORT_PARAM), + "wan_vlan": connection_points[1] + .get(self.__ENCAPSULATION_INFO_PARAM) + .get(self.__VLAN_PARAM), + }, + ], "bandwidth": 100, # Hardcoded for as long as parameter isn't implemented "service_type": service_type, - "backup": False # Hardcoded for as long as parameter isn't implemented + "backup": False, # Hardcoded for as long as parameter isn't implemented } + return "body={}".format(json.dumps(body)) diff --git a/RO-SDN-dynpac/setup.py b/RO-SDN-dynpac/setup.py index a1c7c718..ba496225 100644 --- a/RO-SDN-dynpac/setup.py +++ b/RO-SDN-dynpac/setup.py @@ -30,26 +30,30 @@ osm-ro pluging for dynpac SDN setup( name=_name, - description='OSM ro sdn plugin for dynpac', + description="OSM ro sdn plugin for dynpac", long_description=README, - version_command=('git 
describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='OSM_TECH@LIST.ETSI.ORG', - maintainer='ETSI OSM', - maintainer_email='OSM_TECH@LIST.ETSI.ORG', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="OSM_TECH@LIST.ETSI.ORG", + maintainer="ETSI OSM", + maintainer_email="OSM_TECH@LIST.ETSI.ORG", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ "requests", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rosdn.plugins': ['rosdn_dynpac = osm_rosdn_dynpac.wimconn_dynpac:DynpacConnector'], + "osm_rosdn.plugins": [ + "rosdn_dynpac = osm_rosdn_dynpac.wimconn_dynpac:DynpacConnector" + ], }, ) diff --git a/RO-SDN-dynpac/tox.ini b/RO-SDN-dynpac/tox.ini index 2bd23ea3..cc6443f7 100644 --- a/RO-SDN-dynpac/tox.ini +++ b/RO-SDN-dynpac/tox.ini @@ -26,7 +26,7 @@ install_command = python3 -m pip install -r requirements.txt -U {opts} {packages basepython = python3 deps = flake8 commands = flake8 osm_rosdn_dynpac --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/floodlight_of.py b/RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/floodlight_of.py index 4829386d..238b4347 100644 --- a/RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/floodlight_of.py +++ b/RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/floodlight_of.py @@ -34,7 +34,11 @@ __date__ = "$28-oct-2014 12:07:15$" import json import requests import logging -from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnUnexpectedResponse, OpenflowConnConnectionException +from osm_ro_plugin.openflow_conn import ( + OpenflowConn, + OpenflowConnUnexpectedResponse, + OpenflowConnConnectionException, +) class OfConnFloodLight(OpenflowConn): @@ -59,12 +63,16 @@ class OfConnFloodLight(OpenflowConn): """ # check params url = params.get("of_url") + if not url: raise ValueError("'url' must be provided") + if not url.startswith("http"): url = "http://" + url + if not url.endswith("/"): url = url + "/" + self.url = url OpenflowConn.__init__(self, params) @@ -74,9 +82,12 @@ class OfConnFloodLight(OpenflowConn): self.pp2ofi = {} # From Physical Port to OpenFlow Index self.ofi2pp = {} # From OpenFlow Index to Physical Port - self.headers = {'content-type': 'application/json', 'Accept': 'application/json'} + self.headers = { + "content-type": "application/json", + "Accept": "application/json", + } self.version = None - self.logger = logging.getLogger('ro.sdn.floodlightof') + self.logger = logging.getLogger("ro.sdn.floodlightof") self.logger.setLevel(params.get("of_debug", "ERROR")) self._set_version(params.get("of_version")) @@ -125,42 +136,69 @@ class OfConnFloodLight(OpenflowConn): parameter is missing or wrong """ try: - of_response = 
requests.get(self.url + "wm/core/controller/switches/json", headers=self.headers) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + of_response = requests.get( + self.url + "wm/core/controller/switches/json", headers=self.headers + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("get_of_switches " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("get_of_switches " + error_text) info = of_response.json() + if not isinstance(info, (list, tuple)): - self.logger.error("get_of_switches. Unexpected response not a list %s", str(type(info))) - raise OpenflowConnUnexpectedResponse("Unexpected response, not a list. Wrong version?") + self.logger.error( + "get_of_switches. Unexpected response not a list %s", + str(type(info)), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response, not a list. Wrong version?" + ) + if len(info) == 0: return info + # autodiscover version if self.version is None: - if 'dpid' in info[0] and 'inetAddress' in info[0]: + if "dpid" in info[0] and "inetAddress" in info[0]: self._set_version("0.9") # elif 'switchDPID' in info[0] and 'inetAddress' in info[0]: # self._set_version("1.X") else: - self.logger.error("get_of_switches. Unexpected response, not found 'dpid' or 'switchDPID' " - "field: %s", str(info[0])) - raise OpenflowConnUnexpectedResponse("Unexpected response, not found 'dpid' or " - "'switchDPID' field. Wrong version?") + self.logger.error( + "get_of_switches. Unexpected response, not found 'dpid' or 'switchDPID' " + "field: %s", + str(info[0]), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response, not found 'dpid' or " + "'switchDPID' field. Wrong version?" + ) switch_list = [] for switch in info: - switch_list.append((switch[self.ver_names["dpid"]], switch['inetAddress'])) + switch_list.append( + (switch[self.ver_names["dpid"]], switch["inetAddress"]) + ) + return switch_list except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_switches " + error_text) + raise OpenflowConnConnectionException(error_text) except Exception as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_switches " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def get_of_rules(self, translate_of_ports=True): @@ -179,48 +217,71 @@ class OfConnFloodLight(OpenflowConn): switch: DPID, all Raise an openflowconnUnexpectedResponse exception if fails with text_error """ - try: # get translation, autodiscover version + if len(self.ofi2pp) == 0: self.obtain_port_correspondence() - of_response = requests.get(self.url + "wm/{}/list/{}/json".format(self.ver_names["URLmodifier"], self.dpid), - headers=self.headers) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + of_response = requests.get( + self.url + + "wm/{}/list/{}/json".format(self.ver_names["URLmodifier"], self.dpid), + headers=self.headers, + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("get_of_rules " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("get_of_rules " + error_text) info = of_response.json() + if type(info) != dict: - self.logger.error("get_of_rules. 
Unexpected response not a dict %s", str(type(info))) - raise OpenflowConnUnexpectedResponse("Unexpected response, not a dict. Wrong version?") + self.logger.error( + "get_of_rules. Unexpected response not a dict %s", str(type(info)) + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response, not a dict. Wrong version?" + ) + rule_list = [] for switch, switch_info in info.items(): if switch_info is None: continue + if str(switch) != self.dpid: continue + for name, details in switch_info.items(): - rule = { - "name": name, - "switch": str(switch) - } + rule = {"name": name, "switch": str(switch)} # rule["active"] = "true" rule["priority"] = int(details["priority"]) + if self.version[0] == "0": if translate_of_ports: - rule["ingress_port"] = self.ofi2pp[details["match"]["inputPort"]] + rule["ingress_port"] = self.ofi2pp[ + details["match"]["inputPort"] + ] else: rule["ingress_port"] = str(details["match"]["inputPort"]) + dst_mac = details["match"]["dataLayerDestination"] + if dst_mac != "00:00:00:00:00:00": rule["dst_mac"] = dst_mac + vlan = details["match"]["dataLayerVirtualLan"] + if vlan != -1: rule["vlan_id"] = vlan + actionlist = [] + for action in details["actions"]: if action["type"] == "OUTPUT": if translate_of_ports: @@ -231,51 +292,82 @@ class OfConnFloodLight(OpenflowConn): elif action["type"] == "STRIP_VLAN": actionlist.append(("vlan", None)) elif action["type"] == "SET_VLAN_ID": - actionlist.append(("vlan", action["virtualLanIdentifier"])) + actionlist.append( + ("vlan", action["virtualLanIdentifier"]) + ) else: actionlist.append((action["type"], str(action))) - self.logger.warning("get_of_rules() Unknown action in rule %s: %s", rule["name"], - str(action)) + self.logger.warning( + "get_of_rules() Unknown action in rule %s: %s", + rule["name"], + str(action), + ) + rule["actions"] = actionlist elif self.version[0] == "1": if translate_of_ports: - rule["ingress_port"] = self.ofi2pp[details["match"]["in_port"]] + rule["ingress_port"] = self.ofi2pp[ + details["match"]["in_port"] + ] else: rule["ingress_port"] = details["match"]["in_port"] + if "eth_dst" in details["match"]: dst_mac = details["match"]["eth_dst"] if dst_mac != "00:00:00:00:00:00": rule["dst_mac"] = dst_mac + if "eth_vlan_vid" in details["match"]: vlan = int(details["match"]["eth_vlan_vid"], 16) & 0xFFF rule["vlan_id"] = str(vlan) + actionlist = [] - for action in details["instructions"]["instruction_apply_actions"]: + for action in details["instructions"][ + "instruction_apply_actions" + ]: if action == "output": if translate_of_ports: - port = self.ofi2pp[details["instructions"]["instruction_apply_actions"]["output"]] + port = self.ofi2pp[ + details["instructions"][ + "instruction_apply_actions" + ]["output"] + ] else: - port = details["instructions"]["instruction_apply_actions"]["output"] + port = details["instructions"][ + "instruction_apply_actions" + ]["output"] actionlist.append(("out", port)) elif action == "strip_vlan": actionlist.append(("vlan", None)) elif action == "set_vlan_vid": actionlist.append( - ("vlan", details["instructions"]["instruction_apply_actions"]["set_vlan_vid"])) + ( + "vlan", + details["instructions"][ + "instruction_apply_actions" + ]["set_vlan_vid"], + ) + ) else: - self.logger.error("get_of_rules Unknown action in rule %s: %s", rule["name"], - str(action)) + self.logger.error( + "get_of_rules Unknown action in rule %s: %s", + rule["name"], + str(action), + ) # actionlist.append((action, str(details["instructions"]["instruction_apply_actions"]))) + rule_list.append(rule) return 
rule_list except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_rules " + error_text) + raise OpenflowConnConnectionException(error_text) except Exception as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_rules " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def obtain_port_correspondence(self): @@ -285,73 +377,112 @@ class OfConnFloodLight(OpenflowConn): Raise an openflowconnUnexpectedResponse exception if fails with text_error """ try: - of_response = requests.get(self.url + "wm/core/controller/switches/json", headers=self.headers) + of_response = requests.get( + self.url + "wm/core/controller/switches/json", headers=self.headers + ) # print vim_response.status_code - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("obtain_port_correspondence " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("obtain_port_correspondence " + error_text) info = of_response.json() if not isinstance(info, (list, tuple)): - raise OpenflowConnUnexpectedResponse("unexpected openflow response, not a list. Wrong version?") + raise OpenflowConnUnexpectedResponse( + "unexpected openflow response, not a list. Wrong version?" + ) index = -1 if len(info) > 0: # autodiscover version if self.version is None: - if 'dpid' in info[0] and 'ports' in info[0]: + if "dpid" in info[0] and "ports" in info[0]: self._set_version("0.9") - elif 'switchDPID' in info[0]: + elif "switchDPID" in info[0]: self._set_version("1.X") else: - raise OpenflowConnUnexpectedResponse("unexpected openflow response, Wrong version?") + raise OpenflowConnUnexpectedResponse( + "unexpected openflow response, Wrong version?" + ) for i, info_item in enumerate(info): if info_item[self.ver_names["dpid"]] == self.dpid: index = i break + if index == -1: - text = "DPID '{}' not present in controller {}".format(self.dpid, self.url) + text = "DPID '{}' not present in controller {}".format( + self.dpid, self.url + ) # print self.name, ": get_of_controller_info ERROR", text + raise OpenflowConnUnexpectedResponse(text) else: if self.version[0] == "0": ports = info[index]["ports"] else: # version 1.X - of_response = requests.get(self.url + "wm/core/switch/{}/port-desc/json".format(self.dpid), - headers=self.headers) + of_response = requests.get( + self.url + "wm/core/switch/{}/port-desc/json".format(self.dpid), + headers=self.headers, + ) # print vim_response.status_code - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("obtain_port_correspondence " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("obtain_port_correspondence " + error_text) info = of_response.json() + if type(info) != dict: - raise OpenflowConnUnexpectedResponse("unexpected openflow port-desc response, " - "not a dict. Wrong version?") + raise OpenflowConnUnexpectedResponse( + "unexpected openflow port-desc response, " + "not a dict. Wrong version?" 
+ ) + if "portDesc" not in info: - raise OpenflowConnUnexpectedResponse("unexpected openflow port-desc response, " - "'portDesc' not found. Wrong version?") - if type(info["portDesc"]) != list and type(info["portDesc"]) != tuple: - raise OpenflowConnUnexpectedResponse("unexpected openflow port-desc response at " - "'portDesc', not a list. Wrong version?") + raise OpenflowConnUnexpectedResponse( + "unexpected openflow port-desc response, " + "'portDesc' not found. Wrong version?" + ) + + if ( + type(info["portDesc"]) != list + and type(info["portDesc"]) != tuple + ): + raise OpenflowConnUnexpectedResponse( + "unexpected openflow port-desc response at " + "'portDesc', not a list. Wrong version?" + ) + ports = info["portDesc"] + for port in ports: self.pp2ofi[str(port["name"])] = str(port["portNumber"]) self.ofi2pp[port["portNumber"]] = str(port["name"]) # print self.name, ": get_of_controller_info ports:", self.pp2ofi + return self.pp2ofi except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("obtain_port_correspondence " + error_text) + raise OpenflowConnConnectionException(error_text) except Exception as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("obtain_port_correspondence " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def del_flow(self, flow_name): @@ -365,24 +496,34 @@ class OfConnFloodLight(OpenflowConn): if self.version is None: self.get_of_switches() - of_response = requests.delete(self.url + "wm/{}/json".format(self.ver_names["URLmodifier"]), - headers=self.headers, - data='{{"switch":"{}","name":"{}"}}'.format(self.dpid, flow_name)) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + of_response = requests.delete( + self.url + "wm/{}/json".format(self.ver_names["URLmodifier"]), + headers=self.headers, + data='{{"switch":"{}","name":"{}"}}'.format(self.dpid, flow_name), + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("del_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("del_flow OK " + error_text) + return None except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("del_flow " + error_text) + raise OpenflowConnConnectionException(error_text) except Exception as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("del_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def new_flow(self, data): @@ -406,40 +547,57 @@ class OfConnFloodLight(OpenflowConn): try: # We have to build the data for the floodlight call from the generic data - sdata = {'active': "true", "name": data["name"]} + sdata = {"active": "true", "name": data["name"]} + if data.get("priority"): sdata["priority"] = str(data["priority"]) + if data.get("vlan_id"): sdata[self.ver_names["vlanid"]] = data["vlan_id"] + if data.get("dst_mac"): sdata[self.ver_names["destmac"]] = data["dst_mac"] - sdata['switch'] = self.dpid - if not data['ingress_port'] in self.pp2ofi: - error_text = 'Error. Port {} is not present in the switch'.format(data['ingress_port']) + + sdata["switch"] = self.dpid + if not data["ingress_port"] in self.pp2ofi: + error_text = "Error. 
Port {} is not present in the switch".format( + data["ingress_port"] + ) self.logger.warning("new_flow " + error_text) raise OpenflowConnUnexpectedResponse(error_text) - sdata[self.ver_names["inport"]] = self.pp2ofi[data['ingress_port']] - sdata['actions'] = "" + sdata[self.ver_names["inport"]] = self.pp2ofi[data["ingress_port"]] + sdata["actions"] = "" + + for action in data["actions"]: + if len(sdata["actions"]) > 0: + sdata["actions"] += "," - for action in data['actions']: - if len(sdata['actions']) > 0: - sdata['actions'] += ',' if action[0] == "vlan": if action[1] is None: - sdata['actions'] += self.ver_names["stripvlan"] + sdata["actions"] += self.ver_names["stripvlan"] else: - sdata['actions'] += self.ver_names["setvlan"] + "=" + str(action[1]) - elif action[0] == 'out': - sdata['actions'] += "output=" + self.pp2ofi[action[1]] + sdata["actions"] += ( + self.ver_names["setvlan"] + "=" + str(action[1]) + ) + elif action[0] == "out": + sdata["actions"] += "output=" + self.pp2ofi[action[1]] + + of_response = requests.post( + self.url + "wm/{}/json".format(self.ver_names["URLmodifier"]), + headers=self.headers, + data=json.dumps(sdata), + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) - of_response = requests.post(self.url + "wm/{}/json".format(self.ver_names["URLmodifier"]), - headers=self.headers, data=json.dumps(sdata)) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) if of_response.status_code != 200: self.logger.warning("new_flow " + error_text) raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("new_flow OK" + error_text) + return None except requests.exceptions.RequestException as e: @@ -466,20 +624,29 @@ class OfConnFloodLight(OpenflowConn): if len(sw_list) == 0: # empty return None - url = self.url + "wm/{}/clear/{}/json".format(self.ver_names["URLmodifier"], self.dpid) + url = self.url + "wm/{}/clear/{}/json".format( + self.ver_names["URLmodifier"], self.dpid + ) of_response = requests.get(url) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code < 200 or of_response.status_code >= 300: self.logger.warning("clear_all_flows " + error_text) raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("clear_all_flows OK " + error_text) + return None except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("clear_all_flows " + error_text) + raise OpenflowConnConnectionException(error_text) except Exception as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("clear_all_flows " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) diff --git a/RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/sdnconn_floodlightof.py b/RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/sdnconn_floodlightof.py index 2c7910b4..bd37be8b 100644 --- a/RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/sdnconn_floodlightof.py +++ b/RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/sdnconn_floodlightof.py @@ -25,11 +25,9 @@ from .floodlight_of import OfConnFloodLight class SdnConnectorFloodLightOf(SdnConnectorOpenFlow): - def __init__(self, wim, wim_account, config=None, logger=None): - """Creates a connectivity based on pro-active openflow rules - """ - self.logger = 
logging.getLogger('ro.sdn.floodlightof') + """Creates a connectivity based on pro-active openflow rules""" + self.logger = logging.getLogger("ro.sdn.floodlightof") super().__init__(wim, wim_account, config, logger) of_params = { "of_url": wim["wim_url"], diff --git a/RO-SDN-floodlight_openflow/setup.py b/RO-SDN-floodlight_openflow/setup.py index faf1ce7b..ce1fabde 100644 --- a/RO-SDN-floodlight_openflow/setup.py +++ b/RO-SDN-floodlight_openflow/setup.py @@ -30,27 +30,30 @@ osm-ro plugin for floodlight SDN using pre-computed openflow rules setup( name=_name, - description='OSM RO plugin for SDN with floodlight openflow rules', + description="OSM RO plugin for SDN with floodlight openflow rules", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='alfonso.tiernosepulveda@telefonica.com', - maintainer='Alfonso Tierno', - maintainer_email='alfonso.tiernosepulveda@telefonica.com', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="alfonso.tiernosepulveda@telefonica.com", + maintainer="Alfonso Tierno", + maintainer_email="alfonso.tiernosepulveda@telefonica.com", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ "requests", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rosdn.plugins': ['rosdn_floodlightof = osm_rosdn_floodlightof.sdnconn_floodlightof:' - 'SdnConnectorFloodLightOf'], + "osm_rosdn.plugins": [ + "rosdn_floodlightof = osm_rosdn_floodlightof.sdnconn_floodlightof:SdnConnectorFloodLightOf" + ], }, ) diff --git a/RO-SDN-floodlight_openflow/tox.ini b/RO-SDN-floodlight_openflow/tox.ini index bee1be4a..2312e5f3 100644 --- a/RO-SDN-floodlight_openflow/tox.ini +++ b/RO-SDN-floodlight_openflow/tox.ini @@ -27,7 +27,7 @@ commands=python3 -m unittest discover -v basepython = python3 deps = flake8 commands = flake8 osm_rosdn_floodlightof --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn/wimconn_ietfl2vpn.py b/RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn/wimconn_ietfl2vpn.py index 8ea422d0..c9c05a72 100644 --- a/RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn/wimconn_ietfl2vpn.py +++ b/RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn/wimconn_ietfl2vpn.py @@ -34,41 +34,48 @@ import requests import uuid import logging from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError -"""CHeck layer where we move it""" +"""Check layer where we move it""" -class WimconnectorIETFL2VPN(SdnConnectorBase): +class WimconnectorIETFL2VPN(SdnConnectorBase): def __init__(self, wim, wim_account, config=None, logger=None): - """IETF L2VPM WIM connector + """IETF L2VPN WIM connector Arguments: (To be completed) wim (dict): WIM record, as stored in the database wim_account (dict): WIM account 
record, as stored in the database
        """
-        self.logger = logging.getLogger('ro.sdn.ietfl2vpn')
+        self.logger = logging.getLogger("ro.sdn.ietfl2vpn")
         super().__init__(wim, wim_account, config, logger)
-        self.headers = {'Content-Type': 'application/json'}
-        self.mappings = {m['service_endpoint_id']: m
-                         for m in self.service_endpoint_mapping}
+        self.headers = {"Content-Type": "application/json"}
+        self.mappings = {
+            m["service_endpoint_id"]: m for m in self.service_endpoint_mapping
+        }
         self.user = wim_account.get("user")
         self.passwd = wim_account.get("passwordd")
+
         if self.user and self.passwd is not None:
             self.auth = (self.user, self.passwd)
         else:
             self.auth = None
+
         self.logger.info("IETFL2VPN Connector Initialized.")

     def check_credentials(self):
-        endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"])
+        endpoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+            self.wim["wim_url"]
+        )
+
         try:
-            response = requests.get(endpoint, auth=self.auth)
+            response = requests.get(endpoint, auth=self.auth)
             http_code = response.status_code
         except requests.exceptions.RequestException as e:
             raise SdnConnectorError(e.message, http_code=503)

         if http_code != 200:
             raise SdnConnectorError("Failed while authenticating", http_code=http_code)
+
         self.logger.info("Credentials checked")

     def get_connectivity_service_status(self, service_uuid, conn_info=None):
@@ -87,18 +94,25 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
         try:
             self.logger.info("Sending get connectivity service status")
             servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
-                self.wim["wim_url"], service_uuid)
+                self.wim["wim_url"], service_uuid
+            )
             response = requests.get(servicepoint, auth=self.auth)
+
             if response.status_code != requests.codes.ok:
-                raise SdnConnectorError("Unable to obtain connectivity service status", http_code=response.status_code)
-            service_status = {'sdn_status': 'ACTIVE'}
+                raise SdnConnectorError(
+                    "Unable to obtain connectivity service status",
+                    http_code=response.status_code,
+                )
+
+            service_status = {"sdn_status": "ACTIVE"}
+
             return service_status
         except requests.exceptions.ConnectionError:
             raise SdnConnectorError("Request Timeout", http_code=408)
-
+
     def search_mapp(self, connection_point):
-        id = connection_point['service_endpoint_id']
-        if id not in self.mappings:
+        id = connection_point["service_endpoint_id"]
+
+        if id not in self.mappings:
             raise SdnConnectorError("Endpoint {} not located".format(str(id)))
         else:
             return self.mappings[id]
@@ -151,10 +165,14 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
         """
         if service_type == "ELINE":
             if len(connection_points) > 2:
-                raise SdnConnectorError('Connections between more than 2 endpoints are not supported')
+                raise SdnConnectorError(
+                    "Connections between more than 2 endpoints are not supported"
+                )
+
             if len(connection_points) < 2:
-                raise SdnConnectorError('Connections must be of at least 2 endpoints')
-            """ First step, create the vpn service """
+                raise SdnConnectorError("Connections must be of at least 2 endpoints")
+
+            """ First step, create the vpn service """
             uuid_l2vpn = str(uuid.uuid4())
             vpn_service = {}
             vpn_service["vpn-id"] = uuid_l2vpn
@@ -167,89 +185,154 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
             response_service_creation = None
             conn_info = []
             self.logger.info("Sending vpn-service :{}".format(vpn_service_l))
+
             try:
-                endpoint_service_creation = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
-                    self.wim["wim_url"])
-                response_service_creation = requests.post(endpoint_service_creation, headers=self.headers,
-                                                          json=vpn_service_l, auth=self.auth)
+                endpoint_service_creation = (
+                    "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(
+                        self.wim["wim_url"]
+                    )
+                )
+                response_service_creation = requests.post(
+                    endpoint_service_creation,
+                    headers=self.headers,
+                    json=vpn_service_l,
+                    auth=self.auth,
+                )
             except requests.exceptions.ConnectionError:
-                raise SdnConnectorError("Request to create service Timeout", http_code=408)
+                raise SdnConnectorError(
+                    "Request to create service Timeout", http_code=408
+                )
+
             if response_service_creation.status_code == 409:
-                raise SdnConnectorError("Service already exists", http_code=response_service_creation.status_code)
+                raise SdnConnectorError(
+                    "Service already exists",
+                    http_code=response_service_creation.status_code,
+                )
             elif response_service_creation.status_code != requests.codes.created:
-                raise SdnConnectorError("Request to create service not accepted",
-                                        http_code=response_service_creation.status_code)
-            """ Second step, create the connections and vpn attachments """
+                raise SdnConnectorError(
+                    "Request to create service not accepted",
+                    http_code=response_service_creation.status_code,
+                )
+
+            """ Second step, create the connections and vpn attachments """
             for connection_point in connection_points:
                 connection_point_wan_info = self.search_mapp(connection_point)
                 site_network_access = {}
                 connection = {}
+
                 if connection_point["service_endpoint_encapsulation_type"] != "none":
-                    if connection_point["service_endpoint_encapsulation_type"] == "dot1q":
+                    if (
+                        connection_point["service_endpoint_encapsulation_type"]
+                        == "dot1q"
+                    ):
                         """ The connection is a VLAN """
                         connection["encapsulation-type"] = "dot1q-vlan-tagged"
                         tagged = {}
                         tagged_interf = {}
-                        service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"]
+                        service_endpoint_encapsulation_info = connection_point[
+                            "service_endpoint_encapsulation_info"
+                        ]
+
                         if service_endpoint_encapsulation_info["vlan"] is None:
                             raise SdnConnectorError("VLAN must be provided")
-                        tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"]
+
+                        tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[
+                            "vlan"
+                        ]
                         tagged["dot1q-vlan-tagged"] = tagged_interf
                         connection["tagged-interface"] = tagged
                     else:
                         raise NotImplementedError("Encapsulation type not implemented")
+
                 site_network_access["connection"] = connection
                 self.logger.info("Sending connection:{}".format(connection))
                 vpn_attach = {}
                 vpn_attach["vpn-id"] = uuid_l2vpn
-                vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role"
+                vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role"
                 site_network_access["vpn-attachment"] = vpn_attach
                 self.logger.info("Sending vpn-attachment :{}".format(vpn_attach))
                 uuid_sna = str(uuid.uuid4())
                 site_network_access["network-access-id"] = uuid_sna
-                site_network_access["bearer"] = connection_point_wan_info["service_mapping_info"]["bearer"]
+                site_network_access["bearer"] = connection_point_wan_info[
+                    "service_mapping_info"
+                ]["bearer"]
                 site_network_accesses = {}
                 site_network_access_list = []
                 site_network_access_list.append(site_network_access)
-                site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list
+                site_network_accesses[
+                    "ietf-l2vpn-svc:site-network-access"
+                ] = site_network_access_list
                 conn_info_d = {}
-                conn_info_d["site"] = connection_point_wan_info["service_mapping_info"]["site-id"]
-                conn_info_d["site-network-access-id"] = site_network_access["network-access-id"]
+                conn_info_d["site"] = connection_point_wan_info["service_mapping_info"][
+                    "site-id"
+                ]
+                conn_info_d["site-network-access-id"] = site_network_access[
+                    "network-access-id"
+                ]
                 conn_info_d["mapping"] = None
                 conn_info.append(conn_info_d)
+
                 try:
-                    endpoint_site_network_access_creation = \
-                        "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format(
-                            self.wim["wim_url"], connection_point_wan_info["service_mapping_info"]["site-id"])
+                    endpoint_site_network_access_creation = (
+                        "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/"
+                        "sites/site={}/site-network-accesses/".format(
+                            self.wim["wim_url"],
+                            connection_point_wan_info["service_mapping_info"][
+                                "site-id"
+                            ],
+                        )
+                    )
                     response_endpoint_site_network_access_creation = requests.post(
                         endpoint_site_network_access_creation,
                         headers=self.headers,
                         json=site_network_accesses,
-                        auth=self.auth)
-
-                    if response_endpoint_site_network_access_creation.status_code == 409:
+                        auth=self.auth,
+                    )
+
+                    if (
+                        response_endpoint_site_network_access_creation.status_code
+                        == 409
+                    ):
                         self.delete_connectivity_service(vpn_service["vpn-id"])
-                        raise SdnConnectorError("Site_Network_Access with ID '{}' already exists".format(
-                            site_network_access["network-access-id"]),
-                            http_code=response_endpoint_site_network_access_creation.status_code)
-
-                    elif response_endpoint_site_network_access_creation.status_code == 400:
+
+                        raise SdnConnectorError(
+                            "Site_Network_Access with ID '{}' already exists".format(
+                                site_network_access["network-access-id"]
+                            ),
+                            http_code=response_endpoint_site_network_access_creation.status_code,
+                        )
+                    elif (
+                        response_endpoint_site_network_access_creation.status_code
+                        == 400
+                    ):
                         self.delete_connectivity_service(vpn_service["vpn-id"])
-                        raise SdnConnectorError("Site {} does not exist".format(
-                            connection_point_wan_info["service_mapping_info"]["site-id"]),
-                            http_code=response_endpoint_site_network_access_creation.status_code)
-
-                    elif response_endpoint_site_network_access_creation.status_code != requests.codes.created and \
-                            response_endpoint_site_network_access_creation.status_code != requests.codes.no_content:
+
+                        raise SdnConnectorError(
+                            "Site {} does not exist".format(
+                                connection_point_wan_info["service_mapping_info"][
+                                    "site-id"
+                                ]
+                            ),
+                            http_code=response_endpoint_site_network_access_creation.status_code,
+                        )
+                    elif (
+                        response_endpoint_site_network_access_creation.status_code
+                        != requests.codes.created
+                        and response_endpoint_site_network_access_creation.status_code
+                        != requests.codes.no_content
+                    ):
                         self.delete_connectivity_service(vpn_service["vpn-id"])
-                        raise SdnConnectorError("Request not accepted",
-                            http_code=response_endpoint_site_network_access_creation.status_code)
-
+
+                        raise SdnConnectorError(
+                            "Request not accepted",
+                            http_code=response_endpoint_site_network_access_creation.status_code,
+                        )
                 except requests.exceptions.ConnectionError:
                     self.delete_connectivity_service(vpn_service["vpn-id"])
+
                     raise SdnConnectorError("Request Timeout", http_code=408)
+
             return uuid_l2vpn, conn_info
-
         else:
             raise NotImplementedError

@@ -262,88 +345,132 @@ class WimconnectorIETFL2VPN(SdnConnectorBase):
         try:
             self.logger.info("Sending delete")
             servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services/vpn-service={}/".format(
-                self.wim["wim_url"], service_uuid)
+                self.wim["wim_url"], service_uuid
+            )
             response = requests.delete(servicepoint, auth=self.auth)
+
             if response.status_code != requests.codes.no_content:
-                raise SdnConnectorError("Error in 
the request", http_code=response.status_code) + raise SdnConnectorError( + "Error in the request", http_code=response.status_code + ) except requests.exceptions.ConnectionError: raise SdnConnectorError("Request Timeout", http_code=408) - def edit_connectivity_service(self, service_uuid, conn_info=None, - connection_points=None, **kwargs): + def edit_connectivity_service( + self, service_uuid, conn_info=None, connection_points=None, **kwargs + ): """Change an existing connectivity service, see ``create_connectivity_service``""" - # sites = {"sites": {}} # site_list = [] vpn_service = {} vpn_service["svc-topo"] = "any-to-any" counter = 0 + for connection_point in connection_points: site_network_access = {} connection_point_wan_info = self.search_mapp(connection_point) params_site = {} - params_site["site-id"] = connection_point_wan_info["service_mapping_info"]["site-id"] + params_site["site-id"] = connection_point_wan_info["service_mapping_info"][ + "site-id" + ] params_site["site-vpn-flavor"] = "site-vpn-flavor-single" device_site = {} device_site["device-id"] = connection_point_wan_info["device-id"] params_site["devices"] = device_site # network_access = {} connection = {} + if connection_point["service_endpoint_encapsulation_type"] != "none": if connection_point["service_endpoint_encapsulation_type"] == "dot1q": """ The connection is a VLAN """ connection["encapsulation-type"] = "dot1q-vlan-tagged" tagged = {} tagged_interf = {} - service_endpoint_encapsulation_info = connection_point["service_endpoint_encapsulation_info"] + service_endpoint_encapsulation_info = connection_point[ + "service_endpoint_encapsulation_info" + ] + if service_endpoint_encapsulation_info["vlan"] is None: raise SdnConnectorError("VLAN must be provided") - tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info["vlan"] + + tagged_interf["cvlan-id"] = service_endpoint_encapsulation_info[ + "vlan" + ] tagged["dot1q-vlan-tagged"] = tagged_interf connection["tagged-interface"] = tagged else: raise NotImplementedError("Encapsulation type not implemented") + site_network_access["connection"] = connection vpn_attach = {} vpn_attach["vpn-id"] = service_uuid - vpn_attach["site-role"] = vpn_service["svc-topo"]+"-role" + vpn_attach["site-role"] = vpn_service["svc-topo"] + "-role" site_network_access["vpn-attachment"] = vpn_attach uuid_sna = conn_info[counter]["site-network-access-id"] site_network_access["network-access-id"] = uuid_sna - site_network_access["bearer"] = connection_point_wan_info["service_mapping_info"]["bearer"] + site_network_access["bearer"] = connection_point_wan_info[ + "service_mapping_info" + ]["bearer"] site_network_accesses = {} site_network_access_list = [] site_network_access_list.append(site_network_access) - site_network_accesses["ietf-l2vpn-svc:site-network-access"] = site_network_access_list + site_network_accesses[ + "ietf-l2vpn-svc:site-network-access" + ] = site_network_access_list + try: - endpoint_site_network_access_edit = \ - "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/sites/site={}/site-network-accesses/".format( - self.wim["wim_url"], connection_point_wan_info["service_mapping_info"]["site-id"]) - response_endpoint_site_network_access_creation = requests.put(endpoint_site_network_access_edit, - headers=self.headers, - json=site_network_accesses, - auth=self.auth) + endpoint_site_network_access_edit = ( + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/" + "sites/site={}/site-network-accesses/".format( + self.wim["wim_url"], + 
connection_point_wan_info["service_mapping_info"]["site-id"], + ) + ) + response_endpoint_site_network_access_creation = requests.put( + endpoint_site_network_access_edit, + headers=self.headers, + json=site_network_accesses, + auth=self.auth, + ) + if response_endpoint_site_network_access_creation.status_code == 400: - raise SdnConnectorError("Service does not exist", - http_code=response_endpoint_site_network_access_creation.status_code) - elif response_endpoint_site_network_access_creation.status_code != 201 and \ - response_endpoint_site_network_access_creation.status_code != 204: - raise SdnConnectorError("Request no accepted", - http_code=response_endpoint_site_network_access_creation.status_code) + raise SdnConnectorError( + "Service does not exist", + http_code=response_endpoint_site_network_access_creation.status_code, + ) + elif ( + response_endpoint_site_network_access_creation.status_code != 201 + and response_endpoint_site_network_access_creation.status_code + != 204 + ): + raise SdnConnectorError( + "Request no accepted", + http_code=response_endpoint_site_network_access_creation.status_code, + ) except requests.exceptions.ConnectionError: raise SdnConnectorError("Request Timeout", http_code=408) + counter += 1 + return None def clear_all_connectivity_services(self): """Delete all WAN Links corresponding to a WIM""" try: self.logger.info("Sending clear all connectivity services") - servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"]) + servicepoint = ( + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( + self.wim["wim_url"] + ) + ) response = requests.delete(servicepoint, auth=self.auth) + if response.status_code != requests.codes.no_content: - raise SdnConnectorError("Unable to clear all connectivity services", http_code=response.status_code) + raise SdnConnectorError( + "Unable to clear all connectivity services", + http_code=response.status_code, + ) except requests.exceptions.ConnectionError: raise SdnConnectorError("Request Timeout", http_code=408) @@ -353,10 +480,19 @@ class WimconnectorIETFL2VPN(SdnConnectorBase): """ try: self.logger.info("Sending get all connectivity services") - servicepoint = "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format(self.wim["wim_url"]) + servicepoint = ( + "{}/restconf/data/ietf-l2vpn-svc:l2vpn-svc/vpn-services".format( + self.wim["wim_url"] + ) + ) response = requests.get(servicepoint, auth=self.auth) + if response.status_code != requests.codes.ok: - raise SdnConnectorError("Unable to get all connectivity services", http_code=response.status_code) + raise SdnConnectorError( + "Unable to get all connectivity services", + http_code=response.status_code, + ) + return response except requests.exceptions.ConnectionError: raise SdnConnectorError("Request Timeout", http_code=408) diff --git a/RO-SDN-ietfl2vpn/setup.py b/RO-SDN-ietfl2vpn/setup.py index 4c53d4b6..3295387a 100644 --- a/RO-SDN-ietfl2vpn/setup.py +++ b/RO-SDN-ietfl2vpn/setup.py @@ -30,26 +30,30 @@ osm-ro pluging for ietfl2vpn SDN setup( name=_name, - description='OSM ro sdn plugin for ietfl2vpn', + description="OSM ro sdn plugin for ietfl2vpn", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='OSM_TECH@LIST.ETSI.ORG', - maintainer='ETSI OSM', - 
maintainer_email='OSM_TECH@LIST.ETSI.ORG', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="OSM_TECH@LIST.ETSI.ORG", + maintainer="ETSI OSM", + maintainer_email="OSM_TECH@LIST.ETSI.ORG", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ "requests", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rosdn.plugins': ['rosdn_ietfl2vpn = osm_rosdn_ietfl2vpn.wimconn_ietfl2vpn:WimconnectorIETFL2VPN'], + "osm_rosdn.plugins": [ + "rosdn_ietfl2vpn = osm_rosdn_ietfl2vpn.wimconn_ietfl2vpn:WimconnectorIETFL2VPN" + ], }, ) diff --git a/RO-SDN-ietfl2vpn/tox.ini b/RO-SDN-ietfl2vpn/tox.ini index 23c8f530..98e35a0b 100644 --- a/RO-SDN-ietfl2vpn/tox.ini +++ b/RO-SDN-ietfl2vpn/tox.ini @@ -27,7 +27,7 @@ commands=python3 -m unittest discover -v basepython = python3 deps = flake8 commands = flake8 osm_rosdn_ietfl2vpn --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/rest_lib.py b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/rest_lib.py index 699655a3..963f6cfd 100644 --- a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/rest_lib.py +++ b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/rest_lib.py @@ -43,7 +43,6 @@ class ServiceUnavailableException(HttpException): class ContrailHttp(object): - def __init__(self, auth_info, logger): self._logger = logger # default don't verify client cert @@ -62,57 +61,87 @@ class ContrailHttp(object): def get_cmd(self, url, headers): self._logger.debug("") resp = self._request("GET", url, headers) + return resp.json() def post_headers_cmd(self, url, headers, post_fields_dict=None): self._logger.debug("") + # obfuscate password before logging dict - if post_fields_dict.get('auth', {}).get('identity', {}).get('password', {}).get('user', {}).get('password'): + if ( + post_fields_dict.get("auth", {}) + .get("identity", {}) + .get("password", {}) + .get("user", {}) + .get("password") + ): post_fields_dict_copy = copy.deepcopy(post_fields_dict) - post_fields_dict['auth']['identity']['password']['user']['password'] = '******' + post_fields_dict["auth"]["identity"]["password"]["user"][ + "password" + ] = "******" json_data_log = post_fields_dict_copy else: json_data_log = post_fields_dict + self._logger.debug("Request POSTFIELDS: {}".format(json.dumps(json_data_log))) resp = self._request("POST_HEADERS", url, headers, data=post_fields_dict) + return resp.text def post_cmd(self, url, headers, post_fields_dict=None): self._logger.debug("") + # obfuscate password before logging dict - if post_fields_dict.get('auth', {}).get('identity', {}).get('password', {}).get('user', {}).get('password'): + if ( + post_fields_dict.get("auth", {}) + .get("identity", {}) + .get("password", {}) + .get("user", {}) + .get("password") + ): post_fields_dict_copy = copy.deepcopy(post_fields_dict) - post_fields_dict['auth']['identity']['password']['user']['password'] = '******' + 
post_fields_dict["auth"]["identity"]["password"]["user"][ + "password" + ] = "******" json_data_log = post_fields_dict_copy else: json_data_log = post_fields_dict + self._logger.debug("Request POSTFIELDS: {}".format(json.dumps(json_data_log))) resp = self._request("POST", url, headers, data=post_fields_dict) + return resp.text def delete_cmd(self, url, headers): self._logger.debug("") resp = self._request("DELETE", url, headers) + return resp.text def _get_token(self, headers): if self.auth_url: - self._logger.debug('Current Token: {}'.format(self.token)) - auth_url = self.auth_url + 'auth/tokens' + self._logger.debug("Current Token: {}".format(self.token)) + auth_url = self.auth_url + "auth/tokens" + if self.token is None or self._token_expired(): if not self.auth_url: self.token = "" - resp = self._request_noauth(url=auth_url, op="POST", headers=headers, - data=self.auth_dict) - self.token = resp.headers.get('x-subject-token') + + resp = self._request_noauth( + url=auth_url, op="POST", headers=headers, data=self.auth_dict + ) + self.token = resp.headers.get("x-subject-token") self.last_token_time = time.time() - self._logger.debug('Obtained token: {}'.format(self.token)) + self._logger.debug("Obtained token: {}".format(self.token)) return self.token def _token_expired(self): current_time = time.time() - if self.last_token_time and (current_time - self.last_token_time < self.token_timeout): + + if self.last_token_time and ( + current_time - self.last_token_time < self.token_timeout + ): return False else: return True @@ -124,14 +153,18 @@ class ContrailHttp(object): # TODO add again token # token = self._get_token(headers) token = None + if token: - headers['X-Auth-Token'] = token + headers["X-Auth-Token"] = token + try: return self._request_noauth(op, url, headers, data) except AuthError: # If there is an auth error retry just once if retry_auth_error: - return self._request(self, op, url, headers, data, retry_auth_error=False) + return self._request( + self, op, url, headers, data, retry_auth_error=False + ) def _request_noauth(self, op, url, headers, data=None): # Method to execute http requests with error control @@ -158,6 +191,7 @@ class ContrailHttp(object): resp = self._http_delete(url, headers, json_data=data) else: raise HttpException("Unsupported operation: {}".format(op)) + self._logger.info("Response HTTPCODE: {}".format(resp.status_code)) # Check http return code @@ -168,23 +202,42 @@ class ContrailHttp(object): if status_code == 401: # Auth Error - set token to None to reload it and raise AuthError self.token = None + raise AuthError("Auth error executing operation") elif status_code == 409: - raise DuplicateFound("Duplicate resource url: {}, response: {}".format(url, resp.text)) + raise DuplicateFound( + "Duplicate resource url: {}, response: {}".format( + url, resp.text + ) + ) elif status_code == 404: - raise NotFound("Not found resource url: {}, response: {}".format(url, resp.text)) + raise NotFound( + "Not found resource url: {}, response: {}".format( + url, resp.text + ) + ) elif resp.status_code in [502, 503]: if not self.max_retries or retry >= self.max_retries: - raise ServiceUnavailableException("Service unavailable error url: {}".format(url)) + raise ServiceUnavailableException( + "Service unavailable error url: {}".format(url) + ) continue else: - raise HttpException("Error status_code: {}, error_text: {}".format(resp.status_code, resp.text)) + raise HttpException( + "Error status_code: {}, error_text: {}".format( + resp.status_code, resp.text + ) + ) except 
ConnectionError as e: - self._logger.error("Connection error executing request: {}".format(repr(e))) + self._logger.error( + "Connection error executing request: {}".format(repr(e)) + ) + if not self.max_retries or retry >= self.max_retries: raise ConnectionError + continue except Exception as e: self._logger.error("Error executing request: {}".format(repr(e))) diff --git a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py index 6c2f72b0..04943931 100644 --- a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py +++ b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py @@ -20,6 +20,7 @@ import json from osm_ro_plugin.sdnconn import SdnConnectorError from osm_rosdn_juniper_contrail.rest_lib import ContrailHttp from osm_rosdn_juniper_contrail.rest_lib import NotFound + # from osm_rosdn_juniper_contrail.rest_lib import DuplicateFound # from osm_rosdn_juniper_contrail.rest_lib import HttpException @@ -28,23 +29,25 @@ class UnderlayApi: """ Class with CRUD operations for the underlay API """ def __init__(self, url, config=None, user=None, password=None, logger=None): - self.logger = logger or logging.getLogger("ro.sdn.junipercontrail.sdnapi") self.controller_url = url if not url: raise SdnConnectorError("'url' must be provided") + if not url.startswith("http"): url = "http://" + url + if not url.endswith("/"): url = url + "/" - self.url = url + self.url = url self.auth_url = None self.project = None self.domain = None self.asn = None self.fabric = None + if config: self.auth_url = config.get("auth_url") self.project = config.get("project") @@ -53,32 +56,36 @@ class UnderlayApi: self.fabric = config.get("fabric") # Init http headers for all requests - self.http_header = {'Content-Type': 'application/json'} + self.http_header = {"Content-Type": "application/json"} if user: self.user = user + if password: self.password = password - self.logger.debug("Config parameters for the underlay controller: auth_url: {}, project: {}," - " domain: {}, user: {}, password: {}".format(self.auth_url, self.project, - self.domain, self.user, self.password)) + self.logger.debug( + "Config parameters for the underlay controller: auth_url: {}, project: {}," + " domain: {}, user: {}, password: {}".format( + self.auth_url, self.project, self.domain, self.user, self.password + ) + ) auth_dict = {} - auth_dict['auth'] = {} - auth_dict['auth']['scope'] = {} - auth_dict['auth']['scope']['project'] = {} - auth_dict['auth']['scope']['project']['domain'] = {} - auth_dict['auth']['scope']['project']['domain']["id"] = self.domain - auth_dict['auth']['scope']['project']['name'] = self.project - auth_dict['auth']['identity'] = {} - auth_dict['auth']['identity']['methods'] = ['password'] - auth_dict['auth']['identity']['password'] = {} - auth_dict['auth']['identity']['password']['user'] = {} - auth_dict['auth']['identity']['password']['user']['name'] = self.user - auth_dict['auth']['identity']['password']['user']['password'] = self.password - auth_dict['auth']['identity']['password']['user']['domain'] = {} - auth_dict['auth']['identity']['password']['user']['domain']['id'] = self.domain + auth_dict["auth"] = {} + auth_dict["auth"]["scope"] = {} + auth_dict["auth"]["scope"]["project"] = {} + auth_dict["auth"]["scope"]["project"]["domain"] = {} + auth_dict["auth"]["scope"]["project"]["domain"]["id"] = self.domain + auth_dict["auth"]["scope"]["project"]["name"] = self.project + auth_dict["auth"]["identity"] = {} + 
auth_dict["auth"]["identity"]["methods"] = ["password"] + auth_dict["auth"]["identity"]["password"] = {} + auth_dict["auth"]["identity"]["password"]["user"] = {} + auth_dict["auth"]["identity"]["password"]["user"]["name"] = self.user + auth_dict["auth"]["identity"]["password"]["user"]["password"] = self.password + auth_dict["auth"]["identity"]["password"]["user"]["domain"] = {} + auth_dict["auth"]["identity"]["password"]["user"]["domain"]["id"] = self.domain self.auth_dict = auth_dict # Init http lib @@ -87,18 +94,21 @@ class UnderlayApi: def check_auth(self): response = self.http.get_cmd(url=self.auth_url, headers=self.http_header) + return response # Helper methods for CRUD operations def get_all_by_type(self, controller_url, type): endpoint = controller_url + type response = self.http.get_cmd(url=endpoint, headers=self.http_header) + return response.get(type) def get_by_uuid(self, type, uuid): try: endpoint = self.controller_url + type + "/{}".format(uuid) response = self.http.get_cmd(url=endpoint, headers=self.http_header) + return response.get(type) except NotFound: return None @@ -113,15 +123,14 @@ class UnderlayApi: Returns: If resource not found returns None In case of error raises an Exception """ - payload = { - "type": type, - "fq_name": fq_name - } + payload = {"type": type, "fq_name": fq_name} + try: endpoint = self.controller_url + "fqname-to-id" - resp = self.http.post_cmd(url=endpoint, - headers=self.http_header, - post_fields_dict=payload) + resp = self.http.post_cmd( + url=endpoint, headers=self.http_header, post_fields_dict=payload + ) + return json.loads(resp).get("uuid") except NotFound: return None @@ -129,6 +138,7 @@ class UnderlayApi: def get_by_fq_name(self, type, fq_name): # Obtain uuid by fqdn and then get data by uuid uuid = self.get_uuid_from_fqname(type, fq_name) + if uuid: return self.get_by_uuid(type, uuid) else: @@ -140,10 +150,13 @@ class UnderlayApi: "uuid": uuid, "ref-type": ref_type, "ref-fq-name": ref_fq_name, - "operation": "DELETE" + "operation": "DELETE", } endpoint = self.controller_url + "ref-update" - resp = self.http.post_cmd(url=endpoint, headers=self.http_header, post_fields_dict=payload) + resp = self.http.post_cmd( + url=endpoint, headers=self.http_header, post_fields_dict=payload + ) + return resp # Aux methods to avoid code duplication of name conventions @@ -157,92 +170,93 @@ class UnderlayApi: def create_virtual_network(self, name, vni): self.logger.debug("create vname, name: {}, vni: {}".format(name, vni)) - routetarget = '{}:{}'.format(self.asn, vni) + routetarget = "{}:{}".format(self.asn, vni) vnet_dict = { "virtual-network": { "virtual_network_properties": { "vxlan_network_identifier": vni, }, "parent_type": "project", - "fq_name": [ - self.domain, - self.project, - name - ], - "route_target_list": { - "route_target": [ - "target:" + routetarget - ] - } + "fq_name": [self.domain, self.project, name], + "route_target_list": {"route_target": ["target:" + routetarget]}, } } - endpoint = self.controller_url + 'virtual-networks' - resp = self.http.post_cmd(url=endpoint, - headers=self.http_header, - post_fields_dict=vnet_dict) + endpoint = self.controller_url + "virtual-networks" + resp = self.http.post_cmd( + url=endpoint, headers=self.http_header, post_fields_dict=vnet_dict + ) + if not resp: - raise SdnConnectorError('Error creating virtual network: empty response') + raise SdnConnectorError("Error creating virtual network: empty response") + vnet_info = json.loads(resp) self.logger.debug("created vnet, vnet_info: 
{}".format(vnet_info)) - return vnet_info.get("virtual-network").get('uuid'), vnet_info.get("virtual-network") + + return vnet_info.get("virtual-network").get("uuid"), vnet_info.get( + "virtual-network" + ) def get_virtual_networks(self): - return self.get_all_by_type('virtual-networks') + return self.get_all_by_type("virtual-networks") def get_virtual_network(self, network_id): - return self.get_by_uuid('virtual-network', network_id) + return self.get_by_uuid("virtual-network", network_id) def delete_virtual_network(self, network_id): self.logger.debug("delete vnet uuid: {}".format(network_id)) - self.delete_by_uuid(self.controller_url, 'virtual-network', network_id) + self.delete_by_uuid(self.controller_url, "virtual-network", network_id) self.logger.debug("deleted vnet uuid: {}".format(network_id)) # Vpg operations def create_vpg(self, switch_id, switch_port): - self.logger.debug("create vpg, switch_id: {}, switch_port: {}".format(switch_id, switch_port)) + self.logger.debug( + "create vpg, switch_id: {}, switch_port: {}".format(switch_id, switch_port) + ) vpg_name = self.get_vpg_name(switch_id, switch_port) vpg_dict = { "virtual-port-group": { "parent_type": "fabric", - "fq_name": [ - "default-global-system-config", - self.fabric, - vpg_name - ] + "fq_name": ["default-global-system-config", self.fabric, vpg_name], } } - endpoint = self.controller_url + 'virtual-port-groups' - resp = self.http.post_cmd(url=endpoint, - headers=self.http_header, - post_fields_dict=vpg_dict) + endpoint = self.controller_url + "virtual-port-groups" + resp = self.http.post_cmd( + url=endpoint, headers=self.http_header, post_fields_dict=vpg_dict + ) + if not resp: - raise SdnConnectorError('Error creating virtual port group: empty response') + raise SdnConnectorError("Error creating virtual port group: empty response") + vpg_info = json.loads(resp) self.logger.debug("created vpg, vpg_info: {}".format(vpg_info)) - return vpg_info.get("virtual-port-group").get('uuid'), vpg_info.get("virtual-port-group") + + return vpg_info.get("virtual-port-group").get("uuid"), vpg_info.get( + "virtual-port-group" + ) def get_vpgs(self): - return self.get_all_by_type(self.controller_url, 'virtual-port-groups') + return self.get_all_by_type(self.controller_url, "virtual-port-groups") def get_vpg(self, vpg_id): return self.get_by_uuid(self.controller_url, "virtual-port-group", vpg_id) def get_vpg_by_name(self, vpg_name): - fq_name = ["default-global-system-config", - self.fabric, - vpg_name - ] + fq_name = ["default-global-system-config", self.fabric, vpg_name] + return self.get_by_fq_name("virtual-port-group", fq_name) def delete_vpg(self, vpg_id): self.logger.debug("delete vpg, uuid: {}".format(vpg_id)) - self.delete_by_uuid(self.controller_url, 'virtual-port-group', vpg_id) + self.delete_by_uuid(self.controller_url, "virtual-port-group", vpg_id) self.logger.debug("deleted vpg, uuid: {}".format(vpg_id)) def create_vmi(self, switch_id, switch_port, network, vlan): - self.logger.debug("create vmi, switch_id: {}, switch_port: {}, network: {}, vlan: {}".format( - switch_id, switch_port, network, vlan)) + self.logger.debug( + "create vmi, switch_id: {}, switch_port: {}, network: {}, vlan: {}".format( + switch_id, switch_port, network, vlan + ) + ) vmi_name = self.get_vmi_name(switch_id, switch_port, vlan) vpg_name = self.get_vpg_name(switch_id, switch_port) profile_dict = { @@ -251,71 +265,61 @@ class UnderlayApi: "port_id": switch_port.replace(":", "_"), "switch_id": switch_port.replace(":", "_"), "switch_info": switch_id, - 
"fabric": self.fabric + "fabric": self.fabric, } ] - } vmi_dict = { "virtual-machine-interface": { "parent_type": "project", - "fq_name": [ - self.domain, - self.project, - vmi_name - ], - "virtual_network_refs": [ - { - "to": [ - self.domain, - self.project, - network - ] - } - ], + "fq_name": [self.domain, self.project, vmi_name], + "virtual_network_refs": [{"to": [self.domain, self.project, network]}], "virtual_machine_interface_properties": { "sub_interface_vlan_tag": vlan }, "virtual_machine_interface_bindings": { "key_value_pair": [ - { - "key": "vnic_type", - "value": "baremetal" - }, - { - "key": "vif_type", - "value": "vrouter" - }, - { - "key": "vpg", - "value": vpg_name - }, - { - "key": "profile", - "value": json.dumps(profile_dict) - } + {"key": "vnic_type", "value": "baremetal"}, + {"key": "vif_type", "value": "vrouter"}, + {"key": "vpg", "value": vpg_name}, + {"key": "profile", "value": json.dumps(profile_dict)}, ] - } + }, } } - endpoint = self.controller_url + 'virtual-machine-interfaces' + endpoint = self.controller_url + "virtual-machine-interfaces" self.logger.debug("vmi_dict: {}".format(vmi_dict)) - resp = self.http.post_cmd(url=endpoint, - headers=self.http_header, - post_fields_dict=vmi_dict) + resp = self.http.post_cmd( + url=endpoint, + headers=self.http_header, + post_fields_dict=vmi_dict, + ) + if not resp: - raise SdnConnectorError('Error creating vmi: empty response') + raise SdnConnectorError("Error creating vmi: empty response") + vmi_info = json.loads(resp) self.logger.debug("created vmi, info: {}".format(vmi_info)) - return vmi_info.get("virtual-machine-interface").get('uuid'), vmi_info.get("virtual-machine-interface") + + return vmi_info.get("virtual-machine-interface").get("uuid"), vmi_info.get( + "virtual-machine-interface" + ) def get_vmi(self, vmi_uuid): - return self.get_by_uuid(self.controller_url, 'virtual-machine-interface', vmi_uuid) + return self.get_by_uuid( + self.controller_url, "virtual-machine-interface", vmi_uuid + ) def delete_vmi(self, uuid): self.logger.debug("delete vmi uuid: {}".format(uuid)) - self.delete_by_uuid(self.controller_url, 'virtual-machine-interface', uuid) + self.delete_by_uuid(self.controller_url, "virtual-machine-interface", uuid) self.logger.debug("deleted vmi: {}".format(uuid)) def unref_vmi_vpg(self, vpg_id, vmi_id, vmi_fq_name): - self.delete_ref("virtual-port-group", vpg_id, "virtual-machine-interface", vmi_id, vmi_fq_name) + self.delete_ref( + "virtual-port-group", + vpg_id, + "virtual-machine-interface", + vmi_id, + vmi_fq_name, + ) diff --git a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_assist_juniper_contrail.py b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_assist_juniper_contrail.py index a1cc3ded..29b187b8 100644 --- a/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_assist_juniper_contrail.py +++ b/RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_assist_juniper_contrail.py @@ -22,6 +22,7 @@ import yaml import random from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError + # from osm_rosdn_juniper_contrail.rest_lib import ContrailHttp # from osm_rosdn_juniper_contrail.rest_lib import NotFound from osm_rosdn_juniper_contrail.rest_lib import DuplicateFound @@ -39,6 +40,7 @@ class JuniperContrail(SdnConnectorBase): tutorial_with_rest.html - https://github.com/tonyliu0592/contrail-toolbox/blob/master/sriov/sriov """ + _WIM_LOGGER = "ro.sdn.junipercontrail" def __init__(self, wim, wim_account, config=None, logger=None): @@ -65,7 +67,9 @@ class 
JuniperContrail(SdnConnectorBase): :param logger (logging.Logger): optional logger object. If none is passed 'ro.sdn.sdnconn' is used. """ self.logger = logger or logging.getLogger(self._WIM_LOGGER) - self.logger.debug('wim: {}, wim_account: {}, config: {}'.format(wim, wim_account, config)) + self.logger.debug( + "wim: {}, wim_account: {}, config: {}".format(wim, wim_account, config) + ) super().__init__(wim, wim_account, config, logger) self.user = wim_account.get("user") @@ -79,6 +83,7 @@ class JuniperContrail(SdnConnectorBase): self.fabric = None overlay_url = None self.vni_range = None + if config: auth_url = config.get("auth_url") self.project = config.get("project") @@ -90,41 +95,56 @@ class JuniperContrail(SdnConnectorBase): if not url: raise SdnConnectorError("'url' must be provided") + if not url.startswith("http"): url = "http://" + url + if not url.endswith("/"): url = url + "/" + self.url = url if not self.vni_range: - self.vni_range = ['1000001-2000000'] + self.vni_range = ["1000001-2000000"] self.logger.info("No vni_range was provided. Using ['1000001-2000000']") + self.used_vni = set() if auth_url: if not auth_url.startswith("http"): auth_url = "http://" + auth_url + if not auth_url.endswith("/"): auth_url = auth_url + "/" + self.auth_url = auth_url if overlay_url: if not overlay_url.startswith("http"): overlay_url = "http://" + overlay_url + if not overlay_url.endswith("/"): overlay_url = overlay_url + "/" + self.overlay_url = overlay_url if not self.project: raise SdnConnectorError("'project' must be provided") + if not self.asn: # TODO: Get ASN from controller config; otherwise raise ERROR for the moment - raise SdnConnectorError("'asn' was not provided and it was not possible to obtain it") + raise SdnConnectorError( + "'asn' was not provided and it was not possible to obtain it" + ) + if not self.fabric: # TODO: Get FABRIC from controller config; otherwise raise ERROR for the moment - raise SdnConnectorError("'fabric' was not provided and was not possible to obtain it") + raise SdnConnectorError( + "'fabric' was not provided and was not possible to obtain it" + ) + if not self.domain: - self.domain = 'default-domain' + self.domain = "default-domain" self.logger.info("No domain was provided. 
Using 'default-domain'") underlay_api_config = { @@ -132,16 +152,22 @@ class JuniperContrail(SdnConnectorBase): "project": self.project, "domain": self.domain, "asn": self.asn, - "fabric": self.fabric + "fabric": self.fabric, } - self.underlay_api = UnderlayApi(url, underlay_api_config, user=self.user, password=self.password, logger=logger) + self.underlay_api = UnderlayApi( + url, + underlay_api_config, + user=self.user, + password=self.password, + logger=logger, + ) self._max_duplicate_retry = 2 self.logger.info("Juniper Contrail Connector Initialized.") def _generate_vni(self): """ - Method to get unused VxLAN Network Identifier (VNI) + Method to get unused VxLAN Network Identifier (VNI) Args: None Returns: @@ -151,15 +177,21 @@ class JuniperContrail(SdnConnectorBase): for vlanID_range in self.vni_range: try: start_vni, end_vni = map(int, vlanID_range.replace(" ", "").split("-")) + for i in range(start_vni, end_vni + 1): vni = random.randrange(start_vni, end_vni, 1) + if vni not in self.used_vni: return vni except Exception as exp: - raise SdnConnectorError("Exception {} occurred while searching a free VNI.".format(exp)) + raise SdnConnectorError( + "Exception {} occurred while searching a free VNI.".format(exp) + ) else: - raise SdnConnectorError("Unable to create the virtual network." - " All VNI in VNI range {} are in use.".format(self.vni_range)) + raise SdnConnectorError( + "Unable to create the virtual network." + " All VNI in VNI range {} are in use.".format(self.vni_range) + ) # Aux functions for testing def get_url(self): @@ -174,12 +206,16 @@ class JuniperContrail(SdnConnectorBase): 2 - It the virtual port group does not exist, create it 3 - Create virtual machine interface for the indicated network and vlan """ - self.logger.debug("create_port: switch_id: {}, switch_port: {}, network: {}, vlan: {}".format( - switch_id, switch_port, network, vlan)) + self.logger.debug( + "create_port: switch_id: {}, switch_port: {}, network: {}, vlan: {}".format( + switch_id, switch_port, network, vlan + ) + ) # 1 - Check if the vpg exists vpg_name = self.underlay_api.get_vpg_name(switch_id, switch_port) vpg = self.underlay_api.get_vpg_by_name(vpg_name) + if not vpg: # 2 - If it does not exist create it vpg_id, _ = self.underlay_api.create_vpg(switch_id, switch_port) @@ -194,7 +230,11 @@ class JuniperContrail(SdnConnectorBase): return vpg_id, vmi_id def _delete_port(self, switch_id, switch_port, vlan): - self.logger.debug("delete port, switch_id: {}, switch_port: {}, vlan: {}".format(switch_id, switch_port, vlan)) + self.logger.debug( + "delete port, switch_id: {}, switch_port: {}, vlan: {}".format( + switch_id, switch_port, vlan + ) + ) vpg_name = self.underlay_api.get_vpg_name(switch_id, switch_port) vmi_name = self.underlay_api.get_vmi_name(switch_id, switch_port, vlan) @@ -202,22 +242,30 @@ class JuniperContrail(SdnConnectorBase): # 1 - Obtain vpg by id (if not vpg_id must have been error creating ig, nothing to be done) vpg_fqdn = ["default-global-system-config", self.fabric, vpg_name] vpg = self.underlay_api.get_by_fq_name("virtual-port-group", vpg_fqdn) + if not vpg: self.logger.warning("vpg: {} to be deleted not found".format(vpg_name)) else: # 2 - Get vmi interfaces from vpg vmi_list = vpg.get("virtual_machine_interface_refs") + if not vmi_list: # must have been an error during port creation when vmi is created # may happen if there has been an error during creation - self.logger.warning("vpg: {} has not vmi, will delete nothing".format(vpg)) + self.logger.warning( + "vpg: {} has not 
vmi, will delete nothing".format(vpg) + ) else: num_vmis = len(vmi_list) + for vmi in vmi_list: fqdn = vmi.get("to") # check by name + if fqdn[2] == vmi_name: - self.underlay_api.unref_vmi_vpg(vpg.get("uuid"), vmi.get("uuid"), fqdn) + self.underlay_api.unref_vmi_vpg( + vpg.get("uuid"), vmi.get("uuid"), fqdn + ) self.underlay_api.delete_vmi(vmi.get("uuid")) num_vmis = num_vmis - 1 @@ -234,13 +282,15 @@ class JuniperContrail(SdnConnectorBase): external URLs, etc are detected. """ self.logger.debug("") + try: resp = self.underlay_api.check_auth() if not resp: - raise SdnConnectorError('Empty response') + raise SdnConnectorError("Empty response") except Exception as e: - self.logger.error('Error checking credentials') - raise SdnConnectorError('Error checking credentials: {}'.format(str(e))) + self.logger.error("Error checking credentials") + + raise SdnConnectorError("Error checking credentials: {}".format(str(e))) def get_connectivity_service_status(self, service_uuid, conn_info=None): """Monitor the status of the connectivity service established @@ -280,28 +330,36 @@ class JuniperContrail(SdnConnectorBase): new information available for the connectivity service. """ self.logger.debug("") + try: resp = self.underlay_api.get_virtual_network(service_uuid) if not resp: - raise SdnConnectorError('Empty response') + raise SdnConnectorError("Empty response") + if resp: vnet_info = resp # Check if conn_info reports error if conn_info.get("sdn_status") == "ERROR": - return {'sdn_status': 'ERROR', 'sdn_info': conn_info} + return {"sdn_status": "ERROR", "sdn_info": conn_info} else: - return {'sdn_status': 'ACTIVE', 'sdn_info': vnet_info} + return {"sdn_status": "ACTIVE", "sdn_info": vnet_info} else: - return {'sdn_status': 'ERROR', 'sdn_info': 'not found'} + return {"sdn_status": "ERROR", "sdn_info": "not found"} except SdnConnectorError: raise except HttpException as e: self.logger.error("Error getting connectivity service: {}".format(e)) - raise SdnConnectorError("Exception deleting connectivity service: {}".format(str(e))) + + raise SdnConnectorError( + "Exception deleting connectivity service: {}".format(str(e)) + ) except Exception as e: - self.logger.error('Exception getting connectivity service info: %s', e, exc_info=True) - return {'sdn_status': 'ERROR', 'error_msg': str(e)} + self.logger.error( + "Exception getting connectivity service info: %s", e, exc_info=True + ) + + return {"sdn_status": "ERROR", "error_msg": str(e)} def create_connectivity_service(self, service_type, connection_points, **kwargs): """ @@ -357,10 +415,16 @@ class JuniperContrail(SdnConnectorBase): # name = 'osm-plugin-' + overlay_name # Else: # name = 'osm-plugin-' + VNI - self.logger.info("create_connectivity_service, service_type: {}, connection_points: {}". - format(service_type, connection_points)) - if service_type.lower() != 'elan': - raise SdnConnectorError('Only ELAN network type is supported by Juniper Contrail.') + self.logger.info( + "create_connectivity_service, service_type: {}, connection_points: {}".format( + service_type, connection_points + ) + ) + + if service_type.lower() != "elan": + raise SdnConnectorError( + "Only ELAN network type is supported by Juniper Contrail." 
+ ) try: # Initialize data @@ -370,26 +434,37 @@ class JuniperContrail(SdnConnectorBase): # This data will be returned even if no cp can be created if something is created work_cps = {} for cp in connection_points: - switch_id = cp.get("service_endpoint_encapsulation_info").get("switch_dpid") - switch_port = cp.get("service_endpoint_encapsulation_info").get("switch_port") + switch_id = cp.get("service_endpoint_encapsulation_info").get( + "switch_dpid" + ) + switch_port = cp.get("service_endpoint_encapsulation_info").get( + "switch_port" + ) service_endpoint_id = cp.get("service_endpoint_id") cp_name = self.underlay_api.get_vpg_name(switch_id, switch_port) add_cp = work_cps.get(cp_name) + if not add_cp: # check cp has vlan vlan = cp.get("service_endpoint_encapsulation_info").get("vlan") + if vlan: # add cp to dict service_endpoint_ids = [] service_endpoint_ids.append(service_endpoint_id) - add_cp = {"service_endpoint_ids": service_endpoint_ids, - "switch_dpid": switch_id, - "switch_port": switch_port, - "vlan": vlan} + add_cp = { + "service_endpoint_ids": service_endpoint_ids, + "switch_dpid": switch_id, + "switch_port": switch_port, + "vlan": vlan, + } work_cps[cp_name] = add_cp else: - self.logger.warning("cp service_endpoint_id : {} has no vlan, ignore".format( - service_endpoint_id)) + self.logger.warning( + "cp service_endpoint_id : {} has no vlan, ignore".format( + service_endpoint_id + ) + ) else: # add service_endpoint_id to list service_endpoint_ids = add_cp["service_endpoint_ids"] @@ -403,26 +478,32 @@ class JuniperContrail(SdnConnectorBase): retry = 0 while retry < self._max_duplicate_retry: try: - vnet_name = 'osm-plugin-' + str(vni) - vnet_id, _ = self.underlay_api.create_virtual_network(vnet_name, vni) + vnet_name = "osm-plugin-" + str(vni) + vnet_id, _ = self.underlay_api.create_virtual_network( + vnet_name, vni + ) self.used_vni.add(vni) break except DuplicateFound as e: - self.logger.debug("Duplicate error for vnet_name: {}".format(vnet_name)) + self.logger.debug( + "Duplicate error for vnet_name: {}".format(vnet_name) + ) self.used_vni.add(vni) retry += 1 + if retry >= self._max_duplicate_retry: raise e else: # Try to obtain a new vni vni = self._generate_vni() continue + conn_info = { "vnet": { "uuid": vnet_id, - "name": vnet_name + "name": vnet_name, }, - "connection_points": work_cps # dict with port_name as key + "connection_points": work_cps, # dict with port_name as key } # 4 - Create a port for each endpoint @@ -430,23 +511,33 @@ class JuniperContrail(SdnConnectorBase): switch_id = cp.get("switch_dpid") switch_port = cp.get("switch_port") vlan = cp.get("vlan") - vpg_id, vmi_id = self._create_port(switch_id, switch_port, vnet_name, vlan) + vpg_id, vmi_id = self._create_port( + switch_id, switch_port, vnet_name, vlan + ) cp["vpg_id"] = vpg_id cp["vmi_id"] = vmi_id - self.logger.info("created connectivity service, uuid: {}, name: {}".format(vnet_id, vnet_name)) - return vnet_id, conn_info + self.logger.info( + "created connectivity service, uuid: {}, name: {}".format( + vnet_id, vnet_name + ) + ) + return vnet_id, conn_info except Exception as e: # Log error if isinstance(e, SdnConnectorError) or isinstance(e, HttpException): self.logger.error("Error creating connectivity service: {}".format(e)) else: - self.logger.error("Error creating connectivity service: {}".format(e), exc_info=True) + self.logger.error( + "Error creating connectivity service: {}".format(e), exc_info=True + ) # If nothing is created raise error else return what has been created and mask as error if 
not conn_info: - raise SdnConnectorError("Exception create connectivity service: {}".format(str(e))) + raise SdnConnectorError( + "Exception create connectivity service: {}".format(str(e)) + ) else: conn_info["sdn_status"] = "ERROR" conn_info["sdn_info"] = repr(e) @@ -454,6 +545,7 @@ class JuniperContrail(SdnConnectorBase): for cp in work_cps.values(): if not cp.get("vmi_id") or not cp.get("vpg_id"): cp["sdn_status"] = "ERROR" + return vnet_id, conn_info def delete_connectivity_service(self, service_uuid, conn_info=None): @@ -466,34 +558,54 @@ class JuniperContrail(SdnConnectorBase): :return: None :raises: SdnConnectorException: In case of error. The parameter http_code must be filled """ - self.logger.info("delete_connectivity_service vnet_name: {}, connection_points: {}". - format(service_uuid, conn_info)) + self.logger.info( + "delete_connectivity_service vnet_name: {}, connection_points: {}".format( + service_uuid, conn_info + ) + ) try: vnet_uuid = service_uuid - # vnet_name = conn_info["vnet"]["name"] # always should exist as the network is the first thing created + # vnet_name = conn_info["vnet"]["name"] + # always should exist as the network is the first thing created work_cps = conn_info["connection_points"] # 1: For each connection point delete vlan from vpg and it is is the # last one, delete vpg for cp in work_cps.values(): - self._delete_port(cp.get("switch_dpid"), cp.get("switch_port"), cp.get("vlan")) + self._delete_port( + cp.get("switch_dpid"), cp.get("switch_port"), cp.get("vlan") + ) # 2: Delete vnet self.underlay_api.delete_virtual_network(vnet_uuid) - self.logger.info("deleted connectivity_service vnet_uuid: {}, connection_points: {}". - format(service_uuid, conn_info)) + self.logger.info( + "deleted connectivity_service vnet_uuid: {}, connection_points: {}".format( + service_uuid, conn_info + ) + ) except SdnConnectorError: raise except HttpException as e: self.logger.error("Error deleting connectivity service: {}".format(e)) - raise SdnConnectorError("Exception deleting connectivity service: {}".format(str(e))) + + raise SdnConnectorError( + "Exception deleting connectivity service: {}".format(str(e)) + ) except Exception as e: - self.logger.error("Error deleting connectivity service: {}".format(e), exc_info=True) - raise SdnConnectorError("Exception deleting connectivity service: {}".format(str(e))) + self.logger.error( + "Error deleting connectivity service: {}".format(e), + exc_info=True, + ) - def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs): - """ Change an existing connectivity service. + raise SdnConnectorError( + "Exception deleting connectivity service: {}".format(str(e)) + ) + + def edit_connectivity_service( + self, service_uuid, conn_info=None, connection_points=None, **kwargs + ): + """Change an existing connectivity service. This method's arguments and return value follow the same convention as :meth:`~.create_connectivity_service`. 
@@ -517,8 +629,10 @@ class JuniperContrail(SdnConnectorBase): # 2 - Obtain network: Check vnet exists and obtain name # 3 - Delete unnecesary ports # 4 - Add new ports - self.logger.info("edit connectivity service, service_uuid: {}, conn_info: {}, " - "connection points: {} ".format(service_uuid, conn_info, connection_points)) + self.logger.info( + "edit connectivity service, service_uuid: {}, conn_info: {}, " + "connection points: {} ".format(service_uuid, conn_info, connection_points) + ) # conn_info should always exist and have connection_points and vnet elements old_cp = conn_info.get("connection_points", {}) @@ -534,7 +648,9 @@ class JuniperContrail(SdnConnectorBase): switch_port = cp.get("switch_port") old_vlan = cp.get("vlan") self._delete_port(switch_id, switch_port, old_vlan) - deleted_ports.append(self.underlay_api.get_vpg_name(switch_id, switch_port)) + deleted_ports.append( + self.underlay_api.get_vpg_name(switch_id, switch_port) + ) for port in deleted_ports: del old_cp[port] @@ -543,41 +659,63 @@ class JuniperContrail(SdnConnectorBase): if conn_info.get("vnet", {}).get("sdn_status"): del conn_info["vnet"]["sdn_status"] except HttpException as e: - self.logger.error("Error trying to delete old ports marked as error: {}".format(e)) + self.logger.error( + "Error trying to delete old ports marked as error: {}".format(e) + ) + raise SdnConnectorError(e) except SdnConnectorError as e: - self.logger.error("Error trying to delete old ports marked as error: {}".format(e)) + self.logger.error( + "Error trying to delete old ports marked as error: {}".format(e) + ) + raise except Exception as e: - self.logger.error("Error trying to delete old ports marked as error: {}".format(e), exc_info=True) - raise SdnConnectorError("Error trying to delete old ports marked as error: {}".format(e)) + self.logger.error( + "Error trying to delete old ports marked as error: {}".format(e), + exc_info=True, + ) - if connection_points: + raise SdnConnectorError( + "Error trying to delete old ports marked as error: {}".format(e) + ) + if connection_points: # Check and obtain what should be added and deleted, if there is an error here raise an exception try: work_cps = {} for cp in connection_points: - switch_id = cp.get("service_endpoint_encapsulation_info").get("switch_dpid") - switch_port = cp.get("service_endpoint_encapsulation_info").get("switch_port") + switch_id = cp.get("service_endpoint_encapsulation_info").get( + "switch_dpid" + ) + switch_port = cp.get("service_endpoint_encapsulation_info").get( + "switch_port" + ) service_endpoint_id = cp.get("service_endpoint_id") cp_name = self.underlay_api.get_vpg_name(switch_id, switch_port) add_cp = work_cps.get(cp_name) + if not add_cp: # add cp to dict # check cp has vlan vlan = cp.get("service_endpoint_encapsulation_info").get("vlan") + if vlan: service_endpoint_ids = [] service_endpoint_ids.append(service_endpoint_id) - add_cp = {"service_endpoint_ids": service_endpoint_ids, - "switch_dpid": switch_id, - "switch_port": switch_port, - "vlan": vlan} + add_cp = { + "service_endpoint_ids": service_endpoint_ids, + "switch_dpid": switch_id, + "switch_port": switch_port, + "vlan": vlan, + } work_cps[cp_name] = add_cp else: - self.logger.warning("cp service_endpoint_id : {} has no vlan, ignore". 
- format(service_endpoint_id)) + self.logger.warning( + "cp service_endpoint_id : {} has no vlan, ignore".format( + service_endpoint_id + ) + ) else: # add service_endpoint_id to list service_endpoint_ids = add_cp["service_endpoint_ids"] @@ -595,13 +733,19 @@ class JuniperContrail(SdnConnectorBase): if vnet: vnet_name = vnet["name"] else: - raise SdnConnectorError("vnet uuid: {} not found".format(service_uuid)) - + raise SdnConnectorError( + "vnet uuid: {} not found".format(service_uuid) + ) except SdnConnectorError: raise except Exception as e: - self.logger.error("Error edit connectivity service: {}".format(e), exc_info=True) - raise SdnConnectorError("Exception edit connectivity service: {}".format(str(e))) + self.logger.error( + "Error edit connectivity service: {}".format(e), exc_info=True + ) + + raise SdnConnectorError( + "Exception edit connectivity service: {}".format(str(e)) + ) # Delete unneeded ports and add new ones: if there is an error return conn_info try: @@ -616,7 +760,11 @@ class JuniperContrail(SdnConnectorBase): cp = conn_info_cp[port_name] switch_id = cp.get("switch_dpid") switch_port = cp.get("switch_port") - self.logger.debug("delete port switch_id={}, switch_port={}".format(switch_id, switch_port)) + self.logger.debug( + "delete port switch_id={}, switch_port={}".format( + switch_id, switch_port + ) + ) self._delete_port(switch_id, switch_port, vlan) deleted_ports.append(port_name) @@ -630,14 +778,23 @@ class JuniperContrail(SdnConnectorBase): switch_id = cp.get("switch_dpid") switch_port = cp.get("switch_port") vlan = cp.get("vlan") - self.logger.debug("add port switch_id={}, switch_port={}".format(switch_id, switch_port)) - vpg_id, vmi_id = self._create_port(switch_id, switch_port, vnet_name, vlan) + self.logger.debug( + "add port switch_id={}, switch_port={}".format( + switch_id, switch_port + ) + ) + vpg_id, vmi_id = self._create_port( + switch_id, switch_port, vnet_name, vlan + ) cp_added = cp.copy() cp_added["vpg_id"] = vpg_id cp_added["vmi_id"] = vmi_id conn_info_cp[port_name] = cp_added + # replace endpoints in case they have changed - conn_info_cp[port_name]["service_endpoint_ids"] = cp["service_endpoint_ids"] + conn_info_cp[port_name]["service_endpoint_ids"] = cp[ + "service_endpoint_ids" + ] conn_info["connection_points"] = conn_info_cp return conn_info @@ -645,7 +802,9 @@ class JuniperContrail(SdnConnectorBase): except Exception as e: # Log error if isinstance(e, SdnConnectorError) or isinstance(e, HttpException): - self.logger.error("Error edit connectivity service: {}".format(e), exc_info=True) + self.logger.error( + "Error edit connectivity service: {}".format(e), exc_info=True + ) else: self.logger.error("Error edit connectivity service: {}".format(e)) @@ -657,60 +816,71 @@ class JuniperContrail(SdnConnectorBase): for port_name, cp in work_cps.items(): curr_cp = conn_info_cp.get(port_name) + if not curr_cp: cp_error = work_cps.get(port_name).copy() cp_error["sdn_status"] = "ERROR" conn_info_cp[port_name] = cp_error - conn_info_cp[port_name]["service_endpoint_ids"] = cp["service_endpoint_ids"] + + conn_info_cp[port_name]["service_endpoint_ids"] = cp[ + "service_endpoint_ids" + ] conn_info["sdn_status"] = "ERROR" conn_info["sdn_info"] = repr(e) conn_info["connection_points"] = conn_info_cp - return conn_info + return conn_info else: # Connection points have not changed, so do nothing self.logger.info("no new connection_points provided, nothing to be done") + return -if __name__ == '__main__': +if __name__ == "__main__": # Init logger log_format = 
"%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(funcName)s(): %(message)s" - log_formatter = logging.Formatter(log_format, datefmt='%Y-%m-%dT%H:%M:%S') + log_formatter = logging.Formatter(log_format, datefmt="%Y-%m-%dT%H:%M:%S") handler = logging.StreamHandler() handler.setFormatter(log_formatter) - logger = logging.getLogger('ro.sdn.junipercontrail') + logger = logging.getLogger("ro.sdn.junipercontrail") # logger.setLevel(level=logging.ERROR) # logger.setLevel(level=logging.INFO) logger.setLevel(level=logging.DEBUG) logger.addHandler(handler) # Read config - with open('test.yaml') as f: + with open("test.yaml") as f: config = yaml.safe_load(f.read()) - wim = {'wim_url': config.pop('wim_url')} - wim_account = {'user': config.pop('user'), 'password': config.pop('password')} - logger.info('wim: {}, wim_account: {}, config: {}'.format(wim, wim_account, config)) + + wim = {"wim_url": config.pop("wim_url")} + wim_account = {"user": config.pop("user"), "password": config.pop("password")} + logger.info("wim: {}, wim_account: {}, config: {}".format(wim, wim_account, config)) # Init controller - juniper_contrail = JuniperContrail(wim=wim, wim_account=wim_account, config=config, logger=logger) + juniper_contrail = JuniperContrail( + wim=wim, wim_account=wim_account, config=config, logger=logger + ) # Tests # Generate VNI for i in range(5): vni = juniper_contrail._generate_vni() juniper_contrail.used_vni.add(vni) + print(juniper_contrail.used_vni) # juniper_contrail.used_vni.remove(1000003) print(juniper_contrail.used_vni) + for i in range(2): vni = juniper_contrail._generate_vni() juniper_contrail.used_vni.add(vni) + print(juniper_contrail.used_vni) # 0. Check credentials - print('0. Check credentials') + print("0. Check credentials") # juniper_contrail.check_credentials() # 1 - Create and delete connectivity service @@ -720,8 +890,8 @@ if __name__ == '__main__': "service_endpoint_encapsulation_info": { "switch_dpid": "LEAF-1", "switch_port": "xe-0/0/17", - "vlan": "501" - } + "vlan": "501", + }, } conn_point_1 = { "service_endpoint_id": "0000:81:10.3", @@ -729,8 +899,8 @@ if __name__ == '__main__': "service_endpoint_encapsulation_info": { "switch_dpid": "LEAF-2", "switch_port": "xe-0/0/16", - "vlan": "501" - } + "vlan": "501", + }, } conn_point_2 = { "service_endpoint_id": "0000:08:11.7", @@ -738,8 +908,8 @@ if __name__ == '__main__': "service_endpoint_encapsulation_info": { "switch_dpid": "LEAF-2", "switch_port": "xe-0/0/16", - "vlan": "502" - } + "vlan": "502", + }, } conn_point_3 = { "service_endpoint_id": "0000:83:10.4", @@ -747,15 +917,17 @@ if __name__ == '__main__': "service_endpoint_encapsulation_info": { "switch_dpid": "LEAF-1", "switch_port": "xe-0/0/17", - "vlan": "502" - } + "vlan": "502", + }, } # 1 - Define connection points logger.debug("create first connection service") print("Create connectivity service") connection_points = [conn_point_0, conn_point_1] - service_id, conn_info = juniper_contrail.create_connectivity_service("ELAN", connection_points) + service_id, conn_info = juniper_contrail.create_connectivity_service( + "ELAN", connection_points + ) logger.info("Created connectivity service 1") logger.info(service_id) logger.info(yaml.safe_dump(conn_info, indent=4, default_flow_style=False)) @@ -763,7 +935,9 @@ if __name__ == '__main__': logger.debug("create second connection service") print("Create connectivity service") connection_points = [conn_point_2, conn_point_3] - service_id2, conn_info2 = juniper_contrail.create_connectivity_service("ELAN", 
connection_points) + service_id2, conn_info2 = juniper_contrail.create_connectivity_service( + "ELAN", connection_points + ) logger.info("Created connectivity service 2") logger.info(service_id2) logger.info(yaml.safe_dump(conn_info2, indent=4, default_flow_style=False)) diff --git a/RO-SDN-juniper_contrail/setup.py b/RO-SDN-juniper_contrail/setup.py index fef59051..4f158e56 100644 --- a/RO-SDN-juniper_contrail/setup.py +++ b/RO-SDN-juniper_contrail/setup.py @@ -30,27 +30,31 @@ osm-ro plugin for Juniper Contrail SDN setup( name=_name, - description='OSM RO SDN plugin for Juniper Contrail', + description="OSM RO SDN plugin for Juniper Contrail", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='OSM_TECH@list.etsi.org', - maintainer='ETSI OSM', - maintainer_email='OSM_TECH@list.etsi.org', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="OSM_TECH@list.etsi.org", + maintainer="ETSI OSM", + maintainer_email="OSM_TECH@list.etsi.org", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, - #dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"], + # dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"], install_requires=[ "requests", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rosdn.plugins': ['rosdn_juniper_contrail = osm_rosdn_juniper_contrail.sdn_assist_juniper_contrail:JuniperContrail'], + "osm_rosdn.plugins": [ + "rosdn_juniper_contrail = osm_rosdn_juniper_contrail.sdn_assist_juniper_contrail:JuniperContrail" + ], }, ) diff --git a/RO-SDN-juniper_contrail/tox.ini b/RO-SDN-juniper_contrail/tox.ini index 4ecb427d..32a16102 100644 --- a/RO-SDN-juniper_contrail/tox.ini +++ b/RO-SDN-juniper_contrail/tox.ini @@ -27,7 +27,7 @@ commands=python3 -m unittest discover -v basepython = python3 deps = flake8 commands = flake8 osm_rosdn_juniper_contrail --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-SDN-odl_openflow/osm_rosdn_odlof/odl_of.py b/RO-SDN-odl_openflow/osm_rosdn_odlof/odl_of.py index 7cf7ee26..644507f9 100644 --- a/RO-SDN-odl_openflow/osm_rosdn_odlof/odl_of.py +++ b/RO-SDN-odl_openflow/osm_rosdn_odlof/odl_of.py @@ -32,7 +32,12 @@ import json import requests import base64 import logging -from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnConnectionException, OpenflowConnUnexpectedResponse +from osm_ro_plugin.openflow_conn import ( + OpenflowConn, + OpenflowConnConnectionException, + OpenflowConnUnexpectedResponse, +) + # OpenflowConnException, OpenflowConnAuthException, OpenflowConnNotFoundException, # OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented @@ -44,45 +49,54 @@ class OfConnOdl(OpenflowConn): 
"""OpenDayLight connector. No MAC learning is used""" def __init__(self, params): - """ Constructor. - Params: dictionary with the following keys: - of_dpid: DPID to use for this controller - of_url: must be [http://HOST:PORT/] - of_user: user credentials, can be missing or None - of_password: password credentials - of_debug: debug level for logging. Default to ERROR - other keys are ignored - Raise an exception if same parameter is missing or wrong + """Constructor. + Params: dictionary with the following keys: + of_dpid: DPID to use for this controller + of_url: must be [http://HOST:PORT/] + of_user: user credentials, can be missing or None + of_password: password credentials + of_debug: debug level for logging. Default to ERROR + other keys are ignored + Raise an exception if same parameter is missing or wrong """ - OpenflowConn.__init__(self, params) # check params url = params.get("of_url") + if not url: raise ValueError("'url' must be provided") + if not url.startswith("http"): url = "http://" + url + if not url.endswith("/"): url = url + "/" + self.url = url # internal variables self.name = "OpenDayLight" - self.headers = {'content-type': 'application/json', 'Accept': 'application/json'} + self.headers = { + "content-type": "application/json", + "Accept": "application/json", + } self.auth = None self.pp2ofi = {} # From Physical Port to OpenFlow Index self.ofi2pp = {} # From OpenFlow Index to Physical Port self.dpid = str(params["of_dpid"]) - self.id = 'openflow:'+str(int(self.dpid.replace(':', ''), 16)) + self.id = "openflow:" + str(int(self.dpid.replace(":", ""), 16)) + if params and params.get("of_user"): of_password = params.get("of_password", "") - self.auth = base64.b64encode(bytes(params["of_user"] + ":" + of_password, "utf-8")) + self.auth = base64.b64encode( + bytes(params["of_user"] + ":" + of_password, "utf-8") + ) self.auth = self.auth.decode() - self.headers['authorization'] = 'Basic ' + self.auth + self.headers["authorization"] = "Basic " + self.auth - self.logger = logging.getLogger('ro.sdn.onosof') + self.logger = logging.getLogger("ro.sdn.onosof") # self.logger.setLevel(getattr(logging, params.get("of_debug", "ERROR"))) self.logger.debug("odlof plugin initialized") @@ -93,66 +107,110 @@ class OfConnOdl(OpenflowConn): Raise an OpenflowConnConnectionException exception if fails with text_error """ try: - of_response = requests.get(self.url + "restconf/operational/opendaylight-inventory:nodes", - headers=self.headers) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + of_response = requests.get( + self.url + "restconf/operational/opendaylight-inventory:nodes", + headers=self.headers, + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("get_of_switches " + error_text) - raise OpenflowConnUnexpectedResponse("Error get_of_switches " + error_text) + + raise OpenflowConnUnexpectedResponse( + "Error get_of_switches " + error_text + ) self.logger.debug("get_of_switches " + error_text) info = of_response.json() if not isinstance(info, dict): - self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info)) - raise OpenflowConnUnexpectedResponse("Unexpected response, not a dict. Wrong version?") + self.logger.error( + "get_of_switches. Unexpected response, not a dict: %s", + str(info), + ) - nodes = info.get('nodes') + raise OpenflowConnUnexpectedResponse( + "Unexpected response, not a dict. Wrong version?" 
+ ) + + nodes = info.get("nodes") if type(nodes) is not dict: - self.logger.error("get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s", - str(type(info))) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes', not found or not a dict." - " Wrong version?") + self.logger.error( + "get_of_switches. Unexpected response at 'nodes', not found or not a dict: %s", + str(type(info)), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'nodes', not found or not a dict." + " Wrong version?" + ) - node_list = nodes.get('node') + node_list = nodes.get("node") if type(node_list) is not list: - self.logger.error("get_of_switches. Unexpected response, at 'nodes':'node', " - "not found or not a list: %s", str(type(node_list))) - raise OpenflowConnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found " - "or not a list. Wrong version?") + self.logger.error( + "get_of_switches. Unexpected response, at 'nodes':'node', " + "not found or not a list: %s", + str(type(node_list)), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response, at 'nodes':'node', not found " + "or not a list. Wrong version?" + ) switch_list = [] for node in node_list: - node_id = node.get('id') + node_id = node.get("id") if node_id is None: - self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s", - str(node)) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. " - "Wrong version?") + self.logger.error( + "get_of_switches. Unexpected response at 'nodes':'node'[]:'id', not found: %s", + str(node), + ) - if node_id == 'controller-config': + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'nodes':'node'[]:'id', not found. " + "Wrong version?" + ) + + if node_id == "controller-config": continue - node_ip_address = node.get('flow-node-inventory:ip-address') + node_ip_address = node.get("flow-node-inventory:ip-address") if node_ip_address is None: - self.logger.error("get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:" - "ip-address', not found: %s", str(node)) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:" - "'flow-node-inventory:ip-address', not found. Wrong version?") - - node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16) - switch_list.append((':'.join(a+b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])), - node_ip_address)) - return switch_list + self.logger.error( + "get_of_switches. Unexpected response at 'nodes':'node'[]:'flow-node-inventory:" + "ip-address', not found: %s", + str(node), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'nodes':'node'[]:" + "'flow-node-inventory:ip-address', not found. Wrong version?" 
+ ) + + node_id_hex = hex(int(node_id.split(":")[1])).split("x")[1].zfill(16) + switch_list.append( + ( + ":".join( + a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2]) + ), + node_ip_address, + ) + ) + return switch_list except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_switches " + error_text) + raise OpenflowConnConnectionException(error_text) except ValueError as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_switches " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def obtain_port_correspondence(self): @@ -162,42 +220,73 @@ class OfConnOdl(OpenflowConn): Raise a OpenflowConnConnectionException expection in case of failure """ try: - of_response = requests.get(self.url + "restconf/operational/opendaylight-inventory:nodes", - headers=self.headers) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + of_response = requests.get( + self.url + "restconf/operational/opendaylight-inventory:nodes", + headers=self.headers, + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("obtain_port_correspondence " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("obtain_port_correspondence " + error_text) info = of_response.json() if not isinstance(info, dict): - self.logger.error("obtain_port_correspondence. Unexpected response not a dict: %s", str(info)) - raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?") + self.logger.error( + "obtain_port_correspondence. Unexpected response not a dict: %s", + str(info), + ) - nodes = info.get('nodes') - if not isinstance(nodes, dict): - self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes', " - "not found or not a dict: %s", str(type(nodes))) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes',not found or not a dict. " - "Wrong version?") + raise OpenflowConnUnexpectedResponse( + "Unexpected openflow response, not a dict. Wrong version?" + ) - node_list = nodes.get('node') + nodes = info.get("nodes") + if not isinstance(nodes, dict): + self.logger.error( + "obtain_port_correspondence. Unexpected response at 'nodes', " + "not found or not a dict: %s", + str(type(nodes)), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'nodes',not found or not a dict. " + "Wrong version?" + ) + + node_list = nodes.get("node") if not isinstance(node_list, list): - self.logger.error("obtain_port_correspondence. Unexpected response, at 'nodes':'node', " - "not found or not a list: %s", str(type(node_list))) - raise OpenflowConnUnexpectedResponse("Unexpected response, at 'nodes':'node', not found or not a list." - " Wrong version?") + self.logger.error( + "obtain_port_correspondence. Unexpected response, at 'nodes':'node', " + "not found or not a list: %s", + str(type(node_list)), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response, at 'nodes':'node', not found or not a list." + " Wrong version?" + ) for node in node_list: - node_id = node.get('id') + node_id = node.get("id") if node_id is None: - self.logger.error("obtain_port_correspondence. 
Unexpected response at 'nodes':'node'[]:'id', " - "not found: %s", str(node)) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'id', not found. " - "Wrong version?") - - if node_id == 'controller-config': + self.logger.error( + "obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:'id', " + "not found: %s", + str(node), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'nodes':'node'[]:'id', not found. " + "Wrong version?" + ) + + if node_id == "controller-config": continue # Figure out if this is the appropriate switch. The 'id' is 'openflow:' plus the decimal value @@ -206,23 +295,39 @@ class OfConnOdl(OpenflowConn): if self.id != node_id: continue - node_connector_list = node.get('node-connector') + node_connector_list = node.get("node-connector") if not isinstance(node_connector_list, list): - self.logger.error("obtain_port_correspondence. Unexpected response at " - "'nodes':'node'[]:'node-connector', not found or not a list: %s", str(node)) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:'node-connector', " - "not found or not a list. Wrong version?") + self.logger.error( + "obtain_port_correspondence. Unexpected response at " + "'nodes':'node'[]:'node-connector', not found or not a list: %s", + str(node), + ) - for node_connector in node_connector_list: - self.pp2ofi[str(node_connector['flow-node-inventory:name'])] = str(node_connector['id']) - self.ofi2pp[node_connector['id']] = str(node_connector['flow-node-inventory:name']) + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'nodes':'node'[]:'node-connector', " + "not found or not a list. Wrong version?" + ) - node_ip_address = node.get('flow-node-inventory:ip-address') + for node_connector in node_connector_list: + self.pp2ofi[str(node_connector["flow-node-inventory:name"])] = str( + node_connector["id"] + ) + self.ofi2pp[node_connector["id"]] = str( + node_connector["flow-node-inventory:name"] + ) + + node_ip_address = node.get("flow-node-inventory:ip-address") if node_ip_address is None: - self.logger.error("obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:" - "'flow-node-inventory:ip-address', not found: %s", str(node)) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'nodes':'node'[]:" - "'flow-node-inventory:ip-address', not found. Wrong version?") + self.logger.error( + "obtain_port_correspondence. Unexpected response at 'nodes':'node'[]:" + "'flow-node-inventory:ip-address', not found: %s", + str(node), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'nodes':'node'[]:" + "'flow-node-inventory:ip-address', not found. Wrong version?" 
+ ) # If we found the appropriate dpid no need to continue in the for loop break @@ -232,11 +337,13 @@ class OfConnOdl(OpenflowConn): except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("obtain_port_correspondence " + error_text) + raise OpenflowConnConnectionException(error_text) except ValueError as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("obtain_port_correspondence " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def get_of_rules(self, translate_of_ports=True): @@ -256,22 +363,28 @@ class OfConnOdl(OpenflowConn): Raise a OpenflowConnConnectionException exception in case of failure """ - try: # get rules if len(self.ofi2pp) == 0: self.obtain_port_correspondence() - of_response = requests.get(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id + - "/table/0", headers=self.headers) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + of_response = requests.get( + self.url + + "restconf/config/opendaylight-inventory:nodes/node/" + + self.id + + "/table/0", + headers=self.headers, + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) # The configured page does not exist if there are no rules installed. In that case we return an empty dict if of_response.status_code == 404: return [] - elif of_response.status_code != 200: self.logger.warning("get_of_rules " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) self.logger.debug("get_of_rules " + error_text) @@ -279,116 +392,172 @@ class OfConnOdl(OpenflowConn): info = of_response.json() if not isinstance(info, dict): - self.logger.error("get_of_rules. Unexpected response not a dict: %s", str(info)) - raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. Wrong version?") + self.logger.error( + "get_of_rules. Unexpected response not a dict: %s", str(info) + ) - table = info.get('flow-node-inventory:table') - if not isinstance(table, list): - self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table', " - "not a list: %s", str(type(table))) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table', not a list. " - "Wrong version?") + raise OpenflowConnUnexpectedResponse( + "Unexpected openflow response, not a dict. Wrong version?" + ) - flow_list = table[0].get('flow') + table = info.get("flow-node-inventory:table") + if not isinstance(table, list): + self.logger.error( + "get_of_rules. Unexpected response at 'flow-node-inventory:table', " + "not a list: %s", + str(type(table)), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'flow-node-inventory:table', not a list. " + "Wrong version?" + ) + + flow_list = table[0].get("flow") if flow_list is None: return [] if not isinstance(flow_list, list): - self.logger.error("get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a " - "list: %s", str(type(flow_list))) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'flow-node-inventory:table'[0]:'flow', " - "not a list. Wrong version?") + self.logger.error( + "get_of_rules. Unexpected response at 'flow-node-inventory:table'[0]:'flow', not a " + "list: %s", + str(type(flow_list)), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'flow-node-inventory:table'[0]:'flow', " + "not a list. Wrong version?" 
+ ) # TODO translate ports according to translate_of_ports parameter rules = [] # Response list for flow in flow_list: - if not ('id' in flow and 'match' in flow and 'instructions' in flow and - 'instruction' in flow['instructions'] and - 'apply-actions' in flow['instructions']['instruction'][0] and - 'action' in flow['instructions']['instruction'][0]['apply-actions']): - raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more elements are " - "missing. Wrong version?") - - flow['instructions']['instruction'][0]['apply-actions']['action'] + if not ( + "id" in flow + and "match" in flow + and "instructions" in flow + and "instruction" in flow["instructions"] + and "apply-actions" in flow["instructions"]["instruction"][0] + and "action" + in flow["instructions"]["instruction"][0]["apply-actions"] + ): + raise OpenflowConnUnexpectedResponse( + "unexpected openflow response, one or more elements are " + "missing. Wrong version?" + ) + + flow["instructions"]["instruction"][0]["apply-actions"]["action"] rule = dict() - rule['switch'] = self.dpid - rule['priority'] = flow.get('priority') + rule["switch"] = self.dpid + rule["priority"] = flow.get("priority") # rule['name'] = flow['id'] # rule['cookie'] = flow['cookie'] - if 'in-port' in flow['match']: - in_port = flow['match']['in-port'] + if "in-port" in flow["match"]: + in_port = flow["match"]["in-port"] if in_port not in self.ofi2pp: - raise OpenflowConnUnexpectedResponse("Error: Ingress port {} is not in switch port list". - format(in_port)) + raise OpenflowConnUnexpectedResponse( + "Error: Ingress port {} is not in switch port list".format( + in_port + ) + ) if translate_of_ports: in_port = self.ofi2pp[in_port] - rule['ingress_port'] = in_port - - if 'vlan-match' in flow['match'] and 'vlan-id' in flow['match']['vlan-match'] and \ - 'vlan-id' in flow['match']['vlan-match']['vlan-id'] and \ - 'vlan-id-present' in flow['match']['vlan-match']['vlan-id'] and \ - flow['match']['vlan-match']['vlan-id']['vlan-id-present'] is True: - rule['vlan_id'] = flow['match']['vlan-match']['vlan-id']['vlan-id'] - - if 'ethernet-match' in flow['match'] and 'ethernet-destination' in flow['match']['ethernet-match'] \ - and 'address' in flow['match']['ethernet-match']['ethernet-destination']: - rule['dst_mac'] = flow['match']['ethernet-match']['ethernet-destination']['address'] - - instructions = flow['instructions']['instruction'][0]['apply-actions']['action'] + rule["ingress_port"] = in_port + + if ( + "vlan-match" in flow["match"] + and "vlan-id" in flow["match"]["vlan-match"] + and "vlan-id" in flow["match"]["vlan-match"]["vlan-id"] + and "vlan-id-present" in flow["match"]["vlan-match"]["vlan-id"] + and flow["match"]["vlan-match"]["vlan-id"]["vlan-id-present"] + is True + ): + rule["vlan_id"] = flow["match"]["vlan-match"]["vlan-id"][ + "vlan-id" + ] + + if ( + "ethernet-match" in flow["match"] + and "ethernet-destination" in flow["match"]["ethernet-match"] + and "address" + in flow["match"]["ethernet-match"]["ethernet-destination"] + ): + rule["dst_mac"] = flow["match"]["ethernet-match"][ + "ethernet-destination" + ]["address"] + + instructions = flow["instructions"]["instruction"][0]["apply-actions"][ + "action" + ] max_index = 0 for instruction in instructions: - if instruction['order'] > max_index: - max_index = instruction['order'] + if instruction["order"] > max_index: + max_index = instruction["order"] - actions = [None]*(max_index+1) + actions = [None] * (max_index + 1) for instruction in instructions: - if 'output-action' in 
instruction: - if 'output-node-connector' not in instruction['output-action']: - raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more elementa " - "are missing. Wrong version?") + if "output-action" in instruction: + if "output-node-connector" not in instruction["output-action"]: + raise OpenflowConnUnexpectedResponse( + "unexpected openflow response, one or more elementa " + "are missing. Wrong version?" + ) + + out_port = instruction["output-action"]["output-node-connector"] - out_port = instruction['output-action']['output-node-connector'] if out_port not in self.ofi2pp: - raise OpenflowConnUnexpectedResponse("Error: Output port {} is not in switch port list". - format(out_port)) + raise OpenflowConnUnexpectedResponse( + "Error: Output port {} is not in switch port list".format( + out_port + ) + ) if translate_of_ports: out_port = self.ofi2pp[out_port] - actions[instruction['order']] = ('out', out_port) - - elif 'strip-vlan-action' in instruction: - actions[instruction['order']] = ('vlan', None) - - elif 'set-field' in instruction: - if not ('vlan-match' in instruction['set-field'] and - 'vlan-id' in instruction['set-field']['vlan-match'] and - 'vlan-id' in instruction['set-field']['vlan-match']['vlan-id']): - raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more elements " - "are missing. Wrong version?") - - actions[instruction['order']] = ('vlan', - instruction['set-field']['vlan-match']['vlan-id']['vlan-id']) + actions[instruction["order"]] = ("out", out_port) + elif "strip-vlan-action" in instruction: + actions[instruction["order"]] = ("vlan", None) + elif "set-field" in instruction: + if not ( + "vlan-match" in instruction["set-field"] + and "vlan-id" in instruction["set-field"]["vlan-match"] + and "vlan-id" + in instruction["set-field"]["vlan-match"]["vlan-id"] + ): + raise OpenflowConnUnexpectedResponse( + "unexpected openflow response, one or more elements " + "are missing. Wrong version?" 
+ ) + + actions[instruction["order"]] = ( + "vlan", + instruction["set-field"]["vlan-match"]["vlan-id"][ + "vlan-id" + ], + ) actions = [x for x in actions if x is not None] - rule['actions'] = list(actions) + rule["actions"] = list(actions) rules.append(rule) return rules except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_rules " + error_text) + raise OpenflowConnConnectionException(error_text) except ValueError as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_rules " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def del_flow(self, flow_name): @@ -397,20 +566,32 @@ class OfConnOdl(OpenflowConn): :param flow_name: flow_name, this is the rule name :return: Raise a OpenflowConnConnectionException expection in case of failure """ - try: - of_response = requests.delete(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id + - "/table/0/flow/" + flow_name, headers=self.headers) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + of_response = requests.delete( + self.url + + "restconf/config/opendaylight-inventory:nodes/node/" + + self.id + + "/table/0/flow/" + + flow_name, + headers=self.headers, + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("del_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("del_flow OK " + error_text) + return None except requests.exceptions.RequestException as e: # raise an exception in case of contection error error_text = type(e).__name__ + ": " + str(e) self.logger.error("del_flow " + error_text) + raise OpenflowConnConnectionException(error_text) def new_flow(self, data): @@ -427,92 +608,119 @@ class OfConnOdl(OpenflowConn): ('out', port): send to this port :return: Raise a OpenflowConnConnectionException exception in case of failure """ - try: self.logger.debug("new_flow data: {}".format(data)) + if len(self.pp2ofi) == 0: self.obtain_port_correspondence() # We have to build the data for the opendaylight call from the generic data flow = { - 'id': data['name'], - 'flow-name': data['name'], - 'idle-timeout': 0, - 'hard-timeout': 0, - 'table_id': 0, - 'priority': data.get('priority'), - 'match': {} + "id": data["name"], + "flow-name": data["name"], + "idle-timeout": 0, + "hard-timeout": 0, + "table_id": 0, + "priority": data.get("priority"), + "match": {}, } - sdata = {'flow-node-inventory:flow': [flow]} - if not data['ingress_port'] in self.pp2ofi: - error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch' + sdata = {"flow-node-inventory:flow": [flow]} + + if not data["ingress_port"] in self.pp2ofi: + error_text = ( + "Error. 
Port " + + data["ingress_port"] + + " is not present in the switch" + ) self.logger.warning("new_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) - flow['match']['in-port'] = self.pp2ofi[data['ingress_port']] - if data.get('dst_mac'): - flow['match']['ethernet-match'] = { - 'ethernet-destination': {'address': data['dst_mac']} + + flow["match"]["in-port"] = self.pp2ofi[data["ingress_port"]] + + if data.get("dst_mac"): + flow["match"]["ethernet-match"] = { + "ethernet-destination": {"address": data["dst_mac"]} } - if data.get('vlan_id'): - flow['match']['vlan-match'] = { - 'vlan-id': { - 'vlan-id-present': True, - 'vlan-id': int(data['vlan_id']) + + if data.get("vlan_id"): + flow["match"]["vlan-match"] = { + "vlan-id": { + "vlan-id-present": True, + "vlan-id": int(data["vlan_id"]), } } + actions = [] - flow['instructions'] = { - 'instruction': [{ - 'order': 1, - 'apply-actions': {'action': actions} - }] + flow["instructions"] = { + "instruction": [{"order": 1, "apply-actions": {"action": actions}}] } order = 0 - for action in data['actions']: - new_action = {'order': order} + for action in data["actions"]: + new_action = {"order": order} if action[0] == "vlan": if action[1] is None: # strip vlan - new_action['strip-vlan-action'] = {} + new_action["strip-vlan-action"] = {} else: - new_action['set-field'] = { - 'vlan-match': { - 'vlan-id': { - 'vlan-id-present': True, - 'vlan-id': int(action[1]) + new_action["set-field"] = { + "vlan-match": { + "vlan-id": { + "vlan-id-present": True, + "vlan-id": int(action[1]), } } } - elif action[0] == 'out': - new_action['output-action'] = {} + elif action[0] == "out": + new_action["output-action"] = {} + if not action[1] in self.pp2ofi: - error_msg = 'Port ' + action[1] + ' is not present in the switch' + error_msg = ( + "Port " + action[1] + " is not present in the switch" + ) + raise OpenflowConnUnexpectedResponse(error_msg) - new_action['output-action']['output-node-connector'] = self.pp2ofi[action[1]] + new_action["output-action"]["output-node-connector"] = self.pp2ofi[ + action[1] + ] else: error_msg = "Unknown item '{}' in action list".format(action[0]) self.logger.error("new_flow " + error_msg) + raise OpenflowConnUnexpectedResponse(error_msg) actions.append(new_action) order += 1 # print json.dumps(sdata) - of_response = requests.put(self.url + "restconf/config/opendaylight-inventory:nodes/node/" + self.id + - "/table/0/flow/" + data['name'], headers=self.headers, data=json.dumps(sdata)) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + of_response = requests.put( + self.url + + "restconf/config/opendaylight-inventory:nodes/node/" + + self.id + + "/table/0/flow/" + + data["name"], + headers=self.headers, + data=json.dumps(sdata), + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("new_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("new_flow OK " + error_text) - return None + return None except requests.exceptions.RequestException as e: # raise an exception in case of contection error error_text = type(e).__name__ + ": " + str(e) self.logger.error("new_flow " + error_text) + raise OpenflowConnConnectionException(error_text) def clear_all_flows(self): @@ -521,15 +729,27 @@ class OfConnOdl(OpenflowConn): :return: Raise a OpenflowConnConnectionException expection in case of failure """ try: - of_response = requests.delete(self.url + 
"restconf/config/opendaylight-inventory:nodes/node/" + self.id + - "/table/0", headers=self.headers) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) - if of_response.status_code != 200 and of_response.status_code != 404: # HTTP_Not_Found + of_response = requests.delete( + self.url + + "restconf/config/opendaylight-inventory:nodes/node/" + + self.id + + "/table/0", + headers=self.headers, + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + + if of_response.status_code != 200 and of_response.status_code != 404: self.logger.warning("clear_all_flows " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("clear_all_flows OK " + error_text) + return None except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("clear_all_flows " + error_text) + raise OpenflowConnConnectionException(error_text) diff --git a/RO-SDN-odl_openflow/osm_rosdn_odlof/sdnconn_odlof.py b/RO-SDN-odl_openflow/osm_rosdn_odlof/sdnconn_odlof.py index 0b139218..ce53b0df 100644 --- a/RO-SDN-odl_openflow/osm_rosdn_odlof/sdnconn_odlof.py +++ b/RO-SDN-odl_openflow/osm_rosdn_odlof/sdnconn_odlof.py @@ -25,11 +25,9 @@ from .odl_of import OfConnOdl class SdnConnectorOdlOf(SdnConnectorOpenFlow): - def __init__(self, wim, wim_account, config=None, logger=None): - """Creates a connectivity based on pro-active openflow rules - """ - self.logger = logging.getLogger('ro.sdn.odlof') + """Creates a connectivity based on pro-active openflow rules""" + self.logger = logging.getLogger("ro.sdn.odlof") super().__init__(wim, wim_account, config, logger) of_params = { "of_url": wim["wim_url"], diff --git a/RO-SDN-odl_openflow/setup.py b/RO-SDN-odl_openflow/setup.py index 32ebde10..4d6580a6 100644 --- a/RO-SDN-odl_openflow/setup.py +++ b/RO-SDN-odl_openflow/setup.py @@ -30,26 +30,30 @@ osm-ro plugin for OpenDayLight SDN using pre-computed openflow rules setup( name=_name, - description='OSM RO plugin for SDN with odl openflow rules', + description="OSM RO plugin for SDN with odl openflow rules", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='alfonso.tiernosepulveda@telefonica.com', - maintainer='Alfonso Tierno', - maintainer_email='alfonso.tiernosepulveda@telefonica.com', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="alfonso.tiernosepulveda@telefonica.com", + maintainer="Alfonso Tierno", + maintainer_email="alfonso.tiernosepulveda@telefonica.com", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ "requests", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rosdn.plugins': ['rosdn_odlof = osm_rosdn_odlof.sdnconn_odlof:SdnConnectorOdlOf'], + "osm_rosdn.plugins": [ + "rosdn_odlof = osm_rosdn_odlof.sdnconn_odlof:SdnConnectorOdlOf" + ], }, ) diff --git a/RO-SDN-odl_openflow/tox.ini 
b/RO-SDN-odl_openflow/tox.ini index 68ba259d..47743ef7 100644 --- a/RO-SDN-odl_openflow/tox.ini +++ b/RO-SDN-odl_openflow/tox.ini @@ -29,7 +29,7 @@ deps = flake8 -r{toxinidir}/requirements.txt install_command = python3 -m pip install -U {opts} {packages} commands = flake8 osm_rosdn_odlof --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py b/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py index a29b4082..cba505cb 100644 --- a/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py +++ b/RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py @@ -32,7 +32,12 @@ import json import requests import base64 import logging -from osm_ro_plugin.openflow_conn import OpenflowConn, OpenflowConnConnectionException, OpenflowConnUnexpectedResponse +from osm_ro_plugin.openflow_conn import ( + OpenflowConn, + OpenflowConnConnectionException, + OpenflowConnUnexpectedResponse, +) + # OpenflowConnException, OpenflowConnAuthException, OpenflowConnNotFoundException, \ # OpenflowConnConflictException, OpenflowConnNotSupportedException, OpenflowConnNotImplemented @@ -44,49 +49,58 @@ class OfConnOnos(OpenflowConn): """ ONOS connector. No MAC learning is used """ + def __init__(self, params): - """ Constructor. - :param params: dictionary with the following keys: - of_dpid: DPID to use for this controller ?? Does a controller have a dpid? - of_url: must be [http://HOST:PORT/] - of_user: user credentials, can be missing or None - of_password: password credentials - of_debug: debug level for logging. Default to ERROR - other keys are ignored - Raise an exception if same parameter is missing or wrong + """Constructor. + :param params: dictionary with the following keys: + of_dpid: DPID to use for this controller ?? Does a controller have a dpid? + of_url: must be [http://HOST:PORT/] + of_user: user credentials, can be missing or None + of_password: password credentials + of_debug: debug level for logging. 
Default to ERROR + other keys are ignored + Raise an exception if same parameter is missing or wrong """ - OpenflowConn.__init__(self, params) # check params url = params.get("of_url") + if not url: raise ValueError("'url' must be provided") + if not url.startswith("http"): url = "http://" + url + if not url.endswith("/"): url = url + "/" + self.url = url + "onos/v1/" # internal variables self.name = "onosof" - self.headers = {'content-type': 'application/json', 'accept': 'application/json'} + self.headers = { + "content-type": "application/json", + "accept": "application/json", + } self.auth = "None" self.pp2ofi = {} # From Physical Port to OpenFlow Index self.ofi2pp = {} # From OpenFlow Index to Physical Port self.dpid = str(params["of_dpid"]) - self.id = 'of:'+str(self.dpid.replace(':', '')) + self.id = "of:" + str(self.dpid.replace(":", "")) # TODO This may not be straightforward if params.get("of_user"): of_password = params.get("of_password", "") - self.auth = base64.b64encode(bytes(params["of_user"] + ":" + of_password, "utf-8")) + self.auth = base64.b64encode( + bytes(params["of_user"] + ":" + of_password, "utf-8") + ) self.auth = self.auth.decode() - self.headers['authorization'] = 'Basic ' + self.auth + self.headers["authorization"] = "Basic " + self.auth - self.logger = logging.getLogger('ro.sdn.onosof') + self.logger = logging.getLogger("ro.sdn.onosof") # self.logger.setLevel( getattr(logging, params.get("of_debug", "ERROR")) ) self.logger.debug("onosof plugin initialized") self.ip_address = None @@ -98,60 +112,89 @@ class OfConnOnos(OpenflowConn): Raise a openflowconnUnexpectedResponse expection in case of failure """ try: - self.headers['content-type'] = 'text/plain' + self.headers["content-type"] = "text/plain" of_response = requests.get(self.url + "devices", headers=self.headers) - error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + error_text = "Openflow response %d: %s" % ( + of_response.status_code, + of_response.text, + ) + if of_response.status_code != 200: self.logger.warning("get_of_switches " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) self.logger.debug("get_of_switches " + error_text) info = of_response.json() if type(info) != dict: - self.logger.error("get_of_switches. Unexpected response, not a dict: %s", str(info)) - raise OpenflowConnUnexpectedResponse("Unexpected response, not a dict. Wrong version?") + self.logger.error( + "get_of_switches. Unexpected response, not a dict: %s", str(info) + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response, not a dict. Wrong version?" + ) - node_list = info.get('devices') + node_list = info.get("devices") if type(node_list) is not list: self.logger.error( "get_of_switches. Unexpected response, at 'devices', not found or not a list: %s", - str(type(node_list))) - raise OpenflowConnUnexpectedResponse("Unexpected response, at 'devices', not found " - "or not a list. Wrong version?") + str(type(node_list)), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response, at 'devices', not found " + "or not a list. Wrong version?" + ) switch_list = [] for node in node_list: - node_id = node.get('id') + node_id = node.get("id") if node_id is None: - self.logger.error("get_of_switches. Unexpected response at 'device':'id', not found: %s", - str(node)) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'device':'id', " - "not found . Wrong version?") + self.logger.error( + "get_of_switches. 
Unexpected response at 'device':'id', not found: %s", + str(node), + ) - node_ip_address = node.get('annotations').get('managementAddress') + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'device':'id', " + "not found . Wrong version?" + ) + + node_ip_address = node.get("annotations").get("managementAddress") if node_ip_address is None: self.logger.error( "get_of_switches. Unexpected response at 'device':'managementAddress', not found: %s", - str(node)) - raise OpenflowConnUnexpectedResponse( - "Unexpected response at 'device':'managementAddress', not found. Wrong version?") + str(node), + ) - node_id_hex = hex(int(node_id.split(':')[1])).split('x')[1].zfill(16) + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'device':'managementAddress', not found. Wrong version?" + ) + node_id_hex = hex(int(node_id.split(":")[1])).split("x")[1].zfill(16) switch_list.append( - (':'.join(a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2])), node_ip_address)) - return switch_list + ( + ":".join( + a + b for a, b in zip(node_id_hex[::2], node_id_hex[1::2]) + ), + node_ip_address, + ) + ) + return switch_list except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_switches " + error_text) + raise OpenflowConnConnectionException(error_text) except ValueError as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_switches " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def obtain_port_correspondence(self): @@ -161,36 +204,55 @@ class OfConnOnos(OpenflowConn): Raise a openflowconnUnexpectedResponse expection in case of failure """ try: - self.headers['content-type'] = 'text/plain' - of_response = requests.get(self.url + "devices/" + self.id + "/ports", headers=self.headers) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + self.headers["content-type"] = "text/plain" + of_response = requests.get( + self.url + "devices/" + self.id + "/ports", headers=self.headers + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) + if of_response.status_code != 200: self.logger.warning("obtain_port_correspondence " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) self.logger.debug("obtain_port_correspondence " + error_text) info = of_response.json() - node_connector_list = info.get('ports') + node_connector_list = info.get("ports") if type(node_connector_list) is not list: self.logger.error( "obtain_port_correspondence. Unexpected response at 'ports', not found or not a list: %s", - str(node_connector_list)) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'ports', not found or not " - "a list. Wrong version?") + str(node_connector_list), + ) - for node_connector in node_connector_list: - if node_connector['port'] != "local": - self.pp2ofi[str(node_connector['annotations']['portName'])] = str(node_connector['port']) - self.ofi2pp[str(node_connector['port'])] = str(node_connector['annotations']['portName']) + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'ports', not found or not " + "a list. Wrong version?" 
+ ) - node_ip_address = info['annotations']['managementAddress'] + for node_connector in node_connector_list: + if node_connector["port"] != "local": + self.pp2ofi[str(node_connector["annotations"]["portName"])] = str( + node_connector["port"] + ) + self.ofi2pp[str(node_connector["port"])] = str( + node_connector["annotations"]["portName"] + ) + + node_ip_address = info["annotations"]["managementAddress"] if node_ip_address is None: self.logger.error( "obtain_port_correspondence. Unexpected response at 'managementAddress', not found: %s", - str(self.id)) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'managementAddress', " - "not found. Wrong version?") + str(self.id), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'managementAddress', " + "not found. Wrong version?" + ) + self.ip_address = node_ip_address # print self.name, ": obtain_port_correspondence ports:", self.pp2ofi @@ -198,11 +260,13 @@ class OfConnOnos(OpenflowConn): except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("obtain_port_correspondence " + error_text) + raise OpenflowConnConnectionException(error_text) except ValueError as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("obtain_port_correspondence " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def get_of_rules(self, translate_of_ports=True): @@ -221,106 +285,138 @@ class OfConnOnos(OpenflowConn): switch: DPID, all Raise a openflowconnUnexpectedResponse exception in case of failure """ - try: - if len(self.ofi2pp) == 0: self.obtain_port_correspondence() # get rules - self.headers['content-type'] = 'text/plain' - of_response = requests.get(self.url + "flows/" + self.id, headers=self.headers) - error_text = "Openflow response %d: %s" % (of_response.status_code, of_response.text) + self.headers["content-type"] = "text/plain" + of_response = requests.get( + self.url + "flows/" + self.id, headers=self.headers + ) + error_text = "Openflow response %d: %s" % ( + of_response.status_code, + of_response.text, + ) # The configured page does not exist if there are no rules installed. In that case we return an empty dict if of_response.status_code == 404: return [] - elif of_response.status_code != 200: self.logger.warning("get_of_rules " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) + self.logger.debug("get_of_rules " + error_text) info = of_response.json() if type(info) != dict: - self.logger.error("get_of_rules. Unexpected response, not a dict: %s", str(info)) - raise OpenflowConnUnexpectedResponse("Unexpected openflow response, not a dict. " - "Wrong version?") + self.logger.error( + "get_of_rules. Unexpected response, not a dict: %s", + str(info), + ) - flow_list = info.get('flows') + raise OpenflowConnUnexpectedResponse( + "Unexpected openflow response, not a dict. Wrong version?" + ) + + flow_list = info.get("flows") if flow_list is None: return [] + if type(flow_list) is not list: self.logger.error( "get_of_rules. Unexpected response at 'flows', not a list: %s", - str(type(flow_list))) - raise OpenflowConnUnexpectedResponse("Unexpected response at 'flows', not a list. " - "Wrong version?") + str(type(flow_list)), + ) + + raise OpenflowConnUnexpectedResponse( + "Unexpected response at 'flows', not a list. Wrong version?" 
+ ) rules = [] # Response list for flow in flow_list: - if not ('id' in flow and 'selector' in flow and 'treatment' in flow and - 'instructions' in flow['treatment'] and 'criteria' in - flow['selector']): - raise OpenflowConnUnexpectedResponse("unexpected openflow response, one or more " - "elements are missing. Wrong version?") + if not ( + "id" in flow + and "selector" in flow + and "treatment" in flow + and "instructions" in flow["treatment"] + and "criteria" in flow["selector"] + ): + raise OpenflowConnUnexpectedResponse( + "unexpected openflow response, one or more " + "elements are missing. Wrong version?" + ) rule = dict() - rule['switch'] = self.dpid - rule['priority'] = flow.get('priority') - rule['name'] = flow['id'] + rule["switch"] = self.dpid + rule["priority"] = flow.get("priority") + rule["name"] = flow["id"] - for criteria in flow['selector']['criteria']: - if criteria['type'] == 'IN_PORT': - in_port = str(criteria['port']) + for criteria in flow["selector"]["criteria"]: + if criteria["type"] == "IN_PORT": + in_port = str(criteria["port"]) if in_port != "CONTROLLER": if in_port not in self.ofi2pp: - raise OpenflowConnUnexpectedResponse("Error: Ingress port {} is not " - "in switch port list".format(in_port)) + raise OpenflowConnUnexpectedResponse( + "Error: Ingress port {} is not " + "in switch port list".format(in_port) + ) + if translate_of_ports: in_port = self.ofi2pp[in_port] - rule['ingress_port'] = in_port - - elif criteria['type'] == 'VLAN_VID': - rule['vlan_id'] = criteria['vlanId'] - elif criteria['type'] == 'ETH_DST': - rule['dst_mac'] = str(criteria['mac']).lower() + rule["ingress_port"] = in_port + elif criteria["type"] == "VLAN_VID": + rule["vlan_id"] = criteria["vlanId"] + elif criteria["type"] == "ETH_DST": + rule["dst_mac"] = str(criteria["mac"]).lower() actions = [] - for instruction in flow['treatment']['instructions']: - if instruction['type'] == "OUTPUT": - out_port = str(instruction['port']) + for instruction in flow["treatment"]["instructions"]: + if instruction["type"] == "OUTPUT": + out_port = str(instruction["port"]) if out_port != "CONTROLLER": if out_port not in self.ofi2pp: - raise OpenflowConnUnexpectedResponse("Error: Output port {} is not in " - "switch port list".format(out_port)) + raise OpenflowConnUnexpectedResponse( + "Error: Output port {} is not in " + "switch port list".format(out_port) + ) if translate_of_ports: out_port = self.ofi2pp[out_port] - actions.append(('out', out_port)) + actions.append(("out", out_port)) - if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_POP": - actions.append(('vlan', 'None')) - if instruction['type'] == "L2MODIFICATION" and instruction['subtype'] == "VLAN_ID": - actions.append(('vlan', instruction['vlanId'])) + if ( + instruction["type"] == "L2MODIFICATION" + and instruction["subtype"] == "VLAN_POP" + ): + actions.append(("vlan", "None")) - rule['actions'] = actions + if ( + instruction["type"] == "L2MODIFICATION" + and instruction["subtype"] == "VLAN_ID" + ): + actions.append(("vlan", instruction["vlanId"])) + + rule["actions"] = actions rules.append(rule) - return rules + return rules except requests.exceptions.RequestException as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) self.logger.error("get_of_rules " + error_text) + raise OpenflowConnConnectionException(error_text) except ValueError as e: # ValueError in the case that JSON can not be decoded error_text = type(e).__name__ + ": " + str(e) 
self.logger.error("get_of_rules " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) def del_flow(self, flow_name): @@ -329,23 +425,28 @@ class OfConnOnos(OpenflowConn): :param flow_name: :return: Raise a openflowconnUnexpectedResponse expection in case of failure """ - try: self.logger.debug("del_flow: delete flow name {}".format(flow_name)) - self.headers['content-type'] = None - of_response = requests.delete(self.url + "flows/" + self.id + "/" + flow_name, headers=self.headers) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + self.headers["content-type"] = None + of_response = requests.delete( + self.url + "flows/" + self.id + "/" + flow_name, headers=self.headers + ) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) if of_response.status_code != 204: self.logger.warning("del_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) self.logger.debug("del_flow: {} OK,: {} ".format(flow_name, error_text)) - return None + return None except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("del_flow " + error_text) + raise OpenflowConnConnectionException(error_text) def new_flow(self, data): @@ -370,85 +471,102 @@ class OfConnOnos(OpenflowConn): # Build the dictionary with the flow rule information for ONOS flow = dict() - # flow['id'] = data['name'] - flow['tableId'] = 0 - flow['priority'] = data.get('priority') - flow['timeout'] = 0 - flow['isPermanent'] = "true" - flow['appId'] = 10 # FIXME We should create an appId for OSM - flow['selector'] = dict() - flow['selector']['criteria'] = list() + # flow["id"] = data["name"] + flow["tableId"] = 0 + flow["priority"] = data.get("priority") + flow["timeout"] = 0 + flow["isPermanent"] = "true" + flow["appId"] = 10 # FIXME We should create an appId for OSM + flow["selector"] = dict() + flow["selector"]["criteria"] = list() # Flow rule matching criteria - if not data['ingress_port'] in self.pp2ofi: - error_text = 'Error. Port ' + data['ingress_port'] + ' is not present in the switch' + if not data["ingress_port"] in self.pp2ofi: + error_text = ( + "Error. 
Port " + + data["ingress_port"] + + " is not present in the switch" + ) self.logger.warning("new_flow " + error_text) + raise OpenflowConnUnexpectedResponse(error_text) ingress_port_criteria = dict() - ingress_port_criteria['type'] = "IN_PORT" - ingress_port_criteria['port'] = self.pp2ofi[data['ingress_port']] - flow['selector']['criteria'].append(ingress_port_criteria) + ingress_port_criteria["type"] = "IN_PORT" + ingress_port_criteria["port"] = self.pp2ofi[data["ingress_port"]] + flow["selector"]["criteria"].append(ingress_port_criteria) - if 'dst_mac' in data: + if "dst_mac" in data: dst_mac_criteria = dict() dst_mac_criteria["type"] = "ETH_DST" - dst_mac_criteria["mac"] = data['dst_mac'] - flow['selector']['criteria'].append(dst_mac_criteria) + dst_mac_criteria["mac"] = data["dst_mac"] + flow["selector"]["criteria"].append(dst_mac_criteria) - if data.get('vlan_id'): + if data.get("vlan_id"): vlan_criteria = dict() vlan_criteria["type"] = "VLAN_VID" - vlan_criteria["vlanId"] = int(data['vlan_id']) - flow['selector']['criteria'].append(vlan_criteria) + vlan_criteria["vlanId"] = int(data["vlan_id"]) + flow["selector"]["criteria"].append(vlan_criteria) # Flow rule treatment - flow['treatment'] = dict() - flow['treatment']['instructions'] = list() - flow['treatment']['deferred'] = list() + flow["treatment"] = dict() + flow["treatment"]["instructions"] = list() + flow["treatment"]["deferred"] = list() - for action in data['actions']: + for action in data["actions"]: new_action = dict() if action[0] == "vlan": - new_action['type'] = "L2MODIFICATION" + new_action["type"] = "L2MODIFICATION" + if action[1] is None: - new_action['subtype'] = "VLAN_POP" + new_action["subtype"] = "VLAN_POP" else: - new_action['subtype'] = "VLAN_ID" - new_action['vlanId'] = int(action[1]) - elif action[0] == 'out': - new_action['type'] = "OUTPUT" + new_action["subtype"] = "VLAN_ID" + new_action["vlanId"] = int(action[1]) + elif action[0] == "out": + new_action["type"] = "OUTPUT" + if not action[1] in self.pp2ofi: - error_msj = 'Port ' + action[1] + ' is not present in the switch' + error_msj = ( + "Port " + action[1] + " is not present in the switch" + ) + raise OpenflowConnUnexpectedResponse(error_msj) - new_action['port'] = self.pp2ofi[action[1]] + + new_action["port"] = self.pp2ofi[action[1]] else: error_msj = "Unknown item '%s' in action list" % action[0] self.logger.error("new_flow " + error_msj) + raise OpenflowConnUnexpectedResponse(error_msj) - flow['treatment']['instructions'].append(new_action) + flow["treatment"]["instructions"].append(new_action) - self.headers['content-type'] = 'application/json' + self.headers["content-type"] = "application/json" path = self.url + "flows/" + self.id self.logger.debug("new_flow post: {}".format(flow)) - of_response = requests.post(path, headers=self.headers, data=json.dumps(flow)) + of_response = requests.post( + path, headers=self.headers, data=json.dumps(flow) + ) - error_text = "Openflow response {}: {}".format(of_response.status_code, of_response.text) + error_text = "Openflow response {}: {}".format( + of_response.status_code, of_response.text + ) if of_response.status_code != 201: self.logger.warning("new_flow " + error_text) - raise OpenflowConnUnexpectedResponse(error_text) - flowId = of_response.headers['location'][path.__len__() + 1:] + raise OpenflowConnUnexpectedResponse(error_text) - data['name'] = flowId + flowId = of_response.headers["location"][path.__len__() + 1 :] + data["name"] = flowId self.logger.debug("new_flow id: {},: {} ".format(flowId, 
error_text)) - return None + return None except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("new_flow " + error_text) + raise OpenflowConnConnectionException(error_text) def clear_all_flows(self): @@ -463,9 +581,10 @@ class OfConnOnos(OpenflowConn): self.del_flow(rule) self.logger.debug("clear_all_flows OK ") - return None + return None except requests.exceptions.RequestException as e: error_text = type(e).__name__ + ": " + str(e) self.logger.error("clear_all_flows " + error_text) + raise OpenflowConnConnectionException(error_text) diff --git a/RO-SDN-onos_openflow/osm_rosdn_onosof/sdnconn_onosof.py b/RO-SDN-onos_openflow/osm_rosdn_onosof/sdnconn_onosof.py index fa820d2b..ed70b326 100644 --- a/RO-SDN-onos_openflow/osm_rosdn_onosof/sdnconn_onosof.py +++ b/RO-SDN-onos_openflow/osm_rosdn_onosof/sdnconn_onosof.py @@ -25,11 +25,9 @@ from .onos_of import OfConnOnos class SdnConnectorOnosOf(SdnConnectorOpenFlow): - def __init__(self, wim, wim_account, config=None, logger=None): - """Creates a connectivity based on pro-active openflow rules - """ - self.logger = logging.getLogger('ro.sdn.onosof') + """Creates a connectivity based on pro-active openflow rules""" + self.logger = logging.getLogger("ro.sdn.onosof") super().__init__(wim, wim_account, config, logger) of_params = { "of_url": wim["wim_url"], @@ -39,5 +37,8 @@ class SdnConnectorOnosOf(SdnConnectorOpenFlow): } self.openflow_conn = OfConnOnos(of_params) super().__init__(wim, wim_account, config, logger, self.openflow_conn) - self.logger.debug("Init sdn plugin '{}' dpid={} user={}".format(of_params["of_url"], of_params["of_dpid"], - of_params["of_user"])) + self.logger.debug( + "Init sdn plugin '{}' dpid={} user={}".format( + of_params["of_url"], of_params["of_dpid"], of_params["of_user"] + ) + ) diff --git a/RO-SDN-onos_openflow/setup.py b/RO-SDN-onos_openflow/setup.py index 66427b2b..84d9cb7a 100644 --- a/RO-SDN-onos_openflow/setup.py +++ b/RO-SDN-onos_openflow/setup.py @@ -30,26 +30,30 @@ osm-ro plugin for onos SDN using pre-computed openflow rules setup( name=_name, - description='OSM RO plugin for SDN with onos openflow rules', + description="OSM RO plugin for SDN with onos openflow rules", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='alfonso.tiernosepulveda@telefonica.com', - maintainer='Alfonso Tierno', - maintainer_email='alfonso.tiernosepulveda@telefonica.com', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="alfonso.tiernosepulveda@telefonica.com", + maintainer="Alfonso Tierno", + maintainer_email="alfonso.tiernosepulveda@telefonica.com", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ "requests", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rosdn.plugins': ['rosdn_onosof = osm_rosdn_onosof.sdnconn_onosof:SdnConnectorOnosOf'], + "osm_rosdn.plugins": [ + "rosdn_onosof = 
osm_rosdn_onosof.sdnconn_onosof:SdnConnectorOnosOf" + ], }, ) diff --git a/RO-SDN-onos_openflow/tox.ini b/RO-SDN-onos_openflow/tox.ini index 61778145..0e17031b 100644 --- a/RO-SDN-onos_openflow/tox.ini +++ b/RO-SDN-onos_openflow/tox.ini @@ -29,7 +29,7 @@ deps = flake8 -r{toxinidir}/requirements.txt install_command = python3 -m pip install -U {opts} {packages} commands = flake8 osm_rosdn_onosof --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-SDN-onos_vpls/osm_rosdn_onos_vpls/sdn_assist_onos_vpls.py b/RO-SDN-onos_vpls/osm_rosdn_onos_vpls/sdn_assist_onos_vpls.py index 372de665..1c68fe77 100644 --- a/RO-SDN-onos_vpls/osm_rosdn_onos_vpls/sdn_assist_onos_vpls.py +++ b/RO-SDN-onos_vpls/osm_rosdn_onos_vpls/sdn_assist_onos_vpls.py @@ -34,6 +34,7 @@ class OnosVpls(SdnConnectorBase): """ https://wiki.onosproject.org/display/ONOS/VPLS+User+Guide """ + _WIM_LOGGER = "ro.sdn.onosvpls" def __init__(self, wim, wim_account, config=None, logger=None): @@ -42,79 +43,126 @@ class OnosVpls(SdnConnectorBase): self.user = wim_account.get("user") self.password = wim_account.get("password") url = wim.get("wim_url") + if not url: raise SdnConnectorError("'url' must be provided") + if not url.startswith("http"): url = "http://" + url + if not url.endswith("/"): url = url + "/" + self.url = url + "onos/v1/network/configuration" self.logger.info("ONOS VPLS Connector Initialized.") def check_credentials(self): status_code = 503 onos_config_req = None + try: - onos_config_req = requests.get(self.url, auth=HTTPBasicAuth(self.user, self.password)) + onos_config_req = requests.get( + self.url, auth=HTTPBasicAuth(self.user, self.password) + ) onos_config_req.raise_for_status() except Exception as e: if onos_config_req: status_code = onos_config_req.status_code - self.logger.exception('Error checking credentials: {}'.format(e)) - raise SdnConnectorError('Error checking credentials: {}'.format(e), http_code=status_code) + + self.logger.exception("Error checking credentials: {}".format(e)) + + raise SdnConnectorError( + "Error checking credentials: {}".format(e), http_code=status_code + ) def get_connectivity_service_status(self, service_uuid, conn_info=None): try: onos_config = self._get_onos_netconfig() - vpls_config = onos_config.get('apps', {}).get('org.onosproject.vpls') + vpls_config = onos_config.get("apps", {}).get("org.onosproject.vpls") if vpls_config: - for vpls in vpls_config.get('vpls', {}).get('vplsList'): - if vpls.get('name') == service_uuid: - return {'sdn_status': 'ACTIVE', 'sdn_info': vpls} - - return {'sdn_status': 'ERROR', 'sdn_info': 'not found'} + for vpls in vpls_config.get("vpls", {}).get("vplsList"): + if vpls.get("name") == service_uuid: + return {"sdn_status": "ACTIVE", "sdn_info": vpls} + return {"sdn_status": "ERROR", "sdn_info": "not found"} except Exception as e: - self.logger.error('Exception getting connectivity service info: %s', e) - return {'sdn_status': 'ERROR', 'error_msg': str(e)} + self.logger.error("Exception getting connectivity service info: %s", e) + + return {"sdn_status": "ERROR", "error_msg": str(e)} def _get_onos_netconfig(self): try: - onos_config_req = requests.get(self.url, auth=HTTPBasicAuth(self.user, self.password)) + onos_config_req = requests.get( + self.url, auth=HTTPBasicAuth(self.user, self.password) + ) status_code = 
onos_config_req.status_code + if status_code == requests.codes.ok: return onos_config_req.json() else: - self.logger.info("Error obtaining network config, status code: {}".format(status_code)) - raise SdnConnectorError("Error obtaining network config status code: {}".format(status_code), - http_code=status_code) + self.logger.info( + "Error obtaining network config, status code: {}".format( + status_code + ) + ) + + raise SdnConnectorError( + "Error obtaining network config status code: {}".format( + status_code + ), + http_code=status_code, + ) except requests.exceptions.ConnectionError as e: - self.logger.info('Exception connecting to onos: %s', e) + self.logger.info("Exception connecting to onos: %s", e) + raise SdnConnectorError("Error connecting to onos: {}".format(e)) except Exception as e: - self.logger.error('Exception getting onos network config: %s', e) - raise SdnConnectorError("Exception getting onos network config: {}".format(e)) + self.logger.error("Exception getting onos network config: %s", e) + + raise SdnConnectorError( + "Exception getting onos network config: {}".format(e) + ) def _post_onos_netconfig(self, onos_config): try: - onos_config_resp = requests.post(self.url, json=onos_config, auth=HTTPBasicAuth(self.user, self.password)) + onos_config_resp = requests.post( + self.url, json=onos_config, auth=HTTPBasicAuth(self.user, self.password) + ) status_code = onos_config_resp.status_code + if status_code != requests.codes.ok: - self.logger.info("Error updating network config, status code: {}".format(status_code)) - raise SdnConnectorError("Error obtaining network config status code: {}".format(status_code), - http_code=status_code) + self.logger.info( + "Error updating network config, status code: {}".format(status_code) + ) + + raise SdnConnectorError( + "Error obtaining network config status code: {}".format( + status_code + ), + http_code=status_code, + ) except requests.exceptions.ConnectionError as e: - self.logger.info('Exception connecting to onos: %s', e) + self.logger.info("Exception connecting to onos: %s", e) + raise SdnConnectorError("Error connecting to onos: {}".format(e)) except Exception as e: - self.logger.info('Exception posting onos network config: %s', e) - raise SdnConnectorError("Exception posting onos network config: {}".format(e)) + self.logger.info("Exception posting onos network config: %s", e) + + raise SdnConnectorError( + "Exception posting onos network config: {}".format(e) + ) def create_connectivity_service(self, service_type, connection_points, **kwargs): - self.logger.debug("create_connectivity_service, service_type: {}, connection_points: {}". - format(service_type, connection_points)) - if service_type.lower() == 'etree': - raise SdnConnectorError('Only ELINE/ELAN network type is supported by ONOS VPLS.') + self.logger.debug( + "create_connectivity_service, service_type: {}, connection_points: {}".format( + service_type, connection_points + ) + ) + + if service_type.lower() == "etree": + raise SdnConnectorError( + "Only ELINE/ELAN network type is supported by ONOS VPLS." + ) # FIXME ¿must check number of connection_points? 
service_uuid = str(uuid.uuid4()) @@ -128,38 +176,49 @@ class OnosVpls(SdnConnectorBase): # Create missing interfaces, append to created_items if returned, append_port_to_onos_config # returns null if it was already created created_items = [] + for port in connection_points: created_ifz = self._append_port_to_onos_config(port, onos_config) if created_ifz: created_items.append(created_ifz[1]) + self._post_onos_netconfig(onos_config) # Add vpls service to config encapsulation = self._get_encapsulation(connection_points) interfaces = [port.get("service_endpoint_id") for port in connection_points] - if 'org.onosproject.vpls' in onos_config['apps']: - if 'vpls' not in onos_config['apps']['org.onosproject.vpls']: - onos_config['apps']['org.onosproject.vpls']['vpls'] = { - 'vplsList': [] + + if "org.onosproject.vpls" in onos_config["apps"]: + if "vpls" not in onos_config["apps"]["org.onosproject.vpls"]: + onos_config["apps"]["org.onosproject.vpls"]["vpls"] = { + "vplsList": [] } - for vpls in onos_config['apps']['org.onosproject.vpls']['vpls']['vplsList']: - if vpls['name'] == service_uuid: - raise SdnConnectorError('Network {} already exists.'.format(service_uuid)) - onos_config['apps']['org.onosproject.vpls']['vpls']['vplsList'].append({ - 'name': service_uuid, - 'interfaces': interfaces, - 'encapsulation': encapsulation - }) + + for vpls in onos_config["apps"]["org.onosproject.vpls"]["vpls"][ + "vplsList" + ]: + if vpls["name"] == service_uuid: + raise SdnConnectorError( + "Network {} already exists.".format(service_uuid) + ) + + onos_config["apps"]["org.onosproject.vpls"]["vpls"]["vplsList"].append( + { + "name": service_uuid, + "interfaces": interfaces, + "encapsulation": encapsulation, + } + ) self._pop_last_update_time(onos_config) else: - onos_config['apps'] = { - 'org.onosproject.vpls': { - 'vpls': { + onos_config["apps"] = { + "org.onosproject.vpls": { + "vpls": { "vplsList": [ { - 'name': service_uuid, - 'interfaces': interfaces, - 'encapsulation': encapsulation + "name": service_uuid, + "interfaces": interfaces, + "encapsulation": encapsulation, } ] } @@ -169,21 +228,29 @@ class OnosVpls(SdnConnectorBase): # self.logger.debug("original config: %s", onos_config) self._post_onos_netconfig(onos_config) - self.logger.debug("created connectivity_service, service_uuid: {}, created_items: {}". 
- format(service_uuid, created_items)) + self.logger.debug( + "created connectivity_service, service_uuid: {}, created_items: {}".format( + service_uuid, created_items + ) + ) + return service_uuid, {"interfaces": created_items} except Exception as e: - self.logger.error('Exception add connection_service: %s', e) + self.logger.error("Exception add connection_service: %s", e) + # try to rollback push original config try: self._post_onos_netconfig(onos_config_orig) except Exception as e: - self.logger.error('Exception rolling back to original config: %s', e) + self.logger.error("Exception rolling back to original config: %s", e) + # raise exception if isinstance(e, SdnConnectorError): raise else: - raise SdnConnectorError("Exception create_connectivity_service: {}".format(e)) + raise SdnConnectorError( + "Exception create_connectivity_service: {}".format(e) + ) def _get_encapsulation(self, connection_points): """ @@ -197,11 +264,16 @@ class OnosVpls(SdnConnectorBase): if connection_point.get("service_endpoint_encapsulation_type") == "dot1q": encapsulation = "VLAN" break + return encapsulation - def edit_connectivity_service(self, service_uuid, conn_info=None, connection_points=None, **kwargs): - self.logger.debug("edit connectivity service, service_uuid: {}, conn_info: {}, " - "connection points: {} ".format(service_uuid, conn_info, connection_points)) + def edit_connectivity_service( + self, service_uuid, conn_info=None, connection_points=None, **kwargs + ): + self.logger.debug( + "edit connectivity service, service_uuid: {}, conn_info: {}, " + "connection points: {} ".format(service_uuid, conn_info, connection_points) + ) conn_info = conn_info or {} created_ifs = conn_info.get("interfaces", []) @@ -211,20 +283,27 @@ class OnosVpls(SdnConnectorBase): onos_config = copy.deepcopy(onos_config_orig) # get current service data and check if it does not exists - for vpls in onos_config.get('apps', {}).get('org.onosproject.vpls', {}).get('vpls', {}).get('vplsList', {}): - if vpls['name'] == service_uuid: + for vpls in ( + onos_config.get("apps", {}) + .get("org.onosproject.vpls", {}) + .get("vpls", {}) + .get("vplsList", {}) + ): + if vpls["name"] == service_uuid: self.logger.debug("service exists") curr_interfaces = vpls.get("interfaces", []) curr_encapsulation = vpls.get("encapsulation") break else: - raise SdnConnectorError("service uuid: {} does not exist".format(service_uuid)) - + raise SdnConnectorError( + "service uuid: {} does not exist".format(service_uuid) + ) + self.logger.debug("current interfaces: {}".format(curr_interfaces)) self.logger.debug("current encapsulation: {}".format(curr_encapsulation)) # new interfaces names - new_interfaces = [port['service_endpoint_id'] for port in connection_points] + new_interfaces = [port["service_endpoint_id"] for port in connection_points] # obtain interfaces to delete, list will contain port ifs_delete = list(set(curr_interfaces) - set(new_interfaces)) @@ -236,16 +315,25 @@ class OnosVpls(SdnConnectorBase): # in that case delete it and add it again ifs_remain = list(set(new_interfaces) & set(curr_interfaces)) for port in connection_points: - if port['service_endpoint_id'] in ifs_remain: + if port["service_endpoint_id"] in ifs_remain: # check if there are some changes - curr_port_name, curr_vlan = self._get_current_port_data(onos_config, port['service_endpoint_id']) - new_port_name = 'of:{}/{}'.format(port['service_endpoint_encapsulation_info']['switch_dpid'], - port['service_endpoint_encapsulation_info']['switch_port']) - new_vlan = 
port['service_endpoint_encapsulation_info']['vlan'] - if (curr_port_name != new_port_name or curr_vlan != new_vlan): - self.logger.debug("TODO: must update data interface: {}".format(port['service_endpoint_id'])) - ifs_delete.append(port['service_endpoint_id']) - ifs_add.append(port['service_endpoint_id']) + curr_port_name, curr_vlan = self._get_current_port_data( + onos_config, port["service_endpoint_id"] + ) + new_port_name = "of:{}/{}".format( + port["service_endpoint_encapsulation_info"]["switch_dpid"], + port["service_endpoint_encapsulation_info"]["switch_port"], + ) + new_vlan = port["service_endpoint_encapsulation_info"]["vlan"] + + if curr_port_name != new_port_name or curr_vlan != new_vlan: + self.logger.debug( + "TODO: must update data interface: {}".format( + port["service_endpoint_id"] + ) + ) + ifs_delete.append(port["service_endpoint_id"]) + ifs_add.append(port["service_endpoint_id"]) new_encapsulation = self._get_encapsulation(connection_points) @@ -253,48 +341,69 @@ class OnosVpls(SdnConnectorBase): # Delete interfaces, only will delete interfaces that are in provided conn_info # because these are the ones that have been created for this service if ifs_delete: - for port in onos_config['ports'].values(): - for port_interface in port['interfaces']: - interface_name = port_interface['name'] - self.logger.debug("interface name: {}".format(port_interface['name'])) - if interface_name in ifs_delete and interface_name in created_ifs: - self.logger.debug("delete interface name: {}".format(interface_name)) - port['interfaces'].remove(port_interface) + for port in onos_config["ports"].values(): + for port_interface in port["interfaces"]: + interface_name = port_interface["name"] + self.logger.debug( + "interface name: {}".format(port_interface["name"]) + ) + + if ( + interface_name in ifs_delete + and interface_name in created_ifs + ): + self.logger.debug( + "delete interface name: {}".format(interface_name) + ) + port["interfaces"].remove(port_interface) created_ifs.remove(interface_name) # Add new interfaces for port in connection_points: - if port['service_endpoint_id'] in ifs_add: + if port["service_endpoint_id"] in ifs_add: created_ifz = self._append_port_to_onos_config(port, onos_config) if created_ifz: created_ifs.append(created_ifz[1]) + self._pop_last_update_time(onos_config) self._post_onos_netconfig(onos_config) - self.logger.debug("onos config after updating interfaces: {}".format(onos_config)) - self.logger.debug("created_ifs after updating interfaces: {}".format(created_ifs)) + self.logger.debug( + "onos config after updating interfaces: {}".format(onos_config) + ) + self.logger.debug( + "created_ifs after updating interfaces: {}".format(created_ifs) + ) # Update interfaces list in vpls service - for vpls in onos_config.get('apps', {}).get('org.onosproject.vpls', {}).get('vpls', {}).get('vplsList', {}): - if vpls['name'] == service_uuid: - vpls['interfaces'] = new_interfaces - vpls['encapsulation'] = new_encapsulation + for vpls in ( + onos_config.get("apps", {}) + .get("org.onosproject.vpls", {}) + .get("vpls", {}) + .get("vplsList", {}) + ): + if vpls["name"] == service_uuid: + vpls["interfaces"] = new_interfaces + vpls["encapsulation"] = new_encapsulation self._pop_last_update_time(onos_config) self._post_onos_netconfig(onos_config) + return {"interfaces": created_ifs} except Exception as e: - self.logger.error('Exception add connection_service: %s', e) + self.logger.error("Exception add connection_service: %s", e) # try to rollback push original config try: 
self._post_onos_netconfig(onos_config_orig) except Exception as e2: - self.logger.error('Exception rolling back to original config: %s', e2) + self.logger.error("Exception rolling back to original config: %s", e2) # raise exception if isinstance(e, SdnConnectorError): raise else: - raise SdnConnectorError("Exception create_connectivity_service: {}".format(e)) + raise SdnConnectorError( + "Exception create_connectivity_service: {}".format(e) + ) def delete_connectivity_service(self, service_uuid, conn_info=None): self.logger.debug("delete_connectivity_service uuid: {}".format(service_uuid)) @@ -306,54 +415,85 @@ class OnosVpls(SdnConnectorBase): try: # Removes ports used by network from onos config - for vpls in onos_config.get('apps', {}).get('org.onosproject.vpls', {}).get('vpls', {}).get('vplsList', {}): - if vpls['name'] == service_uuid: + for vpls in ( + onos_config.get("apps", {}) + .get("org.onosproject.vpls", {}) + .get("vpls", {}) + .get("vplsList", {}) + ): + if vpls["name"] == service_uuid: # iterate interfaces to check if must delete them - for interface in vpls['interfaces']: - for port in onos_config['ports'].values(): - for port_interface in port['interfaces']: - if port_interface['name'] == interface: + for interface in vpls["interfaces"]: + for port in onos_config["ports"].values(): + for port_interface in port["interfaces"]: + if port_interface["name"] == interface: # Delete only created ifzs - if port_interface['name'] in created_ifs: - self.logger.debug("Delete ifz: {}".format(port_interface['name'])) - port['interfaces'].remove(port_interface) - onos_config['apps']['org.onosproject.vpls']['vpls']['vplsList'].remove(vpls) + if port_interface["name"] in created_ifs: + self.logger.debug( + "Delete ifz: {}".format( + port_interface["name"] + ) + ) + port["interfaces"].remove(port_interface) + onos_config["apps"]["org.onosproject.vpls"]["vpls"][ + "vplsList" + ].remove(vpls) break else: - raise SdnConnectorError("service uuid: {} does not exist".format(service_uuid)) + raise SdnConnectorError( + "service uuid: {} does not exist".format(service_uuid) + ) self._pop_last_update_time(onos_config) self._post_onos_netconfig(onos_config) - self.logger.debug("deleted connectivity service uuid: {}".format(service_uuid)) + self.logger.debug( + "deleted connectivity service uuid: {}".format(service_uuid) + ) except SdnConnectorError: raise except Exception as e: - self.logger.error('Exception delete connection_service: %s', e, exc_info=True) - raise SdnConnectorError("Exception delete connectivity service: {}".format(str(e))) + self.logger.error( + "Exception delete connection_service: %s", e, exc_info=True + ) + + raise SdnConnectorError( + "Exception delete connectivity service: {}".format(str(e)) + ) def _pop_last_update_time(self, onos_config): """ Needed before post when there are already configured vpls services to apply changes """ - onos_config['apps']['org.onosproject.vpls']['vpls'].pop('lastUpdateTime', None) + onos_config["apps"]["org.onosproject.vpls"]["vpls"].pop("lastUpdateTime", None) def _get_current_port_data(self, onos_config, interface_name): - for port_name, port in onos_config['ports'].items(): - for port_interface in port['interfaces']: - if port_interface['name'] == interface_name: - return port_name, port_interface['vlan'] + for port_name, port in onos_config["ports"].items(): + for port_interface in port["interfaces"]: + if port_interface["name"] == interface_name: + return port_name, port_interface["vlan"] def _append_port_to_onos_config(self, port, 
onos_config): created_item = None - port_name = 'of:{}/{}'.format(port['service_endpoint_encapsulation_info']['switch_dpid'], - port['service_endpoint_encapsulation_info']['switch_port']) - interface_config = {'name': port['service_endpoint_id']} - if 'vlan' in port['service_endpoint_encapsulation_info'] \ - and port['service_endpoint_encapsulation_info']['vlan']: - interface_config['vlan'] = port['service_endpoint_encapsulation_info']['vlan'] - if port_name in onos_config['ports'] and 'interfaces' in onos_config['ports'][port_name]: - for interface in onos_config['ports'][port_name]['interfaces']: - if interface['name'] == port['service_endpoint_id']: + port_name = "of:{}/{}".format( + port["service_endpoint_encapsulation_info"]["switch_dpid"], + port["service_endpoint_encapsulation_info"]["switch_port"], + ) + interface_config = {"name": port["service_endpoint_id"]} + + if ( + "vlan" in port["service_endpoint_encapsulation_info"] + and port["service_endpoint_encapsulation_info"]["vlan"] + ): + interface_config["vlan"] = port["service_endpoint_encapsulation_info"][ + "vlan" + ] + + if ( + port_name in onos_config["ports"] + and "interfaces" in onos_config["ports"][port_name] + ): + for interface in onos_config["ports"][port_name]["interfaces"]: + if interface["name"] == port["service_endpoint_id"]: # self.logger.debug("interface with same name and port exits") # interface already exists TODO ¿check vlan? ¿delete and recreate? # by the moment use and do not touch @@ -361,39 +501,38 @@ class OnosVpls(SdnConnectorBase): break else: # self.logger.debug("port with same name exits but not interface") - onos_config['ports'][port_name]['interfaces'].append(interface_config) - created_item = (port_name, port['service_endpoint_id']) + onos_config["ports"][port_name]["interfaces"].append(interface_config) + created_item = (port_name, port["service_endpoint_id"]) else: # self.logger.debug("create port and interface") - onos_config['ports'][port_name] = { - 'interfaces': [interface_config] - } - created_item = (port_name, port['service_endpoint_id']) + onos_config["ports"][port_name] = {"interfaces": [interface_config]} + created_item = (port_name, port["service_endpoint_id"]) + return created_item -if __name__ == '__main__': - logger = logging.getLogger('ro.sdn.onos_vpls') +if __name__ == "__main__": + logger = logging.getLogger("ro.sdn.onos_vpls") logging.basicConfig() logger.setLevel(getattr(logging, "DEBUG")) # wim_url = "http://10.95.172.251:8181" wim_url = "http://192.168.56.106:8181" user = "karaf" password = "karaf" - wim = {'wim_url': wim_url} - wim_account = {'user': user, 'password': password} + wim = {"wim_url": wim_url} + wim_account = {"user": user, "password": password} onos_vpls = OnosVpls(wim=wim, wim_account=wim_account, logger=logger) # conn_service = onos_vpls.get_connectivity_service_status("4e1f4c8a-a874-425d-a9b5-955cb77178f8") # print(conn_service) - service_type = 'ELAN' + service_type = "ELAN" conn_point_0 = { "service_endpoint_id": "switch1:ifz1", "service_endpoint_encapsulation_type": "dot1q", "service_endpoint_encapsulation_info": { "switch_dpid": "0000000000000011", "switch_port": "1", - "vlan": "600" - } + "vlan": "600", + }, } conn_point_1 = { "service_endpoint_id": "switch3:ifz1", @@ -401,8 +540,8 @@ if __name__ == '__main__': "service_endpoint_encapsulation_info": { "switch_dpid": "0000000000000031", "switch_port": "3", - "vlan": "600" - } + "vlan": "600", + }, } connection_points = [conn_point_0, conn_point_1] # service_uuid, conn_info = 
onos_vpls.create_connectivity_service(service_type, connection_points) @@ -410,7 +549,7 @@ if __name__ == '__main__': # print(conn_info) # conn_info = None - conn_info = {"interfaces": ['switch1:ifz1', 'switch3:ifz1']} + conn_info = {"interfaces": ["switch1:ifz1", "switch3:ifz1"]} # onos_vpls.delete_connectivity_service("70248a41-11cb-44f3-9039-c41387394a30", conn_info) conn_point_0 = { @@ -419,8 +558,8 @@ if __name__ == '__main__': "service_endpoint_encapsulation_info": { "switch_dpid": "0000000000000011", "switch_port": "1", - "vlan": "500" - } + "vlan": "500", + }, } conn_point_2 = { "service_endpoint_id": "switch1:ifz3", @@ -428,8 +567,8 @@ if __name__ == '__main__': "service_endpoint_encapsulation_info": { "switch_dpid": "0000000000000011", "switch_port": "3", - "vlan": "500" - } + "vlan": "500", + }, } conn_point_3 = { "service_endpoint_id": "switch2:ifz2", @@ -437,14 +576,16 @@ if __name__ == '__main__': "service_endpoint_encapsulation_info": { "switch_dpid": "0000000000000022", "switch_port": "2", - "vlan": "500" - } + "vlan": "500", + }, } connection_points_2 = [conn_point_0, conn_point_3] # conn_info = onos_vpls.edit_connectivity_service("c65d88be-73aa-4933-927d-57ec6bee6b41", # conn_info, connection_points_2) # print(conn_info) - service_status = onos_vpls.get_connectivity_service_status("c65d88be-73aa-4933-927d-57ec6bee6b41", conn_info) + service_status = onos_vpls.get_connectivity_service_status( + "c65d88be-73aa-4933-927d-57ec6bee6b41", conn_info + ) print("service status") print(service_status) diff --git a/RO-SDN-onos_vpls/setup.py b/RO-SDN-onos_vpls/setup.py index 562954b7..df7f366b 100644 --- a/RO-SDN-onos_vpls/setup.py +++ b/RO-SDN-onos_vpls/setup.py @@ -30,26 +30,30 @@ osm-ro pluging for ONOS VPLS SDN setup( name=_name, - description='OSM ro sdn plugin for ONOS VPLS', + description="OSM ro sdn plugin for ONOS VPLS", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='OSM_TECH@list.etsi.org', - maintainer='ETSI OSM', - maintainer_email='OSM_TECH@list.etsi.org', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="OSM_TECH@list.etsi.org", + maintainer="ETSI OSM", + maintainer_email="OSM_TECH@list.etsi.org", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ "requests", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rosdn.plugins': ['rosdn_onos_vpls = osm_rosdn_onos_vpls.sdn_assist_onos_vpls:OnosVpls'], + "osm_rosdn.plugins": [ + "rosdn_onos_vpls = osm_rosdn_onos_vpls.sdn_assist_onos_vpls:OnosVpls" + ], }, ) diff --git a/RO-SDN-onos_vpls/tox.ini b/RO-SDN-onos_vpls/tox.ini index a7b17d06..e0ec3485 100644 --- a/RO-SDN-onos_vpls/tox.ini +++ b/RO-SDN-onos_vpls/tox.ini @@ -27,7 +27,7 @@ commands=python3 -m unittest discover -v basepython = python3 deps = flake8 commands = flake8 osm_rosdn_onos_vpls --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore 
W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py b/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py index 11442922..9dec75ab 100644 --- a/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py +++ b/RO-VIM-aws/osm_rovim_aws/vimconn_aws.py @@ -21,9 +21,9 @@ # contact with: saboor.ahmad@xflowresearch.com ## -''' +""" AWS-connector implements all the methods to interact with AWS using the BOTO client -''' +""" __author__ = "Saboor Ahmad" __date__ = "10-Apr-2017" @@ -40,40 +40,67 @@ import boto.vpc class vimconnector(vimconn.VimConnector): - def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, - config={}, persistent_info={}): - """ Params: uuid - id asigned to this VIM - name - name assigned to this VIM, can be used for logging - tenant_id - ID to be used for tenant - tenant_name - name of tenant to be used VIM tenant to be used - url_admin - optional, url used for administrative tasks - user - credentials of the VIM user - passwd - credentials of the VIM user - log_level - if must use a different log_level than the general one - config - dictionary with misc VIM information - region_name - name of region to deploy the instances - vpc_cidr_block - default CIDR block for VPC - security_groups - default security group to specify this instance - persistent_info - dict where the class can store information that will be available among class - destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an - empty dict. Useful to store login/tokens information for speed up communication + def __init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin=None, + user=None, + passwd=None, + log_level=None, + config={}, + persistent_info={}, + ): + """Params: + uuid - id asigned to this VIM + name - name assigned to this VIM, can be used for logging + tenant_id - ID to be used for tenant + tenant_name - name of tenant to be used VIM tenant to be used + url_admin - optional, url used for administrative tasks + user - credentials of the VIM user + passwd - credentials of the VIM user + log_level - if must use a different log_level than the general one + config - dictionary with misc VIM information + region_name - name of region to deploy the instances + vpc_cidr_block - default CIDR block for VPC + security_groups - default security group to specify this instance + persistent_info - dict where the class can store information that will be available among class + destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an + empty dict. 
Useful to store login/tokens information for speed up communication """ - - vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, - config, persistent_info) + vimconn.VimConnector.__init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin, + user, + passwd, + log_level, + config, + persistent_info, + ) self.persistent_info = persistent_info self.a_creds = {} + if user: - self.a_creds['aws_access_key_id'] = user + self.a_creds["aws_access_key_id"] = user else: raise vimconn.VimConnAuthException("Username is not specified") + if passwd: - self.a_creds['aws_secret_access_key'] = passwd + self.a_creds["aws_secret_access_key"] = passwd else: raise vimconn.VimConnAuthException("Password is not specified") - if 'region_name' in config: - self.region = config.get('region_name') + + if "region_name" in config: + self.region = config.get("region_name") else: raise vimconn.VimConnException("AWS region_name is not specified at config") @@ -83,68 +110,80 @@ class vimconnector(vimconn.VimConnector): self.conn_vpc = None self.account_id = None - self.vpc_id = self.get_tenant_list()[0]['id'] + self.vpc_id = self.get_tenant_list()[0]["id"] # we take VPC CIDR block if specified, otherwise we use the default CIDR # block suggested by AWS while creating instance - self.vpc_cidr_block = '10.0.0.0/24' + self.vpc_cidr_block = "10.0.0.0/24" if tenant_id: self.vpc_id = tenant_id - if 'vpc_cidr_block' in config: - self.vpc_cidr_block = config['vpc_cidr_block'] + + if "vpc_cidr_block" in config: + self.vpc_cidr_block = config["vpc_cidr_block"] self.security_groups = None - if 'security_groups' in config: - self.security_groups = config['security_groups'] + if "security_groups" in config: + self.security_groups = config["security_groups"] self.key_pair = None - if 'key_pair' in config: - self.key_pair = config['key_pair'] + if "key_pair" in config: + self.key_pair = config["key_pair"] self.flavor_info = None - if 'flavor_info' in config: - flavor_data = config.get('flavor_info') + if "flavor_info" in config: + flavor_data = config.get("flavor_info") if isinstance(flavor_data, str): try: if flavor_data[0] == "@": # read from a file - with open(flavor_data[1:], 'r') as stream: + with open(flavor_data[1:], "r") as stream: self.flavor_info = yaml.load(stream, Loader=yaml.Loader) else: self.flavor_info = yaml.load(flavor_data, Loader=yaml.Loader) except yaml.YAMLError as e: self.flavor_info = None - raise vimconn.VimConnException("Bad format at file '{}': {}".format(flavor_data[1:], e)) + + raise vimconn.VimConnException( + "Bad format at file '{}': {}".format(flavor_data[1:], e) + ) except IOError as e: - raise vimconn.VimConnException("Error reading file '{}': {}".format(flavor_data[1:], e)) + raise vimconn.VimConnException( + "Error reading file '{}': {}".format(flavor_data[1:], e) + ) elif isinstance(flavor_data, dict): self.flavor_info = flavor_data - self.logger = logging.getLogger('ro.vim.aws') + self.logger = logging.getLogger("ro.vim.aws") + if log_level: self.logger.setLevel(getattr(logging, log_level)) def __setitem__(self, index, value): - """Params: index - name of value of set - value - value to set + """Params: + index - name of value of set + value - value to set """ - if index == 'user': - self.a_creds['aws_access_key_id'] = value - elif index == 'passwd': - self.a_creds['aws_secret_access_key'] = value - elif index == 'region': + if index == "user": + self.a_creds["aws_access_key_id"] = value + elif index == "passwd": + 
self.a_creds["aws_secret_access_key"] = value + elif index == "region": self.region = value else: vimconn.VimConnector.__setitem__(self, index, value) def _reload_connection(self): - """Returns: sets boto.EC2 and boto.VPC connection to work with AWS services - """ - + """Returns: sets boto.EC2 and boto.VPC connection to work with AWS services""" try: - self.conn = boto.ec2.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'], - aws_secret_access_key=self.a_creds['aws_secret_access_key']) - self.conn_vpc = boto.vpc.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'], - aws_secret_access_key=self.a_creds['aws_secret_access_key']) + self.conn = boto.ec2.connect_to_region( + self.region, + aws_access_key_id=self.a_creds["aws_access_key_id"], + aws_secret_access_key=self.a_creds["aws_secret_access_key"], + ) + self.conn_vpc = boto.vpc.connect_to_region( + self.region, + aws_access_key_id=self.a_creds["aws_access_key_id"], + aws_secret_access_key=self.a_creds["aws_secret_access_key"], + ) # client = boto3.client("sts", aws_access_key_id=self.a_creds['aws_access_key_id'], # aws_secret_access_key=self.a_creds['aws_secret_access_key']) # self.account_id = client.get_caller_identity()["Account"] @@ -155,20 +194,20 @@ class vimconnector(vimconn.VimConnector): """Params: an Exception object Returns: Raises the exception 'e' passed in mehtod parameters """ - self.conn = None self.conn_vpc = None + raise vimconn.VimConnConnectionException(type(e).__name__ + ": " + str(e)) def get_availability_zones_list(self): - """Obtain AvailabilityZones from AWS - """ - + """Obtain AvailabilityZones from AWS""" try: self._reload_connection() az_list = [] + for az in self.conn.get_all_zones(): az_list.append(az.name) + return az_list except Exception as e: self.format_vimconn_exception(e) @@ -182,20 +221,29 @@ class vimconnector(vimconn.VimConnector): Returns the tenant list of dictionaries, and empty list if no tenant match all the filers: [{'name':', 'id':', ...}, ...] 
""" - try: self._reload_connection() vpc_ids = [] tfilters = {} + if filter_dict != {}: - if 'id' in filter_dict: - vpc_ids.append(filter_dict['id']) - tfilters['name'] = filter_dict['id'] + if "id" in filter_dict: + vpc_ids.append(filter_dict["id"]) + tfilters["name"] = filter_dict["id"] + tenants = self.conn_vpc.get_all_vpcs(vpc_ids, tfilters) tenant_list = [] + for tenant in tenants: - tenant_list.append({'id': str(tenant.id), 'name': str(tenant.id), 'status': str(tenant.state), - 'cidr_block': str(tenant.cidr_block)}) + tenant_list.append( + { + "id": str(tenant.id), + "name": str(tenant.id), + "status": str(tenant.state), + "cidr_block": str(tenant.cidr_block), + } + ) + return tenant_list except Exception as e: self.format_vimconn_exception(e) @@ -206,8 +254,8 @@ class vimconnector(vimconn.VimConnector): "tenant_description": string max length 256 returns the tenant identifier or raise exception """ - self.logger.debug("Adding a new VPC") + try: self._reload_connection() vpc = self.conn_vpc.create_vpc(self.vpc_cidr_block) @@ -217,11 +265,16 @@ class vimconnector(vimconn.VimConnector): gateway = self.conn_vpc.create_internet_gateway() self.conn_vpc.attach_internet_gateway(gateway.id, vpc.id) route_table = self.conn_vpc.create_route_table(vpc.id) - self.conn_vpc.create_route(route_table.id, '0.0.0.0/0', gateway.id) + self.conn_vpc.create_route(route_table.id, "0.0.0.0/0", gateway.id) + + self.vpc_data[vpc.id] = { + "gateway": gateway.id, + "route_table": route_table.id, + "subnets": self.subnet_sizes( + len(self.get_availability_zones_list()), self.vpc_cidr_block + ), + } - self.vpc_data[vpc.id] = {'gateway': gateway.id, 'route_table': route_table.id, - 'subnets': self.subnet_sizes(len(self.get_availability_zones_list()), - self.vpc_cidr_block)} return vpc.id except Exception as e: self.format_vimconn_exception(e) @@ -231,36 +284,44 @@ class vimconnector(vimconn.VimConnector): tenant_id: returned VIM tenant_id on "new_tenant" Returns None on success. Raises and exception of failure. 
If tenant is not found raises vimconnNotFoundException """ - self.logger.debug("Deleting specified VPC") + try: self._reload_connection() vpc = self.vpc_data.get(tenant_id) - if 'gateway' in vpc and 'route_table' in vpc: - gateway_id, route_table_id = vpc['gateway'], vpc['route_table'] + + if "gateway" in vpc and "route_table" in vpc: + gateway_id, route_table_id = vpc["gateway"], vpc["route_table"] self.conn_vpc.detach_internet_gateway(gateway_id, tenant_id) self.conn_vpc.delete_vpc(tenant_id) - self.conn_vpc.delete_route(route_table_id, '0.0.0.0/0') + self.conn_vpc.delete_route(route_table_id, "0.0.0.0/0") else: self.conn_vpc.delete_vpc(tenant_id) except Exception as e: self.format_vimconn_exception(e) def subnet_sizes(self, availability_zones, cidr): - """Calcualtes possible subnets given CIDR value of VPC - """ - + """Calculates possible subnets given CIDR value of VPC""" if availability_zones != 2 and availability_zones != 3: self.logger.debug("Number of AZs should be 2 or 3") + raise vimconn.VimConnNotSupportedException("Number of AZs should be 2 or 3") - netmasks = ('255.255.252.0', '255.255.254.0', '255.255.255.0', '255.255.255.128') + netmasks = ( + "255.255.252.0", + "255.255.254.0", + "255.255.255.0", + "255.255.255.128", + ) ip = netaddr.IPNetwork(cidr) mask = ip.netmask if str(mask) not in netmasks: self.logger.debug("Netmask " + str(mask) + " not found") - raise vimconn.VimConnNotFoundException("Netmask " + str(mask) + " not found") + + raise vimconn.VimConnNotFoundException( + "Netmask " + str(mask) + " not found" + ) if availability_zones == 2: for n, netmask in enumerate(netmasks): @@ -272,13 +333,25 @@ class vimconnector(vimconn.VimConnector): pub_net = list(ip.subnet(n + 24)) pri_subs = pub_net[1:] pub_mask = pub_net[0].netmask - pub_split = list(ip.subnet(26)) if (str(pub_mask) == '255.255.255.0') else list(ip.subnet(27)) + + pub_split = ( + list(ip.subnet(26)) + if (str(pub_mask) == "255.255.255.0") + else list(ip.subnet(27)) + ) pub_subs = pub_split[:3] subnets = pub_subs + pri_subs return map(str, subnets) - def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): + def new_network( + self, + net_name, + net_type, + ip_profile=None, + shared=False, + provider_network_profile=None, + ): """Adds a tenant network to VIM Params: 'net_name': name of the network @@ -302,33 +375,51 @@ class vimconnector(vimconn.VimConnector): Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same as not present. 
""" - self.logger.debug("Adding a subnet to VPC") + try: created_items = {} self._reload_connection() subnet = None vpc_id = self.vpc_id + if self.vpc_data.get(vpc_id, None): - cidr_block = list(set(self.vpc_data[vpc_id]['subnets']) - - set(self.get_network_details({'tenant_id': vpc_id}, detail='cidr_block')))[0] + cidr_block = list( + set(self.vpc_data[vpc_id]["subnets"]) + - set( + self.get_network_details( + {"tenant_id": vpc_id}, detail="cidr_block" + ) + ) + )[0] else: - vpc = self.get_tenant_list({'id': vpc_id})[0] - subnet_list = self.subnet_sizes(len(self.get_availability_zones_list()), vpc['cidr_block']) - cidr_block = list(set(subnet_list) - set(self.get_network_details({'tenant_id': vpc['id']}, - detail='cidr_block')))[0] + vpc = self.get_tenant_list({"id": vpc_id})[0] + subnet_list = self.subnet_sizes( + len(self.get_availability_zones_list()), vpc["cidr_block"] + ) + cidr_block = list( + set(subnet_list) + - set( + self.get_network_details( + {"tenant_id": vpc["id"]}, detail="cidr_block" + ) + ) + )[0] + subnet = self.conn_vpc.create_subnet(vpc_id, cidr_block) + return subnet.id, created_items except Exception as e: self.format_vimconn_exception(e) def get_network_details(self, filters, detail): - """Get specified details related to a subnet - """ + """Get specified details related to a subnet""" detail_list = [] subnet_list = self.get_network_list(filters) + for net in subnet_list: detail_list.append(net[detail]) + return detail_list def get_network_list(self, filter_dict={}): @@ -351,20 +442,33 @@ class vimconnector(vimconn.VimConnector): List can be empty if no network map the filter_dict. Raise an exception only upon VIM connectivity, authorization, or some other unspecific error """ - self.logger.debug("Getting all subnets from VIM") + try: self._reload_connection() tfilters = {} + if filter_dict != {}: - if 'tenant_id' in filter_dict: - tfilters['vpcId'] = filter_dict['tenant_id'] - subnets = self.conn_vpc.get_all_subnets(subnet_ids=filter_dict.get('name', None), filters=tfilters) + if "tenant_id" in filter_dict: + tfilters["vpcId"] = filter_dict["tenant_id"] + + subnets = self.conn_vpc.get_all_subnets( + subnet_ids=filter_dict.get("name", None), filters=tfilters + ) net_list = [] + for net in subnets: net_list.append( - {'id': str(net.id), 'name': str(net.id), 'status': str(net.state), 'vpc_id': str(net.vpc_id), - 'cidr_block': str(net.cidr_block), 'type': 'bridge'}) + { + "id": str(net.id), + "name": str(net.id), + "status": str(net.state), + "vpc_id": str(net.vpc_id), + "cidr_block": str(net.cidr_block), + "type": "bridge", + } + ) + return net_list except Exception as e: self.format_vimconn_exception(e) @@ -379,13 +483,19 @@ class vimconnector(vimconn.VimConnector): other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param Raises an exception upon error or when network is not found """ - self.logger.debug("Getting Subnet from VIM") + try: self._reload_connection() subnet = self.conn_vpc.get_all_subnets(net_id)[0] - return {'id': str(subnet.id), 'name': str(subnet.id), 'status': str(subnet.state), - 'vpc_id': str(subnet.vpc_id), 'cidr_block': str(subnet.cidr_block)} + + return { + "id": str(subnet.id), + "name": str(subnet.id), + "status": str(subnet.state), + "vpc_id": str(subnet.vpc_id), + "cidr_block": str(subnet.cidr_block), + } except Exception as e: self.format_vimconn_exception(e) @@ -396,12 +506,13 @@ class vimconnector(vimconn.VimConnector): :param created_items: dictionary with extra items to be deleted. 
provided by method new_network Returns the network identifier or raises an exception upon error or when network is not found """ - self.logger.debug("Deleting subnet from VIM") + try: self._reload_connection() self.logger.debug("DELETING NET_ID: " + str(net_id)) self.conn_vpc.delete_subnet(net_id) + return net_id except Exception as e: self.format_vimconn_exception(e) @@ -423,31 +534,38 @@ class vimconnector(vimconn.VimConnector): vim_info: #Text with plain information obtained from vim (yaml.safe_dump) 'net_id2': ... """ - self._reload_connection() + try: dict_entry = {} + for net_id in net_list: subnet_dict = {} subnet = None + try: subnet = self.conn_vpc.get_all_subnets(net_id)[0] + if subnet.state == "pending": - subnet_dict['status'] = "BUILD" + subnet_dict["status"] = "BUILD" elif subnet.state == "available": - subnet_dict['status'] = 'ACTIVE' + subnet_dict["status"] = "ACTIVE" else: - subnet_dict['status'] = 'ERROR' - subnet_dict['error_msg'] = '' + subnet_dict["status"] = "ERROR" + subnet_dict["error_msg"] = "" except Exception: - subnet_dict['status'] = 'DELETED' - subnet_dict['error_msg'] = 'Network not found' + subnet_dict["status"] = "DELETED" + subnet_dict["error_msg"] = "Network not found" finally: try: - subnet_dict['vim_info'] = yaml.safe_dump(subnet, default_flow_style=True, width=256) + subnet_dict["vim_info"] = yaml.safe_dump( + subnet, default_flow_style=True, width=256 + ) except yaml.YAMLError: - subnet_dict['vim_info'] = str(subnet) + subnet_dict["vim_info"] = str(subnet) + dict_entry[net_id] = subnet_dict + return dict_entry except Exception as e: self.format_vimconn_exception(e) @@ -457,13 +575,15 @@ class vimconnector(vimconn.VimConnector): Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } Raises an exception upon error or if not found """ - self.logger.debug("Getting instance type") + try: if flavor_id in self.flavor_info: return self.flavor_info[flavor_id] else: - raise vimconn.VimConnNotFoundException("Cannot find flavor with this flavor ID/Name") + raise vimconn.VimConnNotFoundException( + "Cannot find flavor with this flavor ID/Name" + ) except Exception as e: self.format_vimconn_exception(e) @@ -477,31 +597,44 @@ class vimconnector(vimconn.VimConnector): #todo: complete parameters for EPA Returns the flavor_id or raises a vimconnNotFoundException """ - self.logger.debug("Getting flavor id from data") + try: flavor = None for key, values in self.flavor_info.items(): if (values["ram"], values["cpus"], values["disk"]) == ( - flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]): + flavor_dict["ram"], + flavor_dict["vcpus"], + flavor_dict["disk"], + ): flavor = (key, values) break elif (values["ram"], values["cpus"], values["disk"]) >= ( - flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]): + flavor_dict["ram"], + flavor_dict["vcpus"], + flavor_dict["disk"], + ): if not flavor: flavor = (key, values) else: if (flavor[1]["ram"], flavor[1]["cpus"], flavor[1]["disk"]) >= ( - values["ram"], values["cpus"], values["disk"]): + values["ram"], + values["cpus"], + values["disk"], + ): flavor = (key, values) + if flavor: return flavor[0] - raise vimconn.VimConnNotFoundException("Cannot find flavor with this flavor ID/Name") + + raise vimconn.VimConnNotFoundException( + "Cannot find flavor with this flavor ID/Name" + ) except Exception as e: self.format_vimconn_exception(e) def new_image(self, image_dict): - """ Adds a tenant image to VIM + """Adds a tenant image to VIM Params: image_dict name (string) - The name of the AMI. 
Valid only for EBS-based images. description (string) - The description of the AMI. @@ -520,22 +653,27 @@ class vimconnector(vimconn.VimConnector): volumes behind after instance termination is not free Returns: image_id - image ID of the newly created image """ - try: self._reload_connection() - image_location = image_dict.get('image_location', None) + image_location = image_dict.get("image_location", None) + if image_location: image_location = str(self.account_id) + str(image_location) - image_id = self.conn.register_image(image_dict.get('name', None), image_dict.get('description', None), - image_location, image_dict.get('architecture', None), - image_dict.get('kernel_id', None), - image_dict.get('root_device_name', None), - image_dict.get('block_device_map', None), - image_dict.get('virtualization_type', None), - image_dict.get('sriov_net_support', None), - image_dict.get('snapshot_id', None), - image_dict.get('delete_root_volume_on_termination', None)) + image_id = self.conn.register_image( + image_dict.get("name", None), + image_dict.get("description", None), + image_location, + image_dict.get("architecture", None), + image_dict.get("kernel_id", None), + image_dict.get("root_device_name", None), + image_dict.get("block_device_map", None), + image_dict.get("virtualization_type", None), + image_dict.get("sriov_net_support", None), + image_dict.get("snapshot_id", None), + image_dict.get("delete_root_volume_on_termination", None), + ) + return image_id except Exception as e: self.format_vimconn_exception(e) @@ -547,23 +685,27 @@ class vimconnector(vimconn.VimConnector): try: self._reload_connection() self.conn.deregister_image(image_id) + return image_id except Exception as e: self.format_vimconn_exception(e) def get_image_id_from_path(self, path): - ''' + """ Params: path - location of the image Returns: image_id - ID of the matching image - ''' + """ self._reload_connection() try: filters = {} + if path: - tokens = path.split('/') - filters['owner_id'] = tokens[0] - filters['name'] = '/'.join(tokens[1:]) + tokens = path.split("/") + filters["owner_id"] = tokens[0] + filters["name"] = "/".join(tokens[1:]) + image = self.conn.get_all_images(filters=filters)[0] + return image.id except Exception as e: self.format_vimconn_exception(e) @@ -579,33 +721,58 @@ class vimconnector(vimconn.VimConnector): [{}, ...] 
List can be empty """ - self.logger.debug("Getting image list from VIM") + try: self._reload_connection() image_id = None filters = {} - if 'id' in filter_dict: - image_id = filter_dict['id'] - if 'name' in filter_dict: - filters['name'] = filter_dict['name'] - if 'location' in filter_dict: - filters['location'] = filter_dict['location'] + + if "id" in filter_dict: + image_id = filter_dict["id"] + + if "name" in filter_dict: + filters["name"] = filter_dict["name"] + + if "location" in filter_dict: + filters["location"] = filter_dict["location"] + # filters['image_type'] = 'machine' # filter_dict['owner_id'] = self.account_id images = self.conn.get_all_images(image_id, filters=filters) image_list = [] + for image in images: - image_list.append({'id': str(image.id), 'name': str(image.name), 'status': str(image.state), - 'owner': str(image.owner_id), 'location': str(image.location), - 'is_public': str(image.is_public), 'architecture': str(image.architecture), - 'platform': str(image.platform)}) + image_list.append( + { + "id": str(image.id), + "name": str(image.name), + "status": str(image.state), + "owner": str(image.owner_id), + "location": str(image.location), + "is_public": str(image.is_public), + "architecture": str(image.architecture), + "platform": str(image.platform), + } + ) + return image_list except Exception as e: self.format_vimconn_exception(e) - def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, - disk_list=None, availability_zone_index=None, availability_zone_list=None): + def new_vminstance( + self, + name, + description, + start, + image_id, + flavor_id, + net_list, + cloud_config=None, + disk_list=None, + availability_zone_index=None, + availability_zone_list=None, + ): """Create a new VM/instance in AWS Params: name decription @@ -659,8 +826,8 @@ class vimconnector(vimconn.VimConnector): Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same as not present. 
""" - self.logger.debug("Creating a new VM instance") + try: self._reload_connection() instance = None @@ -672,17 +839,22 @@ class vimconnector(vimconn.VimConnector): key_name=self.key_pair, instance_type=flavor_id, security_groups=self.security_groups, - user_data=userdata + user_data=userdata, ) else: for index, subnet in enumerate(net_list): - net_intr = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnet.get('net_id'), - groups=None, - associate_public_ip_address=True) + net_intr = boto.ec2.networkinterface.NetworkInterfaceSpecification( + subnet_id=subnet.get("net_id"), + groups=None, + associate_public_ip_address=True, + ) - if subnet.get('elastic_ip'): + if subnet.get("elastic_ip"): eip = self.conn.allocate_address() - self.conn.associate_address(allocation_id=eip.allocation_id, network_interface_id=net_intr.id) + self.conn.associate_address( + allocation_id=eip.allocation_id, + network_interface_id=net_intr.id, + ) if index == 0: reservation = self.conn.run_instances( @@ -690,31 +862,41 @@ class vimconnector(vimconn.VimConnector): key_name=self.key_pair, instance_type=flavor_id, security_groups=self.security_groups, - network_interfaces=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr), - user_data=userdata + network_interfaces=boto.ec2.networkinterface.NetworkInterfaceCollection( + net_intr + ), + user_data=userdata, ) else: while True: try: self.conn.attach_network_interface( - network_interface_id=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr), - instance_id=instance.id, device_index=0) + network_interface_id=boto.ec2.networkinterface.NetworkInterfaceCollection( + net_intr + ), + instance_id=instance.id, + device_index=0, + ) break except Exception: time.sleep(10) - net_list[index]['vim_id'] = reservation.instances[0].interfaces[index].id + + net_list[index]["vim_id"] = ( + reservation.instances[0].interfaces[index].id + ) instance = reservation.instances[0] + return instance.id, None except Exception as e: self.format_vimconn_exception(e) def get_vminstance(self, vm_id): """Returns the VM instance information from VIM""" - try: self._reload_connection() reservation = self.conn.get_all_instances(vm_id) + return reservation[0].instances[0].__dict__ except Exception as e: self.format_vimconn_exception(e) @@ -722,17 +904,17 @@ class vimconnector(vimconn.VimConnector): def delete_vminstance(self, vm_id, created_items=None): """Removes a VM instance from VIM Returns the instance identifier""" - try: self._reload_connection() self.logger.debug("DELETING VM_ID: " + str(vm_id)) self.conn.terminate_instances(vm_id) + return vm_id except Exception as e: self.format_vimconn_exception(e) def refresh_vms_status(self, vm_list): - """ Get the status of the virtual machines and their interfaces/ports + """Get the status of the virtual machines and their interfaces/ports Params: the list of VM identifiers Returns a dictionary with: vm_id: #VIM id of this Virtual Machine @@ -754,42 +936,59 @@ class vimconnector(vimconn.VimConnector): ip_address - The IP address of the interface within the subnet. 
""" self.logger.debug("Getting VM instance information from VIM") + try: self._reload_connection() reservation = self.conn.get_all_instances(vm_list)[0] instances = {} instance_dict = {} + for instance in reservation.instances: try: if instance.state in ("pending"): - instance_dict['status'] = "BUILD" + instance_dict["status"] = "BUILD" elif instance.state in ("available", "running", "up"): - instance_dict['status'] = 'ACTIVE' + instance_dict["status"] = "ACTIVE" else: - instance_dict['status'] = 'ERROR' - instance_dict['error_msg'] = "" - instance_dict['interfaces'] = [] + instance_dict["status"] = "ERROR" + + instance_dict["error_msg"] = "" + instance_dict["interfaces"] = [] interface_dict = {} + for interface in instance.interfaces: - interface_dict['vim_interface_id'] = interface.id - interface_dict['vim_net_id'] = interface.subnet_id - interface_dict['mac_address'] = interface.mac_address - if hasattr(interface, 'publicIp') and interface.publicIp is not None: - interface_dict['ip_address'] = interface.publicIp + ";" + interface.private_ip_address + interface_dict["vim_interface_id"] = interface.id + interface_dict["vim_net_id"] = interface.subnet_id + interface_dict["mac_address"] = interface.mac_address + + if ( + hasattr(interface, "publicIp") + and interface.publicIp is not None + ): + interface_dict["ip_address"] = ( + interface.publicIp + ";" + interface.private_ip_address + ) else: - interface_dict['ip_address'] = interface.private_ip_address - instance_dict['interfaces'].append(interface_dict) + interface_dict["ip_address"] = interface.private_ip_address + + instance_dict["interfaces"].append(interface_dict) except Exception as e: - self.logger.error("Exception getting vm status: %s", str(e), exc_info=True) - instance_dict['status'] = "DELETED" - instance_dict['error_msg'] = str(e) + self.logger.error( + "Exception getting vm status: %s", str(e), exc_info=True + ) + instance_dict["status"] = "DELETED" + instance_dict["error_msg"] = str(e) finally: try: - instance_dict['vim_info'] = yaml.safe_dump(instance, default_flow_style=True, width=256) + instance_dict["vim_info"] = yaml.safe_dump( + instance, default_flow_style=True, width=256 + ) except yaml.YAMLError: # self.logger.error("Exception getting vm status: %s", str(e), exc_info=True) - instance_dict['vim_info'] = str(instance) + instance_dict["vim_info"] = str(instance) + instances[instance.id] = instance_dict + return instances except Exception as e: self.logger.error("Exception getting vm status: %s", str(e), exc_info=True) @@ -810,6 +1009,7 @@ class vimconnector(vimconn.VimConnector): self.conn.terminate_instances(vm_id) elif "reboot" in action_dict: self.conn.reboot_instances(vm_id) + return None except Exception as e: self.format_vimconn_exception(e) diff --git a/RO-VIM-aws/setup.py b/RO-VIM-aws/setup.py index 12de7053..ea58fb4d 100644 --- a/RO-VIM-aws/setup.py +++ b/RO-VIM-aws/setup.py @@ -30,26 +30,31 @@ osm-ro pluging for aws VIM setup( name=_name, - description='OSM ro vim plugin for aws', + description="OSM ro vim plugin for aws", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='OSM_TECH@LIST.ETSI.ORG', - maintainer='ETSI OSM', - maintainer_email='OSM_TECH@LIST.ETSI.ORG', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI 
OSM", + author_email="OSM_TECH@LIST.ETSI.ORG", + maintainer="ETSI OSM", + maintainer_email="OSM_TECH@LIST.ETSI.ORG", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ - "requests", "netaddr", "PyYAML", "boto", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "requests", + "netaddr", + "PyYAML", + "boto", + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rovim.plugins': ['rovim_aws = osm_rovim_aws.vimconn_aws:vimconnector'], + "osm_rovim.plugins": ["rovim_aws = osm_rovim_aws.vimconn_aws:vimconnector"], }, ) diff --git a/RO-VIM-aws/tox.ini b/RO-VIM-aws/tox.ini index 0c0e401a..5002bbd4 100644 --- a/RO-VIM-aws/tox.ini +++ b/RO-VIM-aws/tox.ini @@ -27,7 +27,7 @@ commands=python3 -m unittest discover -v basepython = python3 deps = flake8 commands = flake8 osm_rovim_aws --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py b/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py index f2b18052..485bf05d 100755 --- a/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py +++ b/RO-VIM-azure/osm_rovim_azure/vimconn_azure.py @@ -30,14 +30,16 @@ from msrest.exceptions import AuthenticationError import msrestazure.tools as azure_tools from requests.exceptions import ConnectionError -__author__ = 'Isabel Lloret, Sergio Gonzalez, Alfonso Tierno' -__date__ = '$18-apr-2019 23:59:59$' +__author__ = "Isabel Lloret, Sergio Gonzalez, Alfonso Tierno" +__date__ = "$18-apr-2019 23:59:59$" -if getenv('OSMRO_PDB_DEBUG'): +if getenv("OSMRO_PDB_DEBUG"): import sys + print(sys.path) import pdb + pdb.set_trace() @@ -52,7 +54,7 @@ class vimconnector(vimconn.VimConnector): "Updating": "BUILD", "Deleting": "INACTIVE", "Succeeded": "ACTIVE", - "Failed": "ERROR" + "Failed": "ERROR", } # Translate azure power state to OSM provision state @@ -63,13 +65,25 @@ class vimconnector(vimconn.VimConnector): "stopped": "INACTIVE", "unknown": "OTHER", "deallocated": "BUILD", - "deallocating": "BUILD" + "deallocating": "BUILD", } AZURE_ZONES = ["1", "2", "3"] - def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, - config={}, persistent_info={}): + def __init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin=None, + user=None, + passwd=None, + log_level=None, + config={}, + persistent_info={}, + ): """ Constructor of VIM. 
Raise an exception is some needed parameter is missing, but it must not do any connectivity checking against the VIM @@ -84,74 +98,97 @@ class vimconnector(vimconn.VimConnector): "^((?!Standard_B).)*$" will filter out Standard_B range that is cheap but is very overused "^Standard_B" will select a serie B maybe for test environment """ - - vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, - config, persistent_info) + vimconn.VimConnector.__init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin, + user, + passwd, + log_level, + config, + persistent_info, + ) # Variable that indicates if client must be reloaded or initialized self.reload_client = True self.vnet_address_space = None # LOGGER - self.logger = logging.getLogger('ro.vim.azure') + self.logger = logging.getLogger("ro.vim.azure") + if log_level: logging.basicConfig() self.logger.setLevel(getattr(logging, log_level)) - self.tenant = (tenant_id or tenant_name) + self.tenant = tenant_id or tenant_name # Store config to create azure subscription later self._config = { "user": user, "passwd": passwd, - "tenant": tenant_id or tenant_name + "tenant": tenant_id or tenant_name, } # SUBSCRIPTION - if 'subscription_id' in config: - self._config["subscription_id"] = config.get('subscription_id') - # self.logger.debug('Setting subscription to: %s', self.config["subscription_id"]) + if "subscription_id" in config: + self._config["subscription_id"] = config.get("subscription_id") + # self.logger.debug("Setting subscription to: %s", self.config["subscription_id"]) else: - raise vimconn.VimConnException('Subscription not specified') + raise vimconn.VimConnException("Subscription not specified") # REGION - if 'region_name' in config: - self.region = config.get('region_name') + if "region_name" in config: + self.region = config.get("region_name") else: - raise vimconn.VimConnException('Azure region_name is not specified at config') + raise vimconn.VimConnException( + "Azure region_name is not specified at config" + ) # RESOURCE_GROUP - if 'resource_group' in config: - self.resource_group = config.get('resource_group') + if "resource_group" in config: + self.resource_group = config.get("resource_group") else: - raise vimconn.VimConnException('Azure resource_group is not specified at config') + raise vimconn.VimConnException( + "Azure resource_group is not specified at config" + ) # VNET_NAME - if 'vnet_name' in config: + if "vnet_name" in config: self.vnet_name = config["vnet_name"] - + # public ssh key - self.pub_key = config.get('pub_key') + self.pub_key = config.get("pub_key") # flavor pattern regex - if 'flavors_pattern' in config: - self._config['flavors_pattern'] = config['flavors_pattern'] - + if "flavors_pattern" in config: + self._config["flavors_pattern"] = config["flavors_pattern"] + def _reload_connection(self): """ Called before any operation, checks python azure clients """ if self.reload_client: - self.logger.debug('reloading azure client') + self.logger.debug("reloading azure client") + try: self.credentials = ServicePrincipalCredentials( client_id=self._config["user"], secret=self._config["passwd"], - tenant=self._config["tenant"] + tenant=self._config["tenant"], + ) + self.conn = ResourceManagementClient( + self.credentials, self._config["subscription_id"] + ) + self.conn_compute = ComputeManagementClient( + self.credentials, self._config["subscription_id"] + ) + self.conn_vnet = NetworkManagementClient( + self.credentials, 
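# Note: one ServicePrincipalCredentials object backs all three Azure
# management clients (resource, compute, network); each client is scoped
# to the configured subscription_id passed just below.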
self._config["subscription_id"] ) - self.conn = ResourceManagementClient(self.credentials, self._config["subscription_id"]) - self.conn_compute = ComputeManagementClient(self.credentials, self._config["subscription_id"]) - self.conn_vnet = NetworkManagementClient(self.credentials, self._config["subscription_id"]) self._check_or_create_resource_group() self._check_or_create_vnet() @@ -165,44 +202,62 @@ class vimconnector(vimconn.VimConnector): Obtains resource_name from the azure complete identifier: resource_name will always be last item """ try: - resource = str(resource_id.split('/')[-1]) + resource = str(resource_id.split("/")[-1]) + return resource except Exception as e: - raise vimconn.VimConnException("Unable to get resource name from resource_id '{}' Error: '{}'". - format(resource_id, e)) + raise vimconn.VimConnException( + "Unable to get resource name from resource_id '{}' Error: '{}'".format( + resource_id, e + ) + ) def _get_location_from_resource_group(self, resource_group_name): try: location = self.conn.resource_groups.get(resource_group_name).location + return location except Exception: - raise vimconn.VimConnNotFoundException("Location '{}' not found".format(resource_group_name)) + raise vimconn.VimConnNotFoundException( + "Location '{}' not found".format(resource_group_name) + ) def _get_resource_group_name_from_resource_id(self, resource_id): - try: - rg = str(resource_id.split('/')[4]) + rg = str(resource_id.split("/")[4]) + return rg except Exception: - raise vimconn.VimConnException("Unable to get resource group from invalid resource_id format '{}'". - format(resource_id)) + raise vimconn.VimConnException( + "Unable to get resource group from invalid resource_id format '{}'".format( + resource_id + ) + ) def _get_net_name_from_resource_id(self, resource_id): - try: - net_name = str(resource_id.split('/')[8]) + net_name = str(resource_id.split("/")[8]) + return net_name except Exception: - raise vimconn.VimConnException("Unable to get azure net_name from invalid resource_id format '{}'". 
- format(resource_id)) + raise vimconn.VimConnException( + "Unable to get azure net_name from invalid resource_id format '{}'".format( + resource_id + ) + ) def _check_subnets_for_vm(self, net_list): # All subnets must belong to the same resource group and vnet - rg_vnet = set(self._get_resource_group_name_from_resource_id(net['net_id']) + - self._get_net_name_from_resource_id(net['net_id']) for net in net_list) + rg_vnet = set( + self._get_resource_group_name_from_resource_id(net["net_id"]) + + self._get_net_name_from_resource_id(net["net_id"]) + for net in net_list + ) if len(rg_vnet) != 1: - raise self._format_vimconn_exception('Azure VMs can only attach to subnets in same VNET') + raise self._format_vimconn_exception( + "Azure VMs can only attach to subnets in same VNET" + ) def _format_vimconn_exception(self, e): """ @@ -211,13 +266,14 @@ class vimconnector(vimconn.VimConnector): if isinstance(e, vimconn.VimConnException): raise elif isinstance(e, AuthenticationError): - raise vimconn.VimConnAuthException(type(e).__name__ + ': ' + str(e)) + raise vimconn.VimConnAuthException(type(e).__name__ + ": " + str(e)) elif isinstance(e, ConnectionError): - raise vimconn.VimConnConnectionException(type(e).__name__ + ': ' + str(e)) + raise vimconn.VimConnConnectionException(type(e).__name__ + ": " + str(e)) else: # In case of generic error recreate client self.reload_client = True - raise vimconn.VimConnException(type(e).__name__ + ': ' + str(e)) + + raise vimconn.VimConnException(type(e).__name__ + ": " + str(e)) def _check_or_create_resource_group(self): """ @@ -225,9 +281,12 @@ class vimconnector(vimconn.VimConnector): """ try: rg_exists = self.conn.resource_groups.check_existence(self.resource_group) + if not rg_exists: self.logger.debug("create base rgroup: %s", self.resource_group) - self.conn.resource_groups.create_or_update(self.resource_group, {'location': self.region}) + self.conn.resource_groups.create_or_update( + self.resource_group, {"location": self.region} + ) except Exception as e: self._format_vimconn_exception(e) @@ -236,9 +295,12 @@ class vimconnector(vimconn.VimConnector): Try to get existent base vnet, in case it does not exist it creates it """ try: - vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name) + vnet = self.conn_vnet.virtual_networks.get( + self.resource_group, self.vnet_name + ) self.vnet_address_space = vnet.address_space.address_prefixes[0] self.vnet_id = vnet.id + return except CloudError as e: if e.error.error and "notfound" in e.error.error.lower(): @@ -250,21 +312,30 @@ class vimconnector(vimconn.VimConnector): # if it does not exist, create it try: vnet_params = { - 'location': self.region, - 'address_space': { - 'address_prefixes': ["10.0.0.0/8"] - }, + "location": self.region, + "address_space": {"address_prefixes": ["10.0.0.0/8"]}, } self.vnet_address_space = "10.0.0.0/8" self.logger.debug("create base vnet: %s", self.vnet_name) - self.conn_vnet.virtual_networks.create_or_update(self.resource_group, self.vnet_name, vnet_params) - vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name) + self.conn_vnet.virtual_networks.create_or_update( + self.resource_group, self.vnet_name, vnet_params + ) + vnet = self.conn_vnet.virtual_networks.get( + self.resource_group, self.vnet_name + ) self.vnet_id = vnet.id except Exception as e: self._format_vimconn_exception(e) - def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): + def new_network( + self, + net_name, + 
net_type, + ip_profile=None, + shared=False, + provider_network_profile=None, + ): """ Adds a tenant network to VIM :param net_name: name of the network @@ -297,7 +368,7 @@ class vimconnector(vimconn.VimConnector): otherwise it creates a subnet in the indicated address :return: a tuple with the network identifier and created_items, or raises an exception on error """ - self.logger.debug('create subnet name %s, ip_profile %s', net_name, ip_profile) + self.logger.debug("create subnet name %s, ip_profile %s", net_name, ip_profile) self._reload_connection() if ip_profile is None: @@ -306,32 +377,35 @@ class vimconnector(vimconn.VimConnector): for ip_range in netaddr.IPNetwork(self.vnet_address_space).subnet(24): for used_subnet in used_subnets: subnet_range = netaddr.IPNetwork(used_subnet["cidr_block"]) + if subnet_range in ip_range or ip_range in subnet_range: # this range overlaps with an existing subnet ip range. Breaks and look for another break else: ip_profile = {"subnet_address": str(ip_range)} - self.logger.debug('dinamically obtained ip_profile: %s', ip_range) + self.logger.debug("dinamically obtained ip_profile: %s", ip_range) break else: - raise vimconn.VimConnException("Cannot find a non-used subnet range in {}". - format(self.vnet_address_space)) + raise vimconn.VimConnException( + "Cannot find a non-used subnet range in {}".format( + self.vnet_address_space + ) + ) else: - ip_profile = {"subnet_address": ip_profile['subnet_address']} + ip_profile = {"subnet_address": ip_profile["subnet_address"]} try: # subnet_name = "{}-{}".format(net_name[:24], uuid4()) - subnet_params = { - 'address_prefix': ip_profile['subnet_address'] - } + subnet_params = {"address_prefix": ip_profile["subnet_address"]} # Assign a not duplicated net name subnet_name = self._get_unused_subnet_name(net_name) - self.logger.debug('creating subnet_name: {}'.format(subnet_name)) - async_creation = self.conn_vnet.subnets.create_or_update(self.resource_group, self.vnet_name, - subnet_name, subnet_params) + self.logger.debug("creating subnet_name: {}".format(subnet_name)) + async_creation = self.conn_vnet.subnets.create_or_update( + self.resource_group, self.vnet_name, subnet_name, subnet_params + ) async_creation.wait() - self.logger.debug('created subnet_name: {}'.format(subnet_name)) + self.logger.debug("created subnet_name: {}".format(subnet_name)) return "{}/subnets/{}".format(self.vnet_id, subnet_name), None except Exception as e: @@ -344,7 +418,9 @@ class vimconnector(vimconn.VimConnector): """ all_subnets = self.conn_vnet.subnets.list(self.resource_group, self.vnet_name) # Filter to subnets starting with the indicated name - subnets = list(filter(lambda subnet: (subnet.name.startswith(subnet_name)), all_subnets)) + subnets = list( + filter(lambda subnet: (subnet.name.startswith(subnet_name)), all_subnets) + ) net_names = [str(subnet.name) for subnet in subnets] # get the name with the first not used suffix @@ -354,59 +430,63 @@ class vimconnector(vimconn.VimConnector): while name in net_names: name_suffix += 1 name = subnet_name + "-" + str(name_suffix) + return name def _create_nic(self, net, nic_name, static_ip=None, created_items={}): - - self.logger.debug('create nic name %s, net_name %s', nic_name, net) + self.logger.debug("create nic name %s, net_name %s", nic_name, net) self._reload_connection() - subnet_id = net['net_id'] + subnet_id = net["net_id"] location = self._get_location_from_resource_group(self.resource_group) try: - net_ifz = {'location': location} - net_ip_config = {'name': nic_name + 
'-ipconfiguration', 'subnet': {'id': subnet_id}} + net_ifz = {"location": location} + net_ip_config = { + "name": nic_name + "-ipconfiguration", + "subnet": {"id": subnet_id}, + } + if static_ip: - net_ip_config['privateIPAddress'] = static_ip - net_ip_config['privateIPAllocationMethod'] = 'Static' - net_ifz['ip_configurations'] = [net_ip_config] - mac_address = net.get('mac_address') + net_ip_config["privateIPAddress"] = static_ip + net_ip_config["privateIPAllocationMethod"] = "Static" + + net_ifz["ip_configurations"] = [net_ip_config] + mac_address = net.get("mac_address") + if mac_address: - net_ifz['mac_address'] = mac_address + net_ifz["mac_address"] = mac_address - async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(self.resource_group, nic_name, - net_ifz) + async_nic_creation = self.conn_vnet.network_interfaces.create_or_update( + self.resource_group, nic_name, net_ifz + ) nic_data = async_nic_creation.result() created_items[nic_data.id] = True - self.logger.debug('created nic name %s', nic_name) + self.logger.debug("created nic name %s", nic_name) - public_ip = net.get('floating_ip') + public_ip = net.get("floating_ip") if public_ip: public_ip_address_params = { - 'location': location, - 'public_ip_allocation_method': 'Dynamic' + "location": location, + "public_ip_allocation_method": "Dynamic", } - public_ip_name = nic_name + '-public-ip' + public_ip_name = nic_name + "-public-ip" async_public_ip = self.conn_vnet.public_ip_addresses.create_or_update( - self.resource_group, - public_ip_name, - public_ip_address_params + self.resource_group, public_ip_name, public_ip_address_params ) public_ip = async_public_ip.result() - self.logger.debug('created public IP: {}'.format(public_ip)) + self.logger.debug("created public IP: {}".format(public_ip)) # Associate NIC to Public IP nic_data = self.conn_vnet.network_interfaces.get( - self.resource_group, - nic_name) + self.resource_group, nic_name + ) nic_data.ip_configurations[0].public_ip_address = public_ip created_items[public_ip.id] = True self.conn_vnet.network_interfaces.create_or_update( - self.resource_group, - nic_name, - nic_data) + self.resource_group, nic_name, nic_data + ) except Exception as e: self._format_vimconn_exception(e) @@ -417,25 +497,33 @@ class vimconnector(vimconn.VimConnector): """ It is not allowed to create new flavors in Azure, must always use an existing one """ - raise vimconn.VimConnAuthException("It is not possible to create new flavors in AZURE") + raise vimconn.VimConnAuthException( + "It is not possible to create new flavors in AZURE" + ) def new_tenant(self, tenant_name, tenant_description): """ It is not allowed to create new tenants in azure """ - raise vimconn.VimConnAuthException("It is not possible to create a TENANT in AZURE") + raise vimconn.VimConnAuthException( + "It is not possible to create a TENANT in AZURE" + ) def new_image(self, image_dict): """ It is not allowed to create new images in Azure, must always use an existing one """ - raise vimconn.VimConnAuthException("It is not possible to create new images in AZURE") + raise vimconn.VimConnAuthException( + "It is not possible to create new images in AZURE" + ) def get_image_id_from_path(self, path): """Get the image id from image path in the VIM database. 
- Returns the image_id or raises a vimconnNotFoundException + Returns the image_id or raises a vimconnNotFoundException """ - raise vimconn.VimConnAuthException("It is not possible to obtain image from path in AZURE") + raise vimconn.VimConnAuthException( + "It is not possible to obtain image from path in AZURE" + ) def get_image_list(self, filter_dict={}): """Obtain tenant images from VIM @@ -448,36 +536,44 @@ class vimconnector(vimconn.VimConnector): [{}, ...] List can be empty """ - self.logger.debug("get_image_list filter {}".format(filter_dict)) self._reload_connection() try: image_list = [] if filter_dict.get("name"): - # name will have the format 'publisher:offer:sku:version' + # name will have the format "publisher:offer:sku:version" # publisher is required, offer sku and version will be searched if not provided params = filter_dict["name"].split(":") publisher = params[0] if publisher: # obtain offer list offer_list = self._get_offer_list(params, publisher) + for offer in offer_list: # obtain skus sku_list = self._get_sku_list(params, publisher, offer) + for sku in sku_list: # if version is defined get directly version, else list images if len(params) == 4 and params[3]: version = params[3] - image_list = self._get_version_image_list(publisher, offer, sku, version) + image_list = self._get_version_image_list( + publisher, offer, sku, version + ) else: - image_list = self._get_sku_image_list(publisher, offer, sku) + image_list = self._get_sku_image_list( + publisher, offer, sku + ) else: raise vimconn.VimConnAuthException( - "List images in Azure must include name param with at least publisher") + "List images in Azure must include name param with at least publisher" + ) else: - raise vimconn.VimConnAuthException("List images in Azure must include name param with at" - " least publisher") + raise vimconn.VimConnAuthException( + "List images in Azure must include name param with at" + " least publisher" + ) return image_list except Exception as e: @@ -492,11 +588,19 @@ class vimconnector(vimconn.VimConnector): else: try: # get list of offers from azure - result_offers = self.conn_compute.virtual_machine_images.list_offers(self.region, publisher) + result_offers = self.conn_compute.virtual_machine_images.list_offers( + self.region, publisher + ) + return [offer.name for offer in result_offers] except CloudError as e: # azure raises CloudError when not found - self.logger.info("error listing offers for publisher {}, Error: {}".format(publisher, e)) + self.logger.info( + "error listing offers for publisher {}, Error: {}".format( + publisher, e + ) + ) + return [] def _get_sku_list(self, params, publisher, offer): @@ -508,11 +612,19 @@ class vimconnector(vimconn.VimConnector): else: try: # get list of skus from azure - result_skus = self.conn_compute.virtual_machine_images.list_skus(self.region, publisher, offer) + result_skus = self.conn_compute.virtual_machine_images.list_skus( + self.region, publisher, offer + ) + return [sku.name for sku in result_skus] except CloudError as e: # azure raises CloudError when not found - self.logger.info("error listing skus for publisher {}, offer {}, Error: {}".format(publisher, offer, e)) + self.logger.info( + "error listing skus for publisher {}, offer {}, Error: {}".format( + publisher, offer, e + ) + ) + return [] def _get_sku_image_list(self, publisher, offer, sku): @@ -521,32 +633,49 @@ class vimconnector(vimconn.VimConnector): """ image_list = [] try: - result_images = self.conn_compute.virtual_machine_images.list(self.region, publisher, 
offer, sku) + result_images = self.conn_compute.virtual_machine_images.list( + self.region, publisher, offer, sku + ) for result_image in result_images: - image_list.append({ - 'id': str(result_image.id), - 'name': ":".join([publisher, offer, sku, result_image.name]) - }) + image_list.append( + { + "id": str(result_image.id), + "name": ":".join([publisher, offer, sku, result_image.name]), + } + ) except CloudError as e: self.logger.info( - "error listing skus for publisher {}, offer {}, Error: {}".format(publisher, offer, e)) + "error listing skus for publisher {}, offer {}, Error: {}".format( + publisher, offer, e + ) + ) image_list = [] + return image_list def _get_version_image_list(self, publisher, offer, sku, version): image_list = [] try: - result_image = self.conn_compute.virtual_machine_images.get(self.region, publisher, offer, sku, version) + result_image = self.conn_compute.virtual_machine_images.get( + self.region, publisher, offer, sku, version + ) + if result_image: - image_list.append({ - 'id': str(result_image.id), - 'name': ":".join([publisher, offer, sku, version]) - }) + image_list.append( + { + "id": str(result_image.id), + "name": ":".join([publisher, offer, sku, version]), + } + ) except CloudError as e: # azure gives CloudError when not found - self.logger.info("error listing images for publisher {}, offer {}, sku {}, version {} Error: {}". - format(publisher, offer, sku, version, e)) + self.logger.info( + "error listing images for publisher {}, offer {}, sku {}, version {} Error: {}".format( + publisher, offer, sku, version, e + ) + ) image_list = [] + return image_list def get_network_list(self, filter_dict={}): @@ -560,44 +689,68 @@ class vimconnector(vimconn.VimConnector): status: 'ACTIVE', not implemented in Azure # Returns the network list of dictionaries """ - # self.logger.debug('getting network list for vim, filter %s', filter_dict) + # self.logger.debug("getting network list for vim, filter %s", filter_dict) try: self._reload_connection() - vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name) + vnet = self.conn_vnet.virtual_networks.get( + self.resource_group, self.vnet_name + ) subnet_list = [] for subnet in vnet.subnets: if filter_dict: if filter_dict.get("id") and str(subnet.id) != filter_dict["id"]: continue - if filter_dict.get("name") and \ - str(subnet.name) != filter_dict["name"]: + + if ( + filter_dict.get("name") + and str(subnet.name) != filter_dict["name"] + ): continue name = self._get_resource_name_from_resource_id(subnet.id) - subnet_list.append({ - 'id': str(subnet.id), - 'name': name, - 'status': self.provision_state2osm[subnet.provisioning_state], - 'cidr_block': str(subnet.address_prefix), - 'type': 'bridge', - 'shared': False - }) + subnet_list.append( + { + "id": str(subnet.id), + "name": name, + "status": self.provision_state2osm[subnet.provisioning_state], + "cidr_block": str(subnet.address_prefix), + "type": "bridge", + "shared": False, + } + ) return subnet_list except Exception as e: self._format_vimconn_exception(e) - def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, - disk_list=None, availability_zone_index=None, availability_zone_list=None): - - self.logger.debug("new vm instance name: %s, image_id: %s, flavor_id: %s, net_list: %s, cloud_config: %s, " - "disk_list: %s, availability_zone_index: %s, availability_zone_list: %s", - name, image_id, flavor_id, net_list, cloud_config, disk_list, - availability_zone_index, availability_zone_list) - + def 
new_vminstance( + self, + name, + description, + start, + image_id, + flavor_id, + net_list, + cloud_config=None, + disk_list=None, + availability_zone_index=None, + availability_zone_list=None, + ): + self.logger.debug( + "new vm instance name: %s, image_id: %s, flavor_id: %s, net_list: %s, cloud_config: %s, " + "disk_list: %s, availability_zone_index: %s, availability_zone_list: %s", + name, + image_id, + flavor_id, + net_list, + cloud_config, + disk_list, + availability_zone_index, + availability_zone_list, + ) self._reload_connection() # Validate input data is valid @@ -609,7 +762,9 @@ class vimconnector(vimconn.VimConnector): # At least one network must be provided if not net_list: - raise vimconn.VimConnException("At least one net must be provided to create a new VM") + raise vimconn.VimConnException( + "At least one net must be provided to create a new VM" + ) # image_id are several fields of the image_id image_reference = self._get_image_reference(image_id) @@ -621,20 +776,25 @@ class vimconnector(vimconn.VimConnector): # Create nics for each subnet self._check_subnets_for_vm(net_list) vm_nics = [] + for idx, net in enumerate(net_list): # Fault with subnet_id - # subnet_id=net['subnet_id'] - # subnet_id=net['net_id'] - nic_name = vm_name + '-nic-' + str(idx) - vm_nic, nic_items = self._create_nic(net, nic_name, net.get('ip_address'), created_items) - vm_nics.append({'id': str(vm_nic.id)}) - net['vim_id'] = vm_nic.id + # subnet_id=net["subnet_id"] + # subnet_id=net["net_id"] + nic_name = vm_name + "-nic-" + str(idx) + vm_nic, nic_items = self._create_nic( + net, nic_name, net.get("ip_address"), created_items + ) + vm_nics.append({"id": str(vm_nic.id)}) + net["vim_id"] = vm_nic.id # cloud-init configuration # cloud config if cloud_config: config_drive, userdata = self._create_user_data(cloud_config) - custom_data = base64.b64encode(userdata.encode('utf-8')).decode('latin-1') + custom_data = base64.b64encode(userdata.encode("utf-8")).decode( + "latin-1" + ) key_data = None key_pairs = cloud_config.get("key-pairs") if key_pairs: @@ -646,35 +806,35 @@ class vimconnector(vimconn.VimConnector): user_name = "osm" # DEFAULT USER IS OSM os_profile = { - 'computer_name': vm_name, - 'admin_username': user_name, - 'linux_configuration': { + "computer_name": vm_name, + "admin_username": user_name, + "linux_configuration": { "disable_password_authentication": True, "ssh": { - "public_keys": [{ - "path": "/home/{}/.ssh/authorized_keys".format(user_name), - "key_data": key_data - }] - } + "public_keys": [ + { + "path": "/home/{}/.ssh/authorized_keys".format( + user_name + ), + "key_data": key_data, + } + ] + }, }, - 'custom_data': custom_data + "custom_data": custom_data, } else: os_profile = { - 'computer_name': vm_name, - 'admin_username': 'osm', - 'admin_password': 'Osm4u!', + "computer_name": vm_name, + "admin_username": "osm", + "admin_password": "Osm4u!", } vm_parameters = { - 'location': self.region, - 'os_profile': os_profile, - 'hardware_profile': { - 'vm_size': flavor_id - }, - 'storage_profile': { - 'image_reference': image_reference - } + "location": self.region, + "os_profile": os_profile, + "hardware_profile": {"vm_size": flavor_id}, + "storage_profile": {"image_reference": image_reference}, } # If the machine has several networks one must be marked as primary @@ -682,22 +842,20 @@ class vimconnector(vimconn.VimConnector): if len(vm_nics) > 1: for idx, vm_nic in enumerate(vm_nics): if idx == 0: - vm_nics[0]['Primary'] = True + vm_nics[0]["Primary"] = True else: - 
vm_nics[idx]['Primary'] = False + vm_nics[idx]["Primary"] = False - vm_parameters['network_profile'] = {'network_interfaces': vm_nics} + vm_parameters["network_profile"] = {"network_interfaces": vm_nics} # Obtain zone information vm_zone = self._get_vm_zone(availability_zone_index, availability_zone_list) if vm_zone: - vm_parameters['zones'] = [vm_zone] + vm_parameters["zones"] = [vm_zone] self.logger.debug("create vm name: %s", vm_name) creation_result = self.conn_compute.virtual_machines.create_or_update( - self.resource_group, - vm_name, - vm_parameters + self.resource_group, vm_name, vm_parameters ) virtual_machine = creation_result.result() self.logger.debug("created vm name: %s", vm_name) @@ -705,35 +863,41 @@ class vimconnector(vimconn.VimConnector): # Add disks if they are provided if disk_list: for disk_index, disk in enumerate(disk_list): - self.logger.debug("add disk size: %s, image: %s", disk.get("size"), disk.get("image")) - self._add_newvm_disk(virtual_machine, vm_name, disk_index, disk, created_items) + self.logger.debug( + "add disk size: %s, image: %s", + disk.get("size"), + disk.get("image"), + ) + self._add_newvm_disk( + virtual_machine, vm_name, disk_index, disk, created_items + ) if start: - self.conn_compute.virtual_machines.start( - self.resource_group, - vm_name) + self.conn_compute.virtual_machines.start(self.resource_group, vm_name) # start_result.wait() return virtual_machine.id, created_items - + # run_command_parameters = { - # 'command_id': 'RunShellScript', # For linux, don't change it - # 'script': [ - # 'date > /tmp/test.txt' + # "command_id": "RunShellScript", # For linux, don't change it + # "script": [ + # "date > /tmp/test.txt" # ] # } except Exception as e: # Rollback vm creacion vm_id = None + if virtual_machine: vm_id = virtual_machine.id + try: self.logger.debug("exception creating vm try to rollback") self.delete_vminstance(vm_id, created_items) except Exception as e2: self.logger.error("new_vminstance rollback fail {}".format(e2)) - self.logger.debug('Exception creating new vminstance: %s', e, exc_info=True) + self.logger.debug("Exception creating new vminstance: %s", e, exc_info=True) self._format_vimconn_exception(e) def _get_unused_vm_name(self, vm_name): @@ -750,21 +914,25 @@ class vimconnector(vimconn.VimConnector): name_suffix = 0 # name = subnet_name + "-" + str(name_suffix) name = vm_name # first subnet created will have no prefix + while name in vm_names: name_suffix += 1 name = vm_name + "-" + str(name_suffix) + return name def _get_vm_zone(self, availability_zone_index, availability_zone_list): - if availability_zone_index is None: return None vim_availability_zones = self._get_azure_availability_zones() # check if VIM offer enough availability zones describe in the VNFD - if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones): + if vim_availability_zones and len(availability_zone_list) <= len( + vim_availability_zones + ): # check if all the names of NFV AV match VIM AV names match_by_index = False + if not availability_zone_list: match_by_index = True else: @@ -772,25 +940,29 @@ class vimconnector(vimconn.VimConnector): if av not in vim_availability_zones: match_by_index = True break + if match_by_index: return vim_availability_zones[availability_zone_index] else: return availability_zone_list[availability_zone_index] else: - raise vimconn.VimConnConflictException("No enough availability zones at VIM for this deployment") + raise vimconn.VimConnConflictException( + "No enough availability zones at VIM 
for this deployment" + ) def _get_azure_availability_zones(self): return self.AZURE_ZONES - def _add_newvm_disk(self, virtual_machine, vm_name, disk_index, disk, created_items={}): - + def _add_newvm_disk( + self, virtual_machine, vm_name, disk_index, disk, created_items={} + ): disk_name = None data_disk = None # Check if must create empty disk or from image - if disk.get('vim_id'): + if disk.get("vim_id"): # disk already exists, just get - parsed_id = azure_tools.parse_resource_id(disk.get('vim_id')) + parsed_id = azure_tools.parse_resource_id(disk.get("vim_id")) disk_name = parsed_id.get("name") data_disk = self.conn_compute.disks.get(self.resource_group, disk_name) else: @@ -801,72 +973,71 @@ class vimconnector(vimconn.VimConnector): self.resource_group, disk_name, { - 'location': self.region, - 'disk_size_gb': disk.get("size"), - 'creation_data': { - 'create_option': DiskCreateOption.empty - } - } + "location": self.region, + "disk_size_gb": disk.get("size"), + "creation_data": {"create_option": DiskCreateOption.empty}, + }, ) data_disk = async_disk_creation.result() created_items[data_disk.id] = True else: image_id = disk.get("image_id") + if azure_tools.is_valid_resource_id(image_id): parsed_id = azure_tools.parse_resource_id(image_id) # Check if image is snapshot or disk image_name = parsed_id.get("name") type = parsed_id.get("resource_type") - if type == 'snapshots' or type == 'disks': + if type == "snapshots" or type == "disks": self.logger.debug("create disk from copy name: %s", image_name) # ¿Should check that snapshot exists? async_disk_creation = self.conn_compute.disks.create_or_update( self.resource_group, disk_name, { - 'location': self.region, - 'creation_data': { - 'create_option': 'Copy', - 'source_uri': image_id - } - } + "location": self.region, + "creation_data": { + "create_option": "Copy", + "source_uri": image_id, + }, + }, ) data_disk = async_disk_creation.result() created_items[data_disk.id] = True - else: - raise vimconn.VimConnNotFoundException("Invalid image_id: %s ", image_id) + raise vimconn.VimConnNotFoundException( + "Invalid image_id: %s ", image_id + ) else: - raise vimconn.VimConnNotFoundException("Invalid image_id: %s ", image_id) + raise vimconn.VimConnNotFoundException( + "Invalid image_id: %s ", image_id + ) # Attach the disk created - virtual_machine.storage_profile.data_disks.append({ - 'lun': disk_index, - 'name': disk_name, - 'create_option': DiskCreateOption.attach, - 'managed_disk': { - 'id': data_disk.id - }, - 'disk_size_gb': disk.get('size') - }) + virtual_machine.storage_profile.data_disks.append( + { + "lun": disk_index, + "name": disk_name, + "create_option": DiskCreateOption.attach, + "managed_disk": {"id": data_disk.id}, + "disk_size_gb": disk.get("size"), + } + ) self.logger.debug("attach disk name: %s", disk_name) self.conn_compute.virtual_machines.create_or_update( - self.resource_group, - virtual_machine.name, - virtual_machine + self.resource_group, virtual_machine.name, virtual_machine ) # It is necesary extract from image_id data to create the VM with this format - # 'image_reference': { - # 'publisher': vm_reference['publisher'], - # 'offer': vm_reference['offer'], - # 'sku': vm_reference['sku'], - # 'version': vm_reference['version'] + # "image_reference": { + # "publisher": vm_reference["publisher"], + # "offer": vm_reference["offer"], + # "sku": vm_reference["sku"], + # "version": vm_reference["version"] # }, def _get_image_reference(self, image_id): - try: # The data input format example: # 
/Subscriptions/ca3d18ab-d373-4afb-a5d6-7c44f098d16a/Providers/Microsoft.Compute/Locations/westeurope/ @@ -874,27 +1045,29 @@ class vimconnector(vimconn.VimConnector): # Offers/UbuntuServer/ # Skus/18.04-LTS/ # Versions/18.04.201809110 - publisher = str(image_id.split('/')[8]) - offer = str(image_id.split('/')[12]) - sku = str(image_id.split('/')[14]) - version = str(image_id.split('/')[16]) + publisher = str(image_id.split("/")[8]) + offer = str(image_id.split("/")[12]) + sku = str(image_id.split("/")[14]) + version = str(image_id.split("/")[16]) return { - 'publisher': publisher, - 'offer': offer, - 'sku': sku, - 'version': version + "publisher": publisher, + "offer": offer, + "sku": sku, + "version": version, } except Exception: raise vimconn.VimConnException( - "Unable to get image_reference from invalid image_id format: '{}'".format(image_id)) + "Unable to get image_reference from invalid image_id format: '{}'".format( + image_id + ) + ) # Azure VM names can not have some special characters def _check_vm_name(self, vm_name): """ Checks vm name, in case the vm has not allowed characters they are removed, not error raised """ - chars_not_allowed_list = "~!@#$%^&*()=+_[]{}|;:<>/?." # First: the VM name max length is 64 characters @@ -904,60 +1077,77 @@ class vimconnector(vimconn.VimConnector): for elem in chars_not_allowed_list: # Check if string is in the main string if elem in vm_name_aux: - # self.logger.debug('Dentro del IF') + # self.logger.debug("Dentro del IF") # Replace the string - vm_name_aux = vm_name_aux.replace(elem, '-') + vm_name_aux = vm_name_aux.replace(elem, "-") return vm_name_aux def get_flavor_id_from_data(self, flavor_dict): - self.logger.debug("getting flavor id from data, flavor_dict: %s", flavor_dict) filter_dict = flavor_dict or {} + try: self._reload_connection() - vm_sizes_list = [vm_size.serialize() for vm_size in - self.conn_compute.virtual_machine_sizes.list(self.region)] + vm_sizes_list = [ + vm_size.serialize() + for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region) + ] - cpus = filter_dict.get('vcpus') or 0 - memMB = filter_dict.get('ram') or 0 + cpus = filter_dict.get("vcpus") or 0 + memMB = filter_dict.get("ram") or 0 # Filter if self._config.get("flavors_pattern"): - filtered_sizes = [size for size in vm_sizes_list if size['numberOfCores'] >= cpus and - size['memoryInMB'] >= memMB and - re.search(self._config.get("flavors_pattern"), size["name"])] + filtered_sizes = [ + size + for size in vm_sizes_list + if size["numberOfCores"] >= cpus + and size["memoryInMB"] >= memMB + and re.search(self._config.get("flavors_pattern"), size["name"]) + ] else: - filtered_sizes = [size for size in vm_sizes_list if size['numberOfCores'] >= cpus and - size['memoryInMB'] >= memMB] + filtered_sizes = [ + size + for size in vm_sizes_list + if size["numberOfCores"] >= cpus and size["memoryInMB"] >= memMB + ] # Sort - listedFilteredSizes = sorted(filtered_sizes, key=lambda k: (k['numberOfCores'], k['memoryInMB'], - k['resourceDiskSizeInMB'])) + listedFilteredSizes = sorted( + filtered_sizes, + key=lambda k: ( + k["numberOfCores"], + k["memoryInMB"], + k["resourceDiskSizeInMB"], + ), + ) if listedFilteredSizes: - return listedFilteredSizes[0]['name'] - raise vimconn.VimConnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict))) + return listedFilteredSizes[0]["name"] + raise vimconn.VimConnNotFoundException( + "Cannot find any flavor matching '{}'".format(str(flavor_dict)) + ) except Exception as e: 
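# _format_vimconn_exception (defined earlier in this class) funnels SDK
# errors into the closest vimconn type: AuthenticationError becomes
# VimConnAuthException, ConnectionError becomes VimConnConnectionException,
# and anything else flags the client for reload and is re-raised as a
# generic VimConnException.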
self._format_vimconn_exception(e) def _get_flavor_id_from_flavor_name(self, flavor_name): - # self.logger.debug("getting flavor id from flavor name {}".format(flavor_name)) try: self._reload_connection() - vm_sizes_list = [vm_size.serialize() for vm_size in - self.conn_compute.virtual_machine_sizes.list(self.region)] + vm_sizes_list = [ + vm_size.serialize() + for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region) + ] output_flavor = None for size in vm_sizes_list: - if size['name'] == flavor_name: + if size["name"] == flavor_name: output_flavor = size # None is returned if not found anything return output_flavor - except Exception as e: self._format_vimconn_exception(e) @@ -966,52 +1156,63 @@ class vimconnector(vimconn.VimConnector): self._reload_connection() return True except Exception as e: - raise vimconn.VimConnException("Connectivity issue with Azure API: {}".format(e)) + raise vimconn.VimConnException( + "Connectivity issue with Azure API: {}".format(e) + ) def get_network(self, net_id): - - # self.logger.debug('get network id: {}'.format(net_id)) + # self.logger.debug("get network id: {}".format(net_id)) # res_name = self._get_resource_name_from_resource_id(net_id) self._reload_connection() - filter_dict = {'name': net_id} + filter_dict = {"name": net_id} network_list = self.get_network_list(filter_dict) if not network_list: - raise vimconn.VimConnNotFoundException("network '{}' not found".format(net_id)) + raise vimconn.VimConnNotFoundException( + "network '{}' not found".format(net_id) + ) else: return network_list[0] def delete_network(self, net_id, created_items=None): - - self.logger.debug('deleting network {} - {}'.format(self.resource_group, net_id)) + self.logger.debug( + "deleting network {} - {}".format(self.resource_group, net_id) + ) self._reload_connection() res_name = self._get_resource_name_from_resource_id(net_id) - filter_dict = {'name': res_name} + filter_dict = {"name": res_name} network_list = self.get_network_list(filter_dict) if not network_list: - raise vimconn.VimConnNotFoundException("network '{}' not found".format(net_id)) + raise vimconn.VimConnNotFoundException( + "network '{}' not found".format(net_id) + ) try: # Subnet API fails (CloudError: Azure Error: ResourceNotFound) # Put the initial virtual_network API - async_delete = self.conn_vnet.subnets.delete(self.resource_group, self.vnet_name, res_name) + async_delete = self.conn_vnet.subnets.delete( + self.resource_group, self.vnet_name, res_name + ) async_delete.wait() return net_id except CloudError as e: if e.error.error and "notfound" in e.error.error.lower(): - raise vimconn.VimConnNotFoundException("network '{}' not found".format(net_id)) + raise vimconn.VimConnNotFoundException( + "network '{}' not found".format(net_id) + ) else: self._format_vimconn_exception(e) except Exception as e: self._format_vimconn_exception(e) def delete_vminstance(self, vm_id, created_items=None): - """ Deletes a vm instance from the vim. 
- """ - self.logger.debug('deleting VM instance {} - {}'.format(self.resource_group, vm_id)) + """Deletes a vm instance from the vim.""" + self.logger.debug( + "deleting VM instance {} - {}".format(self.resource_group, vm_id) + ) self._reload_connection() created_items = created_items or {} @@ -1019,62 +1220,79 @@ class vimconnector(vimconn.VimConnector): # Check vm exists, we can call delete_vm to clean created_items if vm_id: res_name = self._get_resource_name_from_resource_id(vm_id) - vm = self.conn_compute.virtual_machines.get(self.resource_group, res_name) + vm = self.conn_compute.virtual_machines.get( + self.resource_group, res_name + ) # Shuts down the virtual machine and releases the compute resources # vm_stop = self.conn_compute.virtual_machines.power_off(self.resource_group, resName) # vm_stop.wait() - vm_delete = self.conn_compute.virtual_machines.delete(self.resource_group, res_name) + vm_delete = self.conn_compute.virtual_machines.delete( + self.resource_group, res_name + ) vm_delete.wait() - self.logger.debug('deleted VM name: %s', res_name) + self.logger.debug("deleted VM name: %s", res_name) # Delete OS Disk os_disk_name = vm.storage_profile.os_disk.name - self.logger.debug('delete OS DISK: %s', os_disk_name) - async_disk_delete = self.conn_compute.disks.delete(self.resource_group, os_disk_name) + self.logger.debug("delete OS DISK: %s", os_disk_name) + async_disk_delete = self.conn_compute.disks.delete( + self.resource_group, os_disk_name + ) async_disk_delete.wait() # os disks are created always with the machine - self.logger.debug('deleted OS DISK name: %s', os_disk_name) + self.logger.debug("deleted OS DISK name: %s", os_disk_name) for data_disk in vm.storage_profile.data_disks: - self.logger.debug('delete data_disk: %s', data_disk.name) - async_disk_delete = self.conn_compute.disks.delete(self.resource_group, data_disk.name) + self.logger.debug("delete data_disk: %s", data_disk.name) + async_disk_delete = self.conn_compute.disks.delete( + self.resource_group, data_disk.name + ) async_disk_delete.wait() self._markdel_created_item(data_disk.managed_disk.id, created_items) - self.logger.debug('deleted OS DISK name: %s', data_disk.name) + self.logger.debug("deleted OS DISK name: %s", data_disk.name) # After deleting VM, it is necessary to delete NIC, because if is not deleted delete_network # does not work because Azure says that is in use the subnet network_interfaces = vm.network_profile.network_interfaces for network_interface in network_interfaces: - - nic_name = self._get_resource_name_from_resource_id(network_interface.id) + nic_name = self._get_resource_name_from_resource_id( + network_interface.id + ) nic_data = self.conn_vnet.network_interfaces.get( - self.resource_group, - nic_name) + self.resource_group, nic_name + ) public_ip_name = None exist_public_ip = nic_data.ip_configurations[0].public_ip_address if exist_public_ip: - public_ip_id = nic_data.ip_configurations[0].public_ip_address.id + public_ip_id = nic_data.ip_configurations[ + 0 + ].public_ip_address.id # Delete public_ip - public_ip_name = self._get_resource_name_from_resource_id(public_ip_id) + public_ip_name = self._get_resource_name_from_resource_id( + public_ip_id + ) # Public ip must be deleted afterwards of nic that is attached - self.logger.debug('delete NIC name: %s', nic_name) - nic_delete = self.conn_vnet.network_interfaces.delete(self.resource_group, nic_name) + self.logger.debug("delete NIC name: %s", nic_name) + nic_delete = self.conn_vnet.network_interfaces.delete( + self.resource_group, 
nic_name + ) nic_delete.wait() self._markdel_created_item(network_interface.id, created_items) - self.logger.debug('deleted NIC name: %s', nic_name) + self.logger.debug("deleted NIC name: %s", nic_name) # Delete list of public ips if public_ip_name: - self.logger.debug('delete PUBLIC IP - ' + public_ip_name) - ip_delete = self.conn_vnet.public_ip_addresses.delete(self.resource_group, public_ip_name) + self.logger.debug("delete PUBLIC IP - " + public_ip_name) + ip_delete = self.conn_vnet.public_ip_addresses.delete( + self.resource_group, public_ip_name + ) ip_delete.wait() self._markdel_created_item(public_ip_id, created_items) @@ -1083,7 +1301,9 @@ class vimconnector(vimconn.VimConnector): except CloudError as e: if e.error.error and "notfound" in e.error.error.lower(): - raise vimconn.VimConnNotFoundException("No vm instance found '{}'".format(vm_id)) + raise vimconn.VimConnNotFoundException( + "No vm instance found '{}'".format(vm_id) + ) else: self._format_vimconn_exception(e) except Exception as e: @@ -1094,9 +1314,9 @@ class vimconnector(vimconn.VimConnector): created_items[item_id] = False def _delete_created_items(self, created_items): - """ Delete created_items elements that have not been deleted with the virtual machine - Created_items may not be deleted correctly with the created machine if the - virtual machine fails creating or in other cases of error + """Delete created_items elements that have not been deleted with the virtual machine + Created_items may not be deleted correctly with the created machine if the + virtual machine fails creating or in other cases of error """ self.logger.debug("Created items: %s", created_items) # Must delete in order first nics, then public_ips @@ -1109,7 +1329,6 @@ class vimconnector(vimconn.VimConnector): continue # self.logger.debug("Must delete item id: %s", item_id) - # Obtain type, supported nic, disk or public ip parsed_id = azure_tools.parse_resource_id(item_id) resource_type = parsed_id.get("resource_type") @@ -1126,47 +1345,67 @@ class vimconnector(vimconn.VimConnector): for item_name in nics_to_delete: try: self.logger.debug("deleting nic name %s:", item_name) - nic_delete = self.conn_vnet.network_interfaces.delete(self.resource_group, item_name) + nic_delete = self.conn_vnet.network_interfaces.delete( + self.resource_group, item_name + ) nic_delete.wait() self.logger.debug("deleted nic name %s:", item_name) except Exception as e: - self.logger.error("Error deleting item: {}: {}".format(type(e).__name__, e)) + self.logger.error( + "Error deleting item: {}: {}".format(type(e).__name__, e) + ) for item_name in publics_ip_to_delete: try: self.logger.debug("deleting public ip name %s:", item_name) - ip_delete = self.conn_vnet.public_ip_addresses.delete(self.resource_group, name) + ip_delete = self.conn_vnet.public_ip_addresses.delete( + self.resource_group, name + ) ip_delete.wait() self.logger.debug("deleted public ip name %s:", item_name) except Exception as e: - self.logger.error("Error deleting item: {}: {}".format(type(e).__name__, e)) + self.logger.error( + "Error deleting item: {}: {}".format(type(e).__name__, e) + ) for item_name in disks_to_delete: try: self.logger.debug("deleting data disk name %s:", name) - async_disk_delete = self.conn_compute.disks.delete(self.resource_group, item_name) + async_disk_delete = self.conn_compute.disks.delete( + self.resource_group, item_name + ) async_disk_delete.wait() self.logger.debug("deleted data disk name %s:", name) except Exception as e: - self.logger.error("Error deleting item: {}: 
{}".format(type(e).__name__, e)) + self.logger.error( + "Error deleting item: {}: {}".format(type(e).__name__, e) + ) def action_vminstance(self, vm_id, action_dict, created_items={}): """Send and action over a VM instance from VIM Returns the vm_id if the action was successfully sent to the VIM """ - self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict)) + try: self._reload_connection() resName = self._get_resource_name_from_resource_id(vm_id) + if "start" in action_dict: self.conn_compute.virtual_machines.start(self.resource_group, resName) - elif "stop" in action_dict or "shutdown" in action_dict or "shutoff" in action_dict: - self.conn_compute.virtual_machines.power_off(self.resource_group, resName) + elif ( + "stop" in action_dict + or "shutdown" in action_dict + or "shutoff" in action_dict + ): + self.conn_compute.virtual_machines.power_off( + self.resource_group, resName + ) elif "terminate" in action_dict: self.conn_compute.virtual_machines.delete(self.resource_group, resName) elif "reboot" in action_dict: self.conn_compute.virtual_machines.restart(self.resource_group, resName) + return None except CloudError as e: if e.error.error and "notfound" in e.error.error.lower(): @@ -1177,13 +1416,19 @@ class vimconnector(vimconn.VimConnector): self._format_vimconn_exception(e) def delete_flavor(self, flavor_id): - raise vimconn.VimConnAuthException("It is not possible to delete a FLAVOR in AZURE") + raise vimconn.VimConnAuthException( + "It is not possible to delete a FLAVOR in AZURE" + ) - def delete_tenant(self, tenant_id,): - raise vimconn.VimConnAuthException("It is not possible to delete a TENANT in AZURE") + def delete_tenant(self, tenant_id): + raise vimconn.VimConnAuthException( + "It is not possible to delete a TENANT in AZURE" + ) def delete_image(self, image_id): - raise vimconn.VimConnAuthException("It is not possible to delete a IMAGE in AZURE") + raise vimconn.VimConnAuthException( + "It is not possible to delete a IMAGE in AZURE" + ) def get_vminstance(self, vm_id): """ @@ -1196,7 +1441,9 @@ class vimconnector(vimconn.VimConnector): vm = self.conn_compute.virtual_machines.get(self.resource_group, resName) except CloudError as e: if e.error.error and "notfound" in e.error.error.lower(): - raise vimconn.VimConnNotFoundException("No vminstance found '{}'".format(vm_id)) + raise vimconn.VimConnNotFoundException( + "No vminstance found '{}'".format(vm_id) + ) else: self._format_vimconn_exception(e) except Exception as e: @@ -1211,32 +1458,43 @@ class vimconnector(vimconn.VimConnector): self._reload_connection() self.logger.debug("get flavor from id: %s", flavor_id) flavor_data = self._get_flavor_id_from_flavor_name(flavor_id) + if flavor_data: flavor = { - 'id': flavor_id, - 'name': flavor_id, - 'ram': flavor_data['memoryInMB'], - 'vcpus': flavor_data['numberOfCores'], - 'disk': flavor_data['resourceDiskSizeInMB']/1024 + "id": flavor_id, + "name": flavor_id, + "ram": flavor_data["memoryInMB"], + "vcpus": flavor_data["numberOfCores"], + "disk": flavor_data["resourceDiskSizeInMB"] / 1024, } + return flavor else: - raise vimconn.VimConnNotFoundException("flavor '{}' not found".format(flavor_id)) + raise vimconn.VimConnNotFoundException( + "flavor '{}' not found".format(flavor_id) + ) def get_tenant_list(self, filter_dict={}): - """ Obtains the list of tenants - For the azure connector only the azure tenant will be returned if it is compatible - with filter_dict + """Obtains the list of tenants + For the azure connector only the azure tenant will be returned if it is 
compatible + with filter_dict """ - tenants_azure = [{'name': self.tenant, 'id': self.tenant}] + tenants_azure = [{"name": self.tenant, "id": self.tenant}] tenant_list = [] self.logger.debug("get tenant list: %s", filter_dict) for tenant_azure in tenants_azure: if filter_dict: - if filter_dict.get("id") and str(tenant_azure.get("id")) != filter_dict["id"]: + if ( + filter_dict.get("id") + and str(tenant_azure.get("id")) != filter_dict["id"] + ): continue - if filter_dict.get("name") and str(tenant_azure.get("name")) != filter_dict["name"]: + + if ( + filter_dict.get("name") + and str(tenant_azure.get("name")) != filter_dict["name"] + ): continue tenant_list.append(tenant_azure) @@ -1245,22 +1503,20 @@ class vimconnector(vimconn.VimConnector): def refresh_nets_status(self, net_list): """Get the status of the networks - Params: the list of network identifiers - Returns a dictionary with: - net_id: #VIM id of this network - status: #Mandatory. Text with one of: - # DELETED (not found at vim) - # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) - # OTHER (Vim reported other status not understood) - # ERROR (VIM indicates an ERROR status) - # ACTIVE, INACTIVE, DOWN (admin down), - # BUILD (on building process) - # - error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR - vim_info: #Text with plain information obtained from vim (yaml.safe_dump) - + Params: the list of network identifiers + Returns a dictionary with: + net_id: #VIM id of this network + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, INACTIVE, DOWN (admin down), + # BUILD (on building process) + # + error_msg: #Text with VIM error message, if any. 
Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) """ - out_nets = {} self._reload_connection() @@ -1274,37 +1530,45 @@ class vimconnector(vimconn.VimConnector): out_nets[net_id] = { "status": self.provision_state2osm[net.provisioning_state], - "vim_info": str(net) + "vim_info": str(net), } except CloudError as e: if e.error.error and "notfound" in e.error.error.lower(): - self.logger.info("Not found subnet net_name: %s, subnet_name: %s", netName, resName) - out_nets[net_id] = { - "status": "DELETED", - "error_msg": str(e) - } + self.logger.info( + "Not found subnet net_name: %s, subnet_name: %s", + netName, + resName, + ) + out_nets[net_id] = {"status": "DELETED", "error_msg": str(e)} else: - self.logger.error("CloudError Exception %s when searching subnet", e) + self.logger.error( + "CloudError Exception %s when searching subnet", e + ) out_nets[net_id] = { "status": "VIM_ERROR", - "error_msg": str(e) + "error_msg": str(e), } except vimconn.VimConnNotFoundException as e: - self.logger.error("VimConnNotFoundException %s when searching subnet", e) + self.logger.error( + "VimConnNotFoundException %s when searching subnet", e + ) out_nets[net_id] = { "status": "DELETED", - "error_msg": str(e) + "error_msg": str(e), } except Exception as e: - self.logger.error("Exception %s when searching subnet", e, exc_info=True) + self.logger.error( + "Exception %s when searching subnet", e, exc_info=True + ) out_nets[net_id] = { "status": "VIM_ERROR", - "error_msg": str(e) + "error_msg": str(e), } + return out_nets def refresh_vms_status(self, vm_list): - """ Get the status of the virtual machines and their interfaces/ports + """Get the status of the virtual machines and their interfaces/ports Params: the list of VM identifiers Returns a dictionary with: vm_id: # VIM id of this Virtual Machine @@ -1325,7 +1589,6 @@ class vimconnector(vimconn.VimConnector): mac_address - The MAC address of the interface. ip_address - The IP address of the interface within the subnet. 
""" - out_vms = {} self._reload_connection() @@ -1337,38 +1600,52 @@ class vimconnector(vimconn.VimConnector): try: res_name = self._get_resource_name_from_resource_id(vm_id) - vm = self.conn_compute.virtual_machines.get(self.resource_group, res_name) - out_vm['vim_info'] = str(vm) - out_vm['status'] = self.provision_state2osm.get(vm.provisioning_state, 'OTHER') - if vm.provisioning_state == 'Succeeded': + vm = self.conn_compute.virtual_machines.get( + self.resource_group, res_name + ) + out_vm["vim_info"] = str(vm) + out_vm["status"] = self.provision_state2osm.get( + vm.provisioning_state, "OTHER" + ) + + if vm.provisioning_state == "Succeeded": # check if machine is running or stopped - instance_view = self.conn_compute.virtual_machines.instance_view(self.resource_group, - res_name) + instance_view = self.conn_compute.virtual_machines.instance_view( + self.resource_group, res_name + ) + for status in instance_view.statuses: splitted_status = status.code.split("/") - if len(splitted_status) == 2 and splitted_status[0] == 'PowerState': - out_vm['status'] = self.power_state2osm.get(splitted_status[1], 'OTHER') + if ( + len(splitted_status) == 2 + and splitted_status[0] == "PowerState" + ): + out_vm["status"] = self.power_state2osm.get( + splitted_status[1], "OTHER" + ) network_interfaces = vm.network_profile.network_interfaces - out_vm['interfaces'] = self._get_vm_interfaces_status(vm_id, network_interfaces) + out_vm["interfaces"] = self._get_vm_interfaces_status( + vm_id, network_interfaces + ) except CloudError as e: if e.error.error and "notfound" in e.error.error.lower(): self.logger.debug("Not found vm id: %s", vm_id) - out_vm['status'] = "DELETED" - out_vm['error_msg'] = str(e) - out_vm['vim_info'] = None + out_vm["status"] = "DELETED" + out_vm["error_msg"] = str(e) + out_vm["vim_info"] = None else: # maybe connection error or another type of error, return vim error self.logger.error("Exception %s refreshing vm_status", e) - out_vm['status'] = "VIM_ERROR" - out_vm['error_msg'] = str(e) - out_vm['vim_info'] = None + out_vm["status"] = "VIM_ERROR" + out_vm["error_msg"] = str(e) + out_vm["vim_info"] = None except Exception as e: self.logger.error("Exception %s refreshing vm_status", e, exc_info=True) - out_vm['status'] = "VIM_ERROR" - out_vm['error_msg'] = str(e) - out_vm['vim_info'] = None + out_vm["status"] = "VIM_ERROR" + out_vm["error_msg"] = str(e) + out_vm["vim_info"] = None out_vms[vm_id] = out_vm @@ -1384,40 +1661,50 @@ class vimconnector(vimconn.VimConnector): interface_list = [] for network_interface in interfaces: interface_dict = {} - nic_name = self._get_resource_name_from_resource_id(network_interface.id) - interface_dict['vim_interface_id'] = network_interface.id + nic_name = self._get_resource_name_from_resource_id( + network_interface.id + ) + interface_dict["vim_interface_id"] = network_interface.id nic_data = self.conn_vnet.network_interfaces.get( self.resource_group, - nic_name) + nic_name, + ) ips = [] if nic_data.ip_configurations[0].public_ip_address: self.logger.debug("Obtain public ip address") public_ip_name = self._get_resource_name_from_resource_id( - nic_data.ip_configurations[0].public_ip_address.id) - public_ip = self.conn_vnet.public_ip_addresses.get(self.resource_group, public_ip_name) + nic_data.ip_configurations[0].public_ip_address.id + ) + public_ip = self.conn_vnet.public_ip_addresses.get( + self.resource_group, public_ip_name + ) self.logger.debug("Public ip address is: %s", public_ip.ip_address) ips.append(public_ip.ip_address) private_ip = 
nic_data.ip_configurations[0].private_ip_address ips.append(private_ip) - interface_dict['mac_address'] = nic_data.mac_address - interface_dict['ip_address'] = ";".join(ips) + interface_dict["mac_address"] = nic_data.mac_address + interface_dict["ip_address"] = ";".join(ips) interface_list.append(interface_dict) return interface_list except Exception as e: - self.logger.error("Exception %s obtaining interface data for vm: %s, error: %s", vm_id, e, exc_info=True) + self.logger.error( + "Exception %s obtaining interface data for vm: %s, error: %s", + vm_id, + e, + exc_info=True, + ) self._format_vimconn_exception(e) if __name__ == "__main__": - # Making some basic test - vim_id = 'azure' - vim_name = 'azure' + vim_id = "azure" + vim_name = "azure" needed_test_params = { "client_id": "AZURE_CLIENT_ID", "secret": "AZURE_SECRET", @@ -1430,53 +1717,65 @@ if __name__ == "__main__": for param, env_var in needed_test_params.items(): value = getenv(env_var) + if not value: raise Exception("Provide a valid value for env '{}'".format(env_var)) + test_params[param] = value config = { - 'region_name': getenv("AZURE_REGION_NAME", 'westeurope'), - 'resource_group': getenv("AZURE_RESOURCE_GROUP"), - 'subscription_id': getenv("AZURE_SUBSCRIPTION_ID"), - 'pub_key': getenv("AZURE_PUB_KEY", None), - 'vnet_name': getenv("AZURE_VNET_NAME", 'myNetwork'), + "region_name": getenv("AZURE_REGION_NAME", "westeurope"), + "resource_group": getenv("AZURE_RESOURCE_GROUP"), + "subscription_id": getenv("AZURE_SUBSCRIPTION_ID"), + "pub_key": getenv("AZURE_PUB_KEY", None), + "vnet_name": getenv("AZURE_VNET_NAME", "myNetwork"), } virtualMachine = { - 'name': 'sergio', - 'description': 'new VM', - 'status': 'running', - 'image': { - 'publisher': 'Canonical', - 'offer': 'UbuntuServer', - 'sku': '16.04.0-LTS', - 'version': 'latest' - }, - 'hardware_profile': { - 'vm_size': 'Standard_DS1_v2' + "name": "sergio", + "description": "new VM", + "status": "running", + "image": { + "publisher": "Canonical", + "offer": "UbuntuServer", + "sku": "16.04.0-LTS", + "version": "latest", }, - 'networks': [ - 'sergio' - ] + "hardware_profile": {"vm_size": "Standard_DS1_v2"}, + "networks": ["sergio"], } vnet_config = { - 'subnet_address': '10.1.2.0/24', - # 'subnet_name': 'subnet-oam' + "subnet_address": "10.1.2.0/24", + # "subnet_name": "subnet-oam" } ########################### - azure = vimconnector(vim_id, vim_name, tenant_id=test_params["tenant"], tenant_name=None, url=None, url_admin=None, - user=test_params["client_id"], passwd=test_params["secret"], log_level=None, config=config) + azure = vimconnector( + vim_id, + vim_name, + tenant_id=test_params["tenant"], + tenant_name=None, + url=None, + url_admin=None, + user=test_params["client_id"], + passwd=test_params["secret"], + log_level=None, + config=config, + ) # azure.get_flavor_id_from_data("here") # subnets=azure.get_network_list() - # azure.new_vminstance(virtualMachine['name'], virtualMachine['description'], virtualMachine['status'], - # virtualMachine['image'], virtualMachine['hardware_profile']['vm_size'], subnets) + # azure.new_vminstance(virtualMachine["name"], virtualMachine["description"], virtualMachine["status"], + # virtualMachine["image"], virtualMachine["hardware_profile"]["vm_size"], subnets) azure.new_network("mynet", None) - net_id = "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/Microsoft."\ - "Network/virtualNetworks/test" - net_id_not_found = "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/"\ - 
"Microsoft.Network/virtualNetworks/testALF" + net_id = ( + "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/Microsoft." + "Network/virtualNetworks/test" + ) + net_id_not_found = ( + "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/" + "Microsoft.Network/virtualNetworks/testALF" + ) azure.refresh_nets_status([net_id, net_id_not_found]) diff --git a/RO-VIM-azure/setup.py b/RO-VIM-azure/setup.py index 9debdd07..f6b5ffec 100644 --- a/RO-VIM-azure/setup.py +++ b/RO-VIM-azure/setup.py @@ -30,18 +30,20 @@ osm-ro pluging for azure VIM setup( name=_name, - description='OSM ro vim plugin for azure', + description="OSM ro vim plugin for azure", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, # python_requires='>3.5.0', - author='ETSI OSM', - author_email='alfonso.tiernosepulveda@telefonica.com', - maintainer='Alfonso Tierno', - maintainer_email='alfonso.tiernosepulveda@telefonica.com', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + author="ETSI OSM", + author_email="alfonso.tiernosepulveda@telefonica.com", + maintainer="Alfonso Tierno", + maintainer_email="alfonso.tiernosepulveda@telefonica.com", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ @@ -49,10 +51,12 @@ setup( "netaddr", "PyYAML", "azure==4.0.0", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rovim.plugins': ['rovim_azure = osm_rovim_azure.vimconn_azure:vimconnector'], + "osm_rovim.plugins": [ + "rovim_azure = osm_rovim_azure.vimconn_azure:vimconnector" + ], }, ) diff --git a/RO-VIM-azure/tox.ini b/RO-VIM-azure/tox.ini index bc074293..18ba9060 100644 --- a/RO-VIM-azure/tox.ini +++ b/RO-VIM-azure/tox.ini @@ -27,7 +27,7 @@ commands=python3 -m unittest discover -v basepython = python3 deps = flake8 commands = flake8 osm_rovim_azure --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py b/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py index e851fd33..8f946338 100644 --- a/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py +++ b/RO-VIM-fos/osm_rovim_fos/vimconn_fos.py @@ -37,6 +37,7 @@ import uuid import socket import struct from osm_ro_plugin import vimconn + # import json from functools import partial from fog05 import FIMAPI @@ -48,8 +49,20 @@ __date__ = "$2-june-2020 10:35:12$" class vimconnector(vimconn.VimConnector): - def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, - config={}, persistent_info={}): + def __init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin=None, + user=None, + passwd=None, + log_level=None, + config={}, + persistent_info={}, + ): """Constructor of VIM Params: 'uuid': id asigned to this VIM @@ -67,38 +80,53 @@ class 
vimconnector(vimconn.VimConnector): Returns: Raise an exception is some needed parameter is missing, but it must not do any connectivity check against the VIM """ - - vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, - config, persistent_info) - - self.logger = logging.getLogger('ro.vim.fos') - self.logger.debug('vimconn_fos init with config: {}'.format(config)) - self.arch = config.get('arch', 'x86_64') - self.hv = config.get('hypervisor', 'LXD') - self.nodes = config.get('nodes', []) + vimconn.VimConnector.__init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin, + user, + passwd, + log_level, + config, + persistent_info, + ) + + self.logger = logging.getLogger("ro.vim.fos") + self.logger.debug("vimconn_fos init with config: {}".format(config)) + self.arch = config.get("arch", "x86_64") + self.hv = config.get("hypervisor", "LXD") + self.nodes = config.get("nodes", []) self.fdu_node_map = {} self.fos_api = FIMAPI(locator=self.url) def __get_ip_range(self, first, count): - int_first = struct.unpack('!L', socket.inet_aton(first))[0] + int_first = struct.unpack("!L", socket.inet_aton(first))[0] int_last = int_first + count - last = socket.inet_ntoa(struct.pack('!L', int_last)) + last = socket.inet_ntoa(struct.pack("!L", int_last)) + return (first, last) def __name_filter(self, desc, filter_name=None): if filter_name is None: return True - return desc.get('name') == filter_name + + return desc.get("name") == filter_name def __id_filter(self, desc, filter_id=None): if filter_id is None: return True - return desc.get('uuid') == filter_id + + return desc.get("uuid") == filter_id def __checksum_filter(self, desc, filter_checksum=None): if filter_checksum is None: return True - return desc.get('checksum') == filter_checksum + + return desc.get("checksum") == filter_checksum def check_vim_connectivity(self): """Checks VIM can be reached and user credentials are ok. @@ -106,13 +134,25 @@ class vimconnector(vimconn.VimConnector): """ try: self.fos_api.node.list() + return None except fimapi.FIMAuthExcetpion as fae: - raise vimconn.VimConnAuthException("Unable to authenticate to the VIM. Error {}".format(fae)) + raise vimconn.VimConnAuthException( + "Unable to authenticate to the VIM. Error {}".format(fae) + ) except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) - - def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): + raise vimconn.VimConnConnectionException( + "VIM not reachable. 
Error {}".format(e) + ) + + def new_network( + self, + net_name, + net_type, + ip_profile=None, + shared=False, + provider_network_profile=None, + ): """Adds a tenant network to VIM Params: 'net_name': name of the network @@ -131,42 +171,61 @@ class vimconnector(vimconn.VimConnector): 'shared': if this network can be seen/use by other tenants/organization Returns the network identifier on success or raises and exception on failure """ - self.logger.debug('new_network: {}'.format(locals())) - if net_type in ['data', 'ptp']: - raise vimconn.VimConnNotImplemented('{} type of network not supported'.format(net_type)) + self.logger.debug("new_network: {}".format(locals())) + + if net_type in ["data", "ptp"]: + raise vimconn.VimConnNotImplemented( + "{} type of network not supported".format(net_type) + ) - net_uuid = '{}'.format(uuid.uuid4()) + net_uuid = "{}".format(uuid.uuid4()) desc = { - 'uuid': net_uuid, - 'name': net_name, - 'net_type': 'ELAN', - 'is_mgmt': False + "uuid": net_uuid, + "name": net_name, + "net_type": "ELAN", + "is_mgmt": False, } if ip_profile is not None: ip = {} - if ip_profile.get('ip_version') == 'IPv4': + if ip_profile.get("ip_version") == "IPv4": ip_info = {} - ip_range = self.__get_ip_range(ip_profile.get('dhcp_start_address'), ip_profile.get('dhcp_count')) - dhcp_range = '{},{}'.format(ip_range[0], ip_range[1]) - ip['subnet'] = ip_profile.get('subnet_address') - ip['dns'] = ip_profile.get('dns', None) - ip['dhcp_enable'] = ip_profile.get('dhcp_enabled', False) - ip['dhcp_range'] = dhcp_range - ip['gateway'] = ip_profile.get('gateway_address', None) - desc['ip_configuration'] = ip_info + ip_range = self.__get_ip_range( + ip_profile.get("dhcp_start_address"), ip_profile.get("dhcp_count") + ) + dhcp_range = "{},{}".format(ip_range[0], ip_range[1]) + ip["subnet"] = ip_profile.get("subnet_address") + ip["dns"] = ip_profile.get("dns", None) + ip["dhcp_enable"] = ip_profile.get("dhcp_enabled", False) + ip["dhcp_range"] = dhcp_range + ip["gateway"] = ip_profile.get("gateway_address", None) + desc["ip_configuration"] = ip_info else: - raise vimconn.VimConnNotImplemented('IPV6 network is not implemented at VIM') - desc['ip_configuration'] = ip - self.logger.debug('VIM new_network args: {} - Generated Eclipse fog05 Descriptor {}'.format(locals(), desc)) + raise vimconn.VimConnNotImplemented( + "IPV6 network is not implemented at VIM" + ) + + desc["ip_configuration"] = ip + + self.logger.debug( + "VIM new_network args: {} - Generated Eclipse fog05 Descriptor {}".format( + locals(), desc + ) + ) + try: self.fos_api.network.add_network(desc) except fimapi.FIMAResouceExistingException as free: - raise vimconn.VimConnConflictException("Network already exists at VIM. Error {}".format(free)) + raise vimconn.VimConnConflictException( + "Network already exists at VIM. Error {}".format(free) + ) except Exception as e: - raise vimconn.VimConnException("Unable to create network {}. Error {}".format(net_name, e)) + raise vimconn.VimConnException( + "Unable to create network {}. Error {}".format(net_name, e) + ) # No way from the current rest service to get the actual error, most likely it will be an already # existing error + return net_uuid, {} def get_network_list(self, filter_dict={}): @@ -189,35 +248,36 @@ class vimconnector(vimconn.VimConnector): List can be empty if no network map the filter_dict. 
Raise an exception only upon VIM connectivity, authorization, or some other unspecific error """ - self.logger.debug('get_network_list: {}'.format(filter_dict)) + self.logger.debug("get_network_list: {}".format(filter_dict)) res = [] + try: nets = self.fos_api.network.list() except Exception as e: raise vimconn.VimConnConnectionException( - "Cannot get network list from VIM, connection error. Error {}".format(e)) + "Cannot get network list from VIM, connection error. Error {}".format(e) + ) filters = [ - partial(self.__name_filter, filter_name=filter_dict.get('name')), - partial(self.__id_filter, filter_id=filter_dict.get('id')) + partial(self.__name_filter, filter_name=filter_dict.get("name")), + partial(self.__id_filter, filter_id=filter_dict.get("id")), ] r1 = [] for n in nets: match = True + for f in filters: match = match and f(n) + if match: r1.append(n) for n in r1: - osm_net = { - 'id': n.get('uuid'), - 'name': n.get('name'), - 'status': 'ACTIVE' - } + osm_net = {"id": n.get("uuid"), "name": n.get("name"), "status": "ACTIVE"} res.append(osm_net) + return res def get_network(self, net_id): @@ -230,24 +290,35 @@ class vimconnector(vimconn.VimConnector): other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param Raises an exception upon error or when network is not found """ - self.logger.debug('get_network: {}'.format(net_id)) - res = self.get_network_list(filter_dict={'id': net_id}) + self.logger.debug("get_network: {}".format(net_id)) + res = self.get_network_list(filter_dict={"id": net_id}) + if len(res) == 0: - raise vimconn.VimConnNotFoundException("Network {} not found at VIM".format(net_id)) + raise vimconn.VimConnNotFoundException( + "Network {} not found at VIM".format(net_id) + ) + return res[0] def delete_network(self, net_id, created_items=None): """Deletes a tenant network from VIM Returns the network identifier or raises an exception upon error or when network is not found """ - self.logger.debug('delete_network: {}'.format(net_id)) + self.logger.debug("delete_network: {}".format(net_id)) + try: self.fos_api.network.remove_network(net_id) except fimapi.FIMNotFoundException as fnfe: raise vimconn.VimConnNotFoundException( - "Network {} not found at VIM (already deleted?). Error {}".format(net_id, fnfe)) + "Network {} not found at VIM (already deleted?). Error {}".format( + net_id, fnfe + ) + ) except Exception as e: - raise vimconn.VimConnException("Cannot delete network {} from VIM. Error {}".format(net_id, e)) + raise vimconn.VimConnException( + "Cannot delete network {} from VIM. Error {}".format(net_id, e) + ) + return net_id def refresh_nets_status(self, net_list): @@ -267,14 +338,16 @@ class vimconnector(vimconn.VimConnector): vim_info: #Text with plain information obtained from vim (yaml.safe_dump) 'net_id2': ... 
""" - self.logger.debug('Refeshing network status with args: {}'.format(locals())) + self.logger.debug("Refeshing network status with args: {}".format(locals())) r = {} + for n in net_list: try: osm_n = self.get_network(n) - r[osm_n.get('id')] = {'status': osm_n.get('status')} + r[osm_n.get("id")] = {"status": osm_n.get("status")} except vimconn.VimConnNotFoundException: - r[n] = {'status': 'VIM_ERROR'} + r[n] = {"status": "VIM_ERROR"} + return r def get_flavor(self, flavor_id): @@ -282,14 +355,19 @@ class vimconnector(vimconn.VimConnector): Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } Raises an exception upon error or if not found """ - self.logger.debug('VIM get_flavor with args: {}'.format(locals())) + self.logger.debug("VIM get_flavor with args: {}".format(locals())) + try: r = self.fos_api.flavor.get(flavor_id) except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) + raise vimconn.VimConnConnectionException( + "VIM not reachable. Error {}".format(e) + ) + if r is None: raise vimconn.VimConnNotFoundException("Flavor not found at VIM") - return {'id': r.get('uuid'), 'name': r.get('name'), 'fos': r} + + return {"id": r.get("uuid"), "name": r.get("name"), "fos": r} def get_flavor_id_from_data(self, flavor_dict): """Obtain flavor id that match the flavor description @@ -301,17 +379,28 @@ class vimconnector(vimconn.VimConnector): #TODO: complete parameters for EPA Returns the flavor_id or raises a vimconnNotFoundException """ - self.logger.debug('VIM get_flavor_id_from_data with args : {}'.format(locals())) + self.logger.debug("VIM get_flavor_id_from_data with args : {}".format(locals())) try: flvs = self.fos_api.flavor.list() except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) - r = [x.get('uuid') for x in flvs if (x.get('cpu_min_count') == flavor_dict.get('vcpus') and - x.get('ram_size_mb') == flavor_dict.get('ram') and - x.get('storage_size_gb') == flavor_dict.get('disk'))] + raise vimconn.VimConnConnectionException( + "VIM not reachable. Error {}".format(e) + ) + + r = [ + x.get("uuid") + for x in flvs + if ( + x.get("cpu_min_count") == flavor_dict.get("vcpus") + and x.get("ram_size_mb") == flavor_dict.get("ram") + and x.get("storage_size_gb") == flavor_dict.get("disk") + ) + ] + if len(r) == 0: raise vimconn.VimConnNotFoundException("No flavor found") + return r[0] def new_flavor(self, flavor_data): @@ -333,23 +422,29 @@ class vimconnector(vimconn.VimConnector): is_public: #TODO to concrete Returns the flavor identifier""" - self.logger.debug('VIM new_flavor with args: {}'.format(locals())) - flv_id = '{}'.format(uuid.uuid4()) + self.logger.debug("VIM new_flavor with args: {}".format(locals())) + flv_id = "{}".format(uuid.uuid4()) desc = { - 'uuid': flv_id, - 'name': flavor_data.get('name'), - 'cpu_arch': self.arch, - 'cpu_min_count': flavor_data.get('vcpus'), - 'cpu_min_freq': 0, - 'ram_size_mb': float(flavor_data.get('ram')), - 'storage_size_gb': float(flavor_data.get('disk')) + "uuid": flv_id, + "name": flavor_data.get("name"), + "cpu_arch": self.arch, + "cpu_min_count": flavor_data.get("vcpus"), + "cpu_min_freq": 0, + "ram_size_mb": float(flavor_data.get("ram")), + "storage_size_gb": float(flavor_data.get("disk")), } + try: self.fos_api.flavor.add(desc) except fimapi.FIMAResouceExistingException as free: - raise vimconn.VimConnConflictException("Flavor {} already exist at VIM. 
Error {}".format(flv_id, free)) + raise vimconn.VimConnConflictException( + "Flavor {} already exist at VIM. Error {}".format(flv_id, free) + ) except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) + raise vimconn.VimConnConnectionException( + "VIM not reachable. Error {}".format(e) + ) + return flv_id def delete_flavor(self, flavor_id): @@ -359,13 +454,19 @@ class vimconnector(vimconn.VimConnector): self.fos_api.flavor.remove(flavor_id) except fimapi.FIMNotFoundException as fnfe: raise vimconn.VimConnNotFoundException( - "Flavor {} not found at VIM (already deleted?). Error {}".format(flavor_id, fnfe)) + "Flavor {} not found at VIM (already deleted?). Error {}".format( + flavor_id, fnfe + ) + ) except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) + raise vimconn.VimConnConnectionException( + "VIM not reachable. Error {}".format(e) + ) + return flavor_id def new_image(self, image_dict): - """ Adds a tenant image to VIM. imge_dict is a dictionary with: + """Adds a tenant image to VIM. imge_dict is a dictionary with: name: name disk_format: qcow2, vhd, vmdk, raw (by default), ... location: path or URI @@ -373,35 +474,46 @@ class vimconnector(vimconn.VimConnector): metadata: metadata of the image Returns the image id or raises an exception if failed """ - self.logger.debug('VIM new_image with args: {}'.format(locals())) - img_id = '{}'.format(uuid.uuid4()) + self.logger.debug("VIM new_image with args: {}".format(locals())) + img_id = "{}".format(uuid.uuid4()) desc = { - 'name': image_dict.get('name'), - 'uuid': img_id, - 'uri': image_dict.get('location'), - 'format': image_dict.get('disk_format') + "name": image_dict.get("name"), + "uuid": img_id, + "uri": image_dict.get("location"), + "format": image_dict.get("disk_format"), } + try: self.fos_api.image.add(desc) except fimapi.FIMAResouceExistingException as free: - raise vimconn.VimConnConflictException("Image {} already exist at VIM. Error {}".format(img_id, free)) + raise vimconn.VimConnConflictException( + "Image {} already exist at VIM. Error {}".format(img_id, free) + ) except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) + raise vimconn.VimConnConnectionException( + "VIM not reachable. Error {}".format(e) + ) + return img_id def get_image_id_from_path(self, path): - """Get the image id from image path in the VIM database. - Returns the image_id or raises a vimconnNotFoundException + Returns the image_id or raises a vimconnNotFoundException """ - self.logger.debug('VIM get_image_id_from_path with args: {}'.format(locals())) + self.logger.debug("VIM get_image_id_from_path with args: {}".format(locals())) + try: imgs = self.fos_api.image.list() except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) - res = [x.get('uuid') for x in imgs if x.get('uri') == path] + raise vimconn.VimConnConnectionException( + "VIM not reachable. Error {}".format(e) + ) + + res = [x.get("uuid") for x in imgs if x.get("uri") == path] + if len(res) == 0: raise vimconn.VimConnNotFoundException("Image with this path was not found") + return res[0] def get_image_list(self, filter_dict={}): @@ -415,42 +527,61 @@ class vimconnector(vimconn.VimConnector): [{}, ...] 
List can be empty """ - self.logger.debug('VIM get_image_list args: {}'.format(locals())) + self.logger.debug("VIM get_image_list args: {}".format(locals())) r = [] + try: fimgs = self.fos_api.image.list() except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) + raise vimconn.VimConnConnectionException( + "VIM not reachable. Error {}".format(e) + ) filters = [ - partial(self.__name_filter, filter_name=filter_dict.get('name')), - partial(self.__id_filter, filter_id=filter_dict.get('id')), - partial(self.__checksum_filter, filter_checksum=filter_dict.get('checksum')) + partial(self.__name_filter, filter_name=filter_dict.get("name")), + partial(self.__id_filter, filter_id=filter_dict.get("id")), + partial( + self.__checksum_filter, filter_checksum=filter_dict.get("checksum") + ), ] r1 = [] for i in fimgs: match = True + for f in filters: match = match and f(i) + if match: r1.append(i) for i in r1: img_info = { - 'name': i.get('name'), - 'id': i.get('uuid'), - 'checksum': i.get('checksum'), - 'location': i.get('uri'), - 'fos': i + "name": i.get("name"), + "id": i.get("uuid"), + "checksum": i.get("checksum"), + "location": i.get("uri"), + "fos": i, } r.append(img_info) + return r # raise VimConnNotImplemented( "Should have implemented this" ) - def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None, - availability_zone_index=None, availability_zone_list=None): + def new_vminstance( + self, + name, + description, + start, + image_id, + flavor_id, + net_list, + cloud_config=None, + disk_list=None, + availability_zone_index=None, + availability_zone_list=None, + ): """Adds a VM instance to VIM :param start: (boolean) indicates if VM must start or created in pause mode. :param image_id: :param flavor_id: image and flavor VIM id to use for the VM @@ -503,82 +634,88 @@ class vimconnector(vimconn.VimConnector): Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same as not present. 
""" - self.logger.debug('new_vminstance with args: {}'.format(locals())) - fdu_uuid = '{}'.format(uuid.uuid4()) + self.logger.debug("new_vminstance with args: {}".format(locals())) + fdu_uuid = "{}".format(uuid.uuid4()) flv = self.fos_api.flavor.get(flavor_id) img = self.fos_api.image.get(image_id) if flv is None: - raise vimconn.VimConnNotFoundException("Flavor {} not found at VIM".format(flavor_id)) + raise vimconn.VimConnNotFoundException( + "Flavor {} not found at VIM".format(flavor_id) + ) + if img is None: - raise vimconn.VimConnNotFoundException("Image {} not found at VIM".format(image_id)) + raise vimconn.VimConnNotFoundException( + "Image {} not found at VIM".format(image_id) + ) created_items = { - 'fdu_id': '', - 'node_id': '', - 'connection_points': [] + "fdu_id": "", + "node_id": "", + "connection_points": [], } fdu_desc = { - 'name': name, - 'id': fdu_uuid, - 'uuid': fdu_uuid, - 'computation_requirements': flv, - 'image': img, - 'hypervisor': self.hv, - 'migration_kind': 'LIVE', - 'interfaces': [], - 'io_ports': [], - 'connection_points': [], - 'depends_on': [], - 'storage': [] + "name": name, + "id": fdu_uuid, + "uuid": fdu_uuid, + "computation_requirements": flv, + "image": img, + "hypervisor": self.hv, + "migration_kind": "LIVE", + "interfaces": [], + "io_ports": [], + "connection_points": [], + "depends_on": [], + "storage": [], } nets = [] cps = [] intf_id = 0 for n in net_list: - cp_id = '{}'.format(uuid.uuid4()) - n['vim_id'] = cp_id - pair_id = n.get('net_id') + cp_id = "{}".format(uuid.uuid4()) + n["vim_id"] = cp_id + pair_id = n.get("net_id") cp_d = { - 'id': cp_id, - 'name': cp_id, - 'vld_ref': pair_id + "id": cp_id, + "name": cp_id, + "vld_ref": pair_id, } intf_d = { - 'name': n.get('name', 'eth{}'.format(intf_id)), - 'is_mgmt': False, - 'if_type': 'INTERNAL', - 'virtual_interface': { - 'intf_type': n.get('model', 'VIRTIO'), - 'vpci': n.get('vpci', '0:0:0'), - 'bandwidth': int(n.get('bw', 100)) + "name": n.get("name", "eth{}".format(intf_id)), + "is_mgmt": False, + "if_type": "INTERNAL", + "virtual_interface": { + "intf_type": n.get("model", "VIRTIO"), + "vpci": n.get("vpci", "0:0:0"), + "bandwidth": int(n.get("bw", 100)), }, - 'cp_id': cp_id + "cp_id": cp_id, } - if n.get('mac_address', None) is not None: - intf_d['mac_address'] = n['mac_address'] + if n.get("mac_address", None) is not None: + intf_d["mac_address"] = n["mac_address"] - created_items['connection_points'].append(cp_id) - fdu_desc['connection_points'].append(cp_d) - fdu_desc['interfaces'].append(intf_d) + created_items["connection_points"].append(cp_id) + fdu_desc["connection_points"].append(cp_d) + fdu_desc["interfaces"].append(intf_d) intf_id = intf_id + 1 if cloud_config is not None: - configuration = {'conf_type': 'CLOUD_INIT'} - if cloud_config.get('user-data') is not None: - configuration['script'] = cloud_config.get('user-data') - if cloud_config.get('key-pairs') is not None: - configuration['ssh_keys'] = cloud_config.get('key-pairs') + configuration = {"conf_type": "CLOUD_INIT"} + if cloud_config.get("user-data") is not None: + configuration["script"] = cloud_config.get("user-data") + + if cloud_config.get("key-pairs") is not None: + configuration["ssh_keys"] = cloud_config.get("key-pairs") - if 'script' in configuration: - fdu_desc['configuration'] = configuration + if "script" in configuration: + fdu_desc["configuration"] = configuration - self.logger.debug('Eclipse fog05 FDU Descriptor: {}'.format(fdu_desc)) + self.logger.debug("Eclipse fog05 FDU Descriptor: {}".format(fdu_desc)) fdu = 
FDU(fdu_desc) @@ -586,49 +723,64 @@ class vimconnector(vimconn.VimConnector): self.fos_api.fdu.onboard(fdu) instance = self.fos_api.fdu.define(fdu_uuid) instance_list = self.fos_api.fdu.instance_list(fdu_uuid) - selected_node = '' + selected_node = "" + for n in instance_list: instances = instance_list[n] if instance.uuid in instances: selected_node = n - if selected_node == '': + + if selected_node == "": raise ValueError("Unable to find node for network creation") - self.logger.debug('Selected node by VIM: {}'.format(selected_node)) - created_items['fdu_id'] = fdu_uuid - created_items['node_id'] = selected_node + self.logger.debug("Selected node by VIM: {}".format(selected_node)) + created_items["fdu_id"] = fdu_uuid + created_items["node_id"] = selected_node - for cp in fdu_desc['connection_points']: + for cp in fdu_desc["connection_points"]: nets = self.fos_api.network.list() for net in nets: - if net.get('uuid') == cp['vld_ref']: + if net.get("uuid") == cp["vld_ref"]: self.fos_api.network.add_network_to_node(net, selected_node) self.fos_api.fdu.configure(instance.uuid) self.fos_api.fdu.start(instance.uuid) - self.logger.debug('Eclipse fog05 FDU Started {}'.format(instance.uuid)) + self.logger.debug("Eclipse fog05 FDU Started {}".format(instance.uuid)) - created_items['instance_id'] = str(instance.uuid) + created_items["instance_id"] = str(instance.uuid) self.fdu_node_map[instance.uuid] = selected_node - self.logger.debug('new_vminstance returns: {} {}'.format(instance.uuid, created_items)) + self.logger.debug( + "new_vminstance returns: {} {}".format(instance.uuid, created_items) + ) + return str(instance.uuid), created_items except fimapi.FIMAResouceExistingException as free: - raise vimconn.VimConnConflictException("VM already exists at VIM. Error {}".format(free)) + raise vimconn.VimConnConflictException( + "VM already exists at VIM. Error {}".format(free) + ) except Exception as e: - raise vimconn.VimConnException("Error while instantiating VM {}. Error {}".format(name, e)) + raise vimconn.VimConnException( + "Error while instantiating VM {}. Error {}".format(name, e) + ) def get_vminstance(self, vm_id): """Returns the VM instance information from VIM""" - self.logger.debug('VIM get_vminstance with args: {}'.format(locals())) + self.logger.debug("VIM get_vminstance with args: {}".format(locals())) try: instance = self.fos_api.fdu.instance_info(vm_id) except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) + raise vimconn.VimConnConnectionException( + "VIM not reachable. Error {}".format(e) + ) + if instance is None: - raise vimconn.VimConnNotFoundException('VM with id {} not found!'.format(vm_id)) + raise vimconn.VimConnNotFoundException( + "VM with id {} not found!".format(vm_id) + ) + return instance.to_json() def delete_vminstance(self, vm_id, created_items=None): @@ -639,97 +791,102 @@ class vimconnector(vimconn.VimConnector): action_vminstance :return: None or the same vm_id. 
Raises an exception on fail """ - self.logger.debug('FOS delete_vminstance with args: {}'.format(locals())) - fduid = created_items.get('fdu_id') + self.logger.debug("FOS delete_vminstance with args: {}".format(locals())) + fduid = created_items.get("fdu_id") + try: instance = self.fos_api.fdu.instance_info(vm_id) instance_list = self.fos_api.fdu.instance_list(instance.fdu_id) - selected_node = '' + selected_node = "" + for n in instance_list: instances = instance_list[n] + if instance.uuid in instances: selected_node = n - if selected_node == '': + + if selected_node == "": raise ValueError("Unable to find node for the given Instance") self.fos_api.fdu.stop(vm_id) - for cp in instance.to_json()['connection_points']: + for cp in instance.to_json()["connection_points"]: nets = self.fos_api.network.list() for net in nets: - if net.get('uuid') == cp['vld_ref']: - self.fos_api.network.remove_network_from_node(net.get('uuid'), selected_node) + if net.get("uuid") == cp["vld_ref"]: + self.fos_api.network.remove_network_from_node( + net.get("uuid"), selected_node + ) self.fos_api.fdu.clean(vm_id) self.fos_api.fdu.undefine(vm_id) - self.fos_api.fdu.offload(fduid) except Exception as e: - raise vimconn.VimConnException("Error on deleting VM with id {}. Error {}".format(vm_id, e)) + raise vimconn.VimConnException( + "Error on deleting VM with id {}. Error {}".format(vm_id, e) + ) + return vm_id # raise VimConnNotImplemented( "Should have implemented this" ) def refresh_vms_status(self, vm_list): """Get the status of the virtual machines and their interfaces/ports - Params: the list of VM identifiers - Returns a dictionary with: - vm_id: #VIM id of this Virtual Machine - status: #Mandatory. Text with one of: - # DELETED (not found at vim) - # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) - # OTHER (Vim reported other status not understood) - # ERROR (VIM indicates an ERROR status) - # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), - # BUILD (on building process), ERROR - # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address - # - error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR - vim_info: #Text with plain information obtained from vim (yaml.safe_dump) - interfaces: list with interface info. Each item a dictionary with: - vim_info: #Text with plain information obtained from vim (yaml.safe_dump) - mac_address: #Text format XX:XX:XX:XX:XX:XX - vim_net_id: #network id where this interface is connected, if provided at creation - vim_interface_id: #interface/port VIM id - ip_address: #null, or text with IPv4, IPv6 address - compute_node: #identification of compute node where PF,VF interface is allocated - pci: #PCI address of the NIC that hosts the PF,VF - vlan: #physical VLAN used for VF + Params: the list of VM identifiers + Returns a dictionary with: + vm_id: #VIM id of this Virtual Machine + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), + # BUILD (on building process), ERROR + # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address + # + error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + interfaces: list with interface info. 
Each item a dictionary with: + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + mac_address: #Text format XX:XX:XX:XX:XX:XX + vim_net_id: #network id where this interface is connected, if provided at creation + vim_interface_id: #interface/port VIM id + ip_address: #null, or text with IPv4, IPv6 address + compute_node: #identification of compute node where PF,VF interface is allocated + pci: #PCI address of the NIC that hosts the PF,VF + vlan: #physical VLAN used for VF """ - self.logger.debug('FOS refresh_vms_status with args: {}'.format(locals())) + self.logger.debug("FOS refresh_vms_status with args: {}".format(locals())) fos2osm_status = { - 'DEFINE': 'OTHER', - 'CONFIGURE': 'INACTIVE', - 'RUN': 'ACTIVE', - 'PAUSE': 'PAUSED', - 'ERROR': 'ERROR' + "DEFINE": "OTHER", + "CONFIGURE": "INACTIVE", + "RUN": "ACTIVE", + "PAUSE": "PAUSED", + "ERROR": "ERROR", } r = {} for vm in vm_list: - self.logger.debug('FOS refresh_vms_status for {}'.format(vm)) + self.logger.debug("FOS refresh_vms_status for {}".format(vm)) info = {} nid = self.fdu_node_map.get(vm) if nid is None: r[vm] = { - 'status': 'VIM_ERROR', - 'error_msg': 'Not compute node associated for VM' + "status": "VIM_ERROR", + "error_msg": "Not compute node associated for VM", } continue try: vm_info = self.fos_api.fdu.instance_info(vm) except Exception: - r[vm] = { - 'status': 'VIM_ERROR', - 'error_msg': 'unable to connect to VIM' - } + r[vm] = {"status": "VIM_ERROR", "error_msg": "unable to connect to VIM"} continue if vm_info is None: - r[vm:] = {'status': 'DELETED'} + r[vm:] = {"status": "DELETED"} continue desc = self.fos_api.fdu.info(str(vm_info.fdu_id)) @@ -737,57 +894,74 @@ class vimconnector(vimconn.VimConnector): vm_info = vm_info.to_json() desc = desc.to_json() - osm_status = fos2osm_status.get(vm_info.get('status')) + osm_status = fos2osm_status.get(vm_info.get("status")) + + self.logger.debug("FOS status info {}".format(vm_info)) + self.logger.debug( + "FOS status is {} <-> OSM Status {}".format( + vm_info.get("status"), osm_status + ) + ) + info["status"] = osm_status + + if vm_info.get("status") == "ERROR": + info["error_msg"] = vm_info.get("error_code") - self.logger.debug('FOS status info {}'.format(vm_info)) - self.logger.debug('FOS status is {} <-> OSM Status {}'.format(vm_info.get('status'), osm_status)) - info['status'] = osm_status - if vm_info.get('status') == 'ERROR': - info['error_msg'] = vm_info.get('error_code') # yaml.safe_dump(json.loads(json.dumps(vm_info))) - # info['vim_info'] = '' + # info["vim_info"] = "" faces = [] i = 0 - for intf_name in vm_info.get('hypervisor_info').get('network', []): - intf_info = vm_info.get('hypervisor_info').get('network').get(intf_name) + for intf_name in vm_info.get("hypervisor_info").get("network", []): + intf_info = vm_info.get("hypervisor_info").get("network").get(intf_name) face = {} - face['compute_node'] = nid - # face['vim_info'] = '' #yaml.safe_dump(json.loads(json.dumps(intf_info))) - face['mac_address'] = intf_info.get('hwaddr') + face["compute_node"] = nid + # face["vim_info"] = "" #yaml.safe_dump(json.loads(json.dumps(intf_info))) + face["mac_address"] = intf_info.get("hwaddr") addrs = [] - for a in intf_info.get('addresses'): - addrs.append(a.get('address')) + + for a in intf_info.get("addresses"): + addrs.append(a.get("address")) + if len(addrs) >= 0: - face['ip_address'] = ','.join(addrs) + face["ip_address"] = ",".join(addrs) else: - face['ip_address'] = '' - face['pci'] = '0:0:0.0' + face["ip_address"] = "" + + face["pci"] = "0:0:0.0" 
try: - cp_info = vm_info.get('connection_points')[i] + cp_info = vm_info.get("connection_points")[i] except IndexError: cp_info = None + if cp_info is not None: - cp_id = cp_info['cp_id'] - cps_d = desc['connection_points'] - matches = [x for x in cps_d if x['id'] == cp_id] + cp_id = cp_info["cp_id"] + cps_d = desc["connection_points"] + matches = [x for x in cps_d if x["id"] == cp_id] + if len(matches) > 0: cpd = matches[0] - face['vim_net_id'] = cpd.get('vld_ref', '') + face["vim_net_id"] = cpd.get("vld_ref", "") else: - face['vim_net_id'] = '' - face['vim_interface_id'] = cp_id - # cp_info.get('uuid') + face["vim_net_id"] = "" + + face["vim_interface_id"] = cp_id + # cp_info.get("uuid") else: - face['vim_net_id'] = '' - face['vim_interface_id'] = intf_name + face["vim_net_id"] = "" + face["vim_interface_id"] = intf_name + faces.append(face) i += 1 - info['interfaces'] = faces + info["interfaces"] = faces r[vm] = info - self.logger.debug('FOS refresh_vms_status res for {} is {}'.format(vm, info)) - self.logger.debug('FOS refresh_vms_status res is {}'.format(r)) + self.logger.debug( + "FOS refresh_vms_status res for {} is {}".format(vm, info) + ) + + self.logger.debug("FOS refresh_vms_status res is {}".format(r)) + return r def action_vminstance(self, vm_id, action_dict, created_items={}): @@ -802,65 +976,87 @@ class vimconnector(vimconn.VimConnector): method can modify this value :return: None, or a console dict """ - self.logger.debug('VIM action_vminstance with args: {}'.format(locals())) + self.logger.debug("VIM action_vminstance with args: {}".format(locals())) nid = self.fdu_node_map.get(vm_id) + if nid is None: - raise vimconn.VimConnNotFoundException('No node for this VM') + raise vimconn.VimConnNotFoundException("No node for this VM") + try: instance = self.fos_api.fdu.instance_info(vm_id) if "start" in action_dict: - if instance.get('status') == 'CONFIGURE': + if instance.get("status") == "CONFIGURE": self.fos_api.fdu.start(vm_id) - elif instance.get('status') == 'PAUSE': + elif instance.get("status") == "PAUSE": self.fos_api.fdu.resume(vm_id) else: - raise vimconn.VimConnConflictException('Cannot start from current state: {}'.format( - instance.get('status'))) + raise vimconn.VimConnConflictException( + "Cannot start from current state: {}".format( + instance.get("status") + ) + ) elif "pause" in action_dict: - if instance.get('status') == 'RUN': + if instance.get("status") == "RUN": self.fos_api.fdu.pause(vm_id) else: - raise vimconn.VimConnConflictException('Cannot pause from current state: {}'.format( - instance.get('status'))) + raise vimconn.VimConnConflictException( + "Cannot pause from current state: {}".format( + instance.get("status") + ) + ) elif "resume" in action_dict: - if instance.get('status') == 'PAUSE': + if instance.get("status") == "PAUSE": self.fos_api.fdu.resume(vm_id) else: - raise vimconn.VimConnConflictException('Cannot resume from current state: {}'.format( - instance.get('status'))) + raise vimconn.VimConnConflictException( + "Cannot resume from current state: {}".format( + instance.get("status") + ) + ) elif "shutoff" in action_dict or "shutdown" or "forceOff" in action_dict: - if instance.get('status') == 'RUN': + if instance.get("status") == "RUN": self.fos_api.fdu.stop(vm_id) else: - raise vimconn.VimConnConflictException('Cannot shutoff from current state: {}'.format( - instance.get('status'))) + raise vimconn.VimConnConflictException( + "Cannot shutoff from current state: {}".format( + instance.get("status") + ) + ) elif "terminate" in 
action_dict: - if instance.get('status') == 'RUN': + if instance.get("status") == "RUN": self.fos_api.fdu.stop(vm_id) self.fos_api.fdu.clean(vm_id) self.fos_api.fdu.undefine(vm_id) # self.fos_api.fdu.offload(vm_id) - elif instance.get('status') == 'CONFIGURE': + elif instance.get("status") == "CONFIGURE": self.fos_api.fdu.clean(vm_id) self.fos_api.fdu.undefine(vm_id) # self.fos_api.fdu.offload(vm_id) - elif instance.get('status') == 'PAUSE': + elif instance.get("status") == "PAUSE": self.fos_api.fdu.resume(vm_id) self.fos_api.fdu.stop(vm_id) self.fos_api.fdu.clean(vm_id) self.fos_api.fdu.undefine(vm_id) # self.fos_api.fdu.offload(vm_id) else: - raise vimconn.VimConnConflictException('Cannot terminate from current state: {}'.format( - instance.get('status'))) + raise vimconn.VimConnConflictException( + "Cannot terminate from current state: {}".format( + instance.get("status") + ) + ) elif "rebuild" in action_dict: raise vimconn.VimConnNotImplemented("Rebuild not implemented") elif "reboot" in action_dict: - if instance.get('status') == 'RUN': + if instance.get("status") == "RUN": self.fos_api.fdu.stop(vm_id) self.fos_api.fdu.start(vm_id) else: - raise vimconn.VimConnConflictException('Cannot reboot from current state: {}'.format( - instance.get('status'))) + raise vimconn.VimConnConflictException( + "Cannot reboot from current state: {}".format( + instance.get("status") + ) + ) except Exception as e: - raise vimconn.VimConnConnectionException("VIM not reachable. Error {}".format(e)) + raise vimconn.VimConnConnectionException( + "VIM not reachable. Error {}".format(e) + ) diff --git a/RO-VIM-fos/setup.py b/RO-VIM-fos/setup.py index 93935664..ef11dca7 100644 --- a/RO-VIM-fos/setup.py +++ b/RO-VIM-fos/setup.py @@ -30,18 +30,20 @@ osm-ro pluging for Eclipse fog05 VIM setup( name=_name, - description='OSM ro vim plugin for Eclipse fog05', + description="OSM ro vim plugin for Eclipse fog05", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, - # python_requires='>3.5.0', - author='ETSI OSM', - author_email='OSM_TECH@LIST.ETSI.ORG', - maintainer='ETSI OSM', - maintainer_email='OSM_TECH@LIST.ETSI.ORG', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + # python_requires=">3.5.0", + author="ETSI OSM", + author_email="OSM_TECH@LIST.ETSI.ORG", + maintainer="ETSI OSM", + maintainer_email="OSM_TECH@LIST.ETSI.ORG", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ @@ -54,10 +56,10 @@ setup( "fog05==0.2.0", "pyangbind", "sphinx", - "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rovim.plugins': ['rovim_fos = osm_rovim_fos.vimconn_fos:vimconnector'], + "osm_rovim.plugins": ["rovim_fos = osm_rovim_fos.vimconn_fos:vimconnector"], }, ) diff --git a/RO-VIM-fos/tox.ini b/RO-VIM-fos/tox.ini index e25c4bd5..836a2934 100644 --- a/RO-VIM-fos/tox.ini +++ b/RO-VIM-fos/tox.ini @@ -27,7 +27,7 @@ commands=python3 -m unittest discover -v basepython = python3 deps = flake8 commands = flake8 osm_rovim_fos --max-line-length 120 \ - 
--exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py b/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py index a646036d..ad9ca298 100644 --- a/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py +++ b/RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py @@ -25,15 +25,19 @@ """ vimconnector implements all the methods to interact with OpenNebula using the XML-RPC API. """ -__author__ = "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abraham Garrido Garcia,Alberto Florez " \ - "Pages, Andres Pozo Munoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation " +__author__ = ( + "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abraham Garrido Garcia,Alberto Florez " + "Pages, Andres Pozo Munoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation " +) __date__ = "$13-dec-2017 11:09:29$" from osm_ro_plugin import vimconn import logging import requests + # import logging import oca + # import untangle import math import random @@ -41,9 +45,20 @@ import pyone class vimconnector(vimconn.VimConnector): - def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, - log_level="DEBUG", config={}, persistent_info={}): - + def __init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin=None, + user=None, + passwd=None, + log_level="DEBUG", + config={}, + persistent_info={}, + ): """Constructor of VIM Params: 'uuid': id asigned to this VIM @@ -61,32 +76,46 @@ class vimconnector(vimconn.VimConnector): Returns: Raise an exception is some needed parameter is missing, but it must not do any connectivity check against the VIM """ - - vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, - config) - - self.logger = logging.getLogger('ro.vim.openstack') + vimconn.VimConnector.__init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin, + user, + passwd, + log_level, + config, + ) + + self.logger = logging.getLogger("ro.vim.openstack") def _new_one_connection(self): - return pyone.OneServer(self.url, session=self.user + ':' + self.passwd) + return pyone.OneServer(self.url, session=self.user + ":" + self.passwd) def new_tenant(self, tenant_name, tenant_description): - # '''Adds a new tenant to VIM with this name and description, returns the tenant identifier''' + # """Adds a new tenant to VIM with this name and description, returns the tenant identifier""" try: - client = oca.Client(self.user + ':' + self.passwd, self.url) + client = oca.Client(self.user + ":" + self.passwd, self.url) group_list = oca.GroupPool(client) user_list = oca.UserPool(client) group_list.info() user_list.info() create_primarygroup = 1 + # create group-tenant for group in group_list: if str(group.name) == str(tenant_name): create_primarygroup = 0 break + if create_primarygroup == 1: oca.Group.allocate(client, tenant_name) + group_list.info() + # set to primary_group the tenant_group and oneadmin to secondary_group for group in group_list: if str(group.name) == str(tenant_name): @@ -97,27 +126,34 @@ class vimconnector(vimconn.VimConnector): else: self._add_secondarygroup(user.id, group.id) user.chgrp(group.id) + return str(group.id) except Exception as e: 
self.logger.error("Create new tenant error: " + str(e)) + raise vimconn.VimConnException(e) def delete_tenant(self, tenant_id): """Delete a tenant from VIM. Returns the old tenant identifier""" try: - client = oca.Client(self.user + ':' + self.passwd, self.url) + client = oca.Client(self.user + ":" + self.passwd, self.url) group_list = oca.GroupPool(client) user_list = oca.UserPool(client) group_list.info() user_list.info() + for group in group_list: if str(group.id) == str(tenant_id): for user in user_list: if str(user.name) == str(self.user): self._delete_secondarygroup(user.id, group.id) group.delete(client) + return None - raise vimconn.VimConnNotFoundException("Group {} not found".format(tenant_id)) + + raise vimconn.VimConnNotFoundException( + "Group {} not found".format(tenant_id) + ) except Exception as e: self.logger.error("Delete tenant " + str(tenant_id) + " error: " + str(e)) raise vimconn.VimConnException(e) @@ -138,7 +174,9 @@ class vimconnector(vimconn.VimConnector): {}\ \ \ - '.format(self.user, self.passwd, (str(id_user)), (str(id_group))) + '.format( + self.user, self.passwd, (str(id_user)), (str(id_group)) + ) requests.post(self.url, params) def _delete_secondarygroup(self, id_user, id_group): @@ -156,10 +194,19 @@ class vimconnector(vimconn.VimConnector): {}\ \ \ - '.format(self.user, self.passwd, (str(id_user)), (str(id_group))) + '.format( + self.user, self.passwd, (str(id_user)), (str(id_group)) + ) requests.post(self.url, params) - def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): + def new_network( + self, + net_name, + net_type, + ip_profile=None, + shared=False, + provider_network_profile=None, + ): """Adds a tenant network to VIM Params: 'net_name': name of the network @@ -183,27 +230,37 @@ class vimconnector(vimconn.VimConnector): Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same as not present. 
""" - # oca library method cannot be used in this case (problem with cluster parameters) try: vlan = None + if provider_network_profile: vlan = provider_network_profile.get("segmentation-id") + created_items = {} one = self._new_one_connection() size = "254" + if ip_profile is None: subnet_rand = random.randint(0, 255) ip_start = "192.168.{}.1".format(subnet_rand) else: index = ip_profile["subnet_address"].find("/") ip_start = ip_profile["subnet_address"][:index] + if "dhcp_count" in ip_profile and ip_profile["dhcp_count"] is not None: size = str(ip_profile["dhcp_count"]) - elif "dhcp_count" not in ip_profile and ip_profile["ip_version"] == "IPv4": - prefix = ip_profile["subnet_address"][index + 1:] + elif ( + "dhcp_count" not in ip_profile + and ip_profile["ip_version"] == "IPv4" + ): + prefix = ip_profile["subnet_address"][index + 1 :] size = int(math.pow(2, 32 - prefix)) - if "dhcp_start_address" in ip_profile and ip_profile["dhcp_start_address"] is not None: + + if ( + "dhcp_start_address" in ip_profile + and ip_profile["dhcp_start_address"] is not None + ): ip_start = str(ip_profile["dhcp_start_address"]) # if ip_profile["ip_version"] == "IPv6": # ip_prefix_type = "GLOBAL_PREFIX" @@ -212,29 +269,27 @@ class vimconnector(vimconn.VimConnector): vlan_id = vlan else: vlan_id = str(random.randint(100, 4095)) + # if "internal" in net_name: # OpenNebula not support two networks with same name random_net_name = str(random.randint(1, 1000000)) net_name = net_name + random_net_name - net_id = one.vn.allocate({ - 'NAME': net_name, - 'VN_MAD': '802.1Q', - 'PHYDEV': self.config["network"]["phydev"], - 'VLAN_ID': vlan_id - }, self.config["cluster"]["id"]) - arpool = { - 'AR_POOL': { - 'AR': { - 'TYPE': 'IP4', - 'IP': ip_start, - 'SIZE': size - } - } - } + net_id = one.vn.allocate( + { + "NAME": net_name, + "VN_MAD": "802.1Q", + "PHYDEV": self.config["network"]["phydev"], + "VLAN_ID": vlan_id, + }, + self.config["cluster"]["id"], + ) + arpool = {"AR_POOL": {"AR": {"TYPE": "IP4", "IP": ip_start, "SIZE": size}}} one.vn.add_ar(net_id, arpool) + return net_id, created_items except Exception as e: self.logger.error("Create new network error: " + str(e)) + raise vimconn.VimConnException(e) def get_network_list(self, filter_dict={}): @@ -257,26 +312,36 @@ class vimconnector(vimconn.VimConnector): List can be empty if no network map the filter_dict. 
Raise an exception only upon VIM connectivity, authorization, or some other unspecific error """ - try: one = self._new_one_connection() net_pool = one.vnpool.info(-2, -1, -1).VNET response = [] + if "name" in filter_dict: network_name_filter = filter_dict["name"] else: network_name_filter = None + if "id" in filter_dict: network_id_filter = filter_dict["id"] else: network_id_filter = None + for network in net_pool: - if network.NAME == network_name_filter or str(network.ID) == str(network_id_filter): - net_dict = {"name": network.NAME, "id": str(network.ID), "status": "ACTIVE"} + if network.NAME == network_name_filter or str(network.ID) == str( + network_id_filter + ): + net_dict = { + "name": network.NAME, + "id": str(network.ID), + "status": "ACTIVE", + } response.append(net_dict) + return response except Exception as e: self.logger.error("Get network list error: " + str(e)) + raise vimconn.VimConnException(e) def get_network(self, net_id): @@ -293,18 +358,23 @@ class vimconnector(vimconn.VimConnector): one = self._new_one_connection() net_pool = one.vnpool.info(-2, -1, -1).VNET net = {} + for network in net_pool: if str(network.ID) == str(net_id): - net['id'] = network.ID - net['name'] = network.NAME - net['status'] = "ACTIVE" + net["id"] = network.ID + net["name"] = network.NAME + net["status"] = "ACTIVE" break + if net: return net else: - raise vimconn.VimConnNotFoundException("Network {} not found".format(net_id)) + raise vimconn.VimConnNotFoundException( + "Network {} not found".format(net_id) + ) except Exception as e: self.logger.error("Get network " + str(net_id) + " error): " + str(e)) + raise vimconn.VimConnException(e) def delete_network(self, net_id, created_items=None): @@ -315,12 +385,15 @@ class vimconnector(vimconn.VimConnector): Returns the network identifier or raises an exception upon error or when network is not found """ try: - one = self._new_one_connection() one.vn.delete(int(net_id)) + return net_id except Exception as e: - self.logger.error("Delete network " + str(net_id) + "error: network not found" + str(e)) + self.logger.error( + "Delete network " + str(net_id) + "error: network not found" + str(e) + ) + raise vimconn.VimConnException(e) def refresh_nets_status(self, net_list): @@ -344,25 +417,30 @@ class vimconnector(vimconn.VimConnector): try: for net_id in net_list: net = {} + try: net_vim = self.get_network(net_id) net["status"] = net_vim["status"] net["vim_info"] = None except vimconn.VimConnNotFoundException as e: self.logger.error("Exception getting net status: {}".format(str(e))) - net['status'] = "DELETED" - net['error_msg'] = str(e) + net["status"] = "DELETED" + net["error_msg"] = str(e) except vimconn.VimConnException as e: self.logger.error(e) net["status"] = "VIM_ERROR" net["error_msg"] = str(e) + net_dict[net_id] = net + return net_dict except vimconn.VimConnException as e: self.logger.error(e) + for k in net_dict: net_dict[k]["status"] = "VIM_ERROR" net_dict[k]["error_msg"] = str(e) + return net_dict def get_flavor(self, flavor_id): # Esta correcto @@ -371,14 +449,18 @@ class vimconnector(vimconn.VimConnector): Raises an exception upon error or if not found """ try: - one = self._new_one_connection() template = one.template.info(int(flavor_id)) + if template is not None: - return {'id': template.ID, 'name': template.NAME} - raise vimconn.VimConnNotFoundException("Flavor {} not found".format(flavor_id)) + return {"id": template.ID, "name": template.NAME} + + raise vimconn.VimConnNotFoundException( + "Flavor {} not found".format(flavor_id) + ) 
except Exception as e: self.logger.error("get flavor " + str(flavor_id) + " error: " + str(e)) + raise vimconn.VimConnException(e) def new_flavor(self, flavor_data): @@ -400,47 +482,48 @@ class vimconnector(vimconn.VimConnector): is_public: #TODO to concrete Returns the flavor identifier""" - - disk_size = str(int(flavor_data["disk"])*1024) + disk_size = str(int(flavor_data["disk"]) * 1024) try: one = self._new_one_connection() - template_id = one.template.allocate({ - 'TEMPLATE': { - 'NAME': flavor_data["name"], - 'CPU': flavor_data["vcpus"], - 'VCPU': flavor_data["vcpus"], - 'MEMORY': flavor_data["ram"], - 'DISK': { - 'SIZE': disk_size - }, - 'CONTEXT': { - 'NETWORK': "YES", - 'SSH_PUBLIC_KEY': '$USER[SSH_PUBLIC_KEY]' - }, - 'GRAPHICS': { - 'LISTEN': '0.0.0.0', - 'TYPE': 'VNC' - }, - 'CLUSTER_ID': self.config["cluster"]["id"] + template_id = one.template.allocate( + { + "TEMPLATE": { + "NAME": flavor_data["name"], + "CPU": flavor_data["vcpus"], + "VCPU": flavor_data["vcpus"], + "MEMORY": flavor_data["ram"], + "DISK": {"SIZE": disk_size}, + "CONTEXT": { + "NETWORK": "YES", + "SSH_PUBLIC_KEY": "$USER[SSH_PUBLIC_KEY]", + }, + "GRAPHICS": {"LISTEN": "0.0.0.0", "TYPE": "VNC"}, + "CLUSTER_ID": self.config["cluster"]["id"], + } } - }) - return template_id + ) + return template_id except Exception as e: self.logger.error("Create new flavor error: " + str(e)) + raise vimconn.VimConnException(e) def delete_flavor(self, flavor_id): - """ Deletes a tenant flavor from VIM - Returns the old flavor_id + """Deletes a tenant flavor from VIM + Returns the old flavor_id """ try: one = self._new_one_connection() one.template.delete(int(flavor_id), False) + return flavor_id except Exception as e: - self.logger.error("Error deleting flavor " + str(flavor_id) + ". Flavor not found") + self.logger.error( + "Error deleting flavor " + str(flavor_id) + ". Flavor not found" + ) + raise vimconn.VimConnException(e) def get_image_list(self, filter_dict={}): @@ -458,25 +541,42 @@ class vimconnector(vimconn.VimConnector): one = self._new_one_connection() image_pool = one.imagepool.info(-2, -1, -1).IMAGE images = [] + if "name" in filter_dict: image_name_filter = filter_dict["name"] else: image_name_filter = None + if "id" in filter_dict: image_id_filter = filter_dict["id"] else: image_id_filter = None + for image in image_pool: - if str(image_name_filter) == str(image.NAME) or str(image.ID) == str(image_id_filter): + if str(image_name_filter) == str(image.NAME) or str(image.ID) == str( + image_id_filter + ): images_dict = {"name": image.NAME, "id": str(image.ID)} images.append(images_dict) + return images except Exception as e: self.logger.error("Get image list error: " + str(e)) raise vimconn.VimConnException(e) - def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None, - availability_zone_index=None, availability_zone_list=None): + def new_vminstance( + self, + name, + description, + start, + image_id, + flavor_id, + net_list, + cloud_config=None, + disk_list=None, + availability_zone_index=None, + availability_zone_list=None, + ): """ Adds a VM instance to VIM :param name: @@ -537,7 +637,11 @@ class vimconnector(vimconn.VimConnector): as not present. 
""" self.logger.debug( - "new_vminstance input: image='{}' flavor='{}' nics='{}'".format(image_id, flavor_id, str(net_list))) + "new_vminstance input: image='{}' flavor='{}' nics='{}'".format( + image_id, flavor_id, str(net_list) + ) + ) + try: one = self._new_one_connection() template_vim = one.template.info(int(flavor_id), True) @@ -545,34 +649,48 @@ class vimconnector(vimconn.VimConnector): one = self._new_one_connection() template_updated = "" + for net in net_list: net_in_vim = one.vn.info(int(net["net_id"])) net["vim_id"] = str(net_in_vim.ID) network = 'NIC = [NETWORK = "{}",NETWORK_UNAME = "{}" ]'.format( - net_in_vim.NAME, net_in_vim.UNAME) + net_in_vim.NAME, net_in_vim.UNAME + ) template_updated += network - template_updated += "DISK = [ IMAGE_ID = {},\n SIZE = {}]".format(image_id, disk_size) + template_updated += "DISK = [ IMAGE_ID = {},\n SIZE = {}]".format( + image_id, disk_size + ) if isinstance(cloud_config, dict): if cloud_config.get("key-pairs"): context = 'CONTEXT = [\n NETWORK = "YES",\n SSH_PUBLIC_KEY = "' + for key in cloud_config["key-pairs"]: - context += key + '\n' + context += key + "\n" + # if False: # context += '"\n USERNAME = ' context += '"]' template_updated += context - vm_instance_id = one.template.instantiate(int(flavor_id), name, False, template_updated) + vm_instance_id = one.template.instantiate( + int(flavor_id), name, False, template_updated + ) self.logger.info( - "Instanciating in OpenNebula a new VM name:{} id:{}".format(name, flavor_id)) + "Instanciating in OpenNebula a new VM name:{} id:{}".format( + name, flavor_id + ) + ) + return str(vm_instance_id), None except pyone.OneNoExistsException as e: self.logger.error("Network with id " + str(e) + " not found: " + str(e)) + raise vimconn.VimConnNotFoundException(e) except Exception as e: self.logger.error("Create new vm instance error: " + str(e)) + raise vimconn.VimConnException(e) def get_vminstance(self, vm_id): @@ -580,9 +698,13 @@ class vimconnector(vimconn.VimConnector): try: one = self._new_one_connection() vm = one.vm.info(int(vm_id)) + return vm except Exception as e: - self.logger.error("Getting vm instance error: " + str(e) + ": VM Instance not found") + self.logger.error( + "Getting vm instance error: " + str(e) + ": VM Instance not found" + ) + raise vimconn.VimConnException(e) def delete_vminstance(self, vm_id, created_items=None): @@ -597,76 +719,87 @@ class vimconnector(vimconn.VimConnector): one = self._new_one_connection() one.vm.recover(int(vm_id), 3) vm = None + while True: if vm is not None and vm.LCM_STATE == 0: break else: vm = one.vm.info(int(vm_id)) - except pyone.OneNoExistsException: - self.logger.info("The vm " + str(vm_id) + " does not exist or is already deleted") - raise vimconn.VimConnNotFoundException("The vm {} does not exist or is already deleted".format(vm_id)) + self.logger.info( + "The vm " + str(vm_id) + " does not exist or is already deleted" + ) + + raise vimconn.VimConnNotFoundException( + "The vm {} does not exist or is already deleted".format(vm_id) + ) except Exception as e: self.logger.error("Delete vm instance " + str(vm_id) + " error: " + str(e)) raise vimconn.VimConnException(e) def refresh_vms_status(self, vm_list): """Get the status of the virtual machines and their interfaces/ports - Params: the list of VM identifiers - Returns a dictionary with: - vm_id: #VIM id of this Virtual Machine - status: #Mandatory. Text with one of: - # DELETED (not found at vim) - # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
- # OTHER (Vim reported other status not understood) - # ERROR (VIM indicates an ERROR status) - # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), - # BUILD (on building process), ERROR - # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address - # - error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR - vim_info: #Text with plain information obtained from vim (yaml.safe_dump) - interfaces: list with interface info. Each item a dictionary with: - vim_info: #Text with plain information obtained from vim (yaml.safe_dump) - mac_address: #Text format XX:XX:XX:XX:XX:XX - vim_net_id: #network id where this interface is connected, if provided at creation - vim_interface_id: #interface/port VIM id - ip_address: #null, or text with IPv4, IPv6 address - compute_node: #identification of compute node where PF,VF interface is allocated - pci: #PCI address of the NIC that hosts the PF,VF - vlan: #physical VLAN used for VF + Params: the list of VM identifiers + Returns a dictionary with: + vm_id: #VIM id of this Virtual Machine + status: #Mandatory. Text with one of: + # DELETED (not found at vim) + # VIM_ERROR (Cannot connect to VIM, VIM response error, ...) + # OTHER (Vim reported other status not understood) + # ERROR (VIM indicates an ERROR status) + # ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), + # BUILD (on building process), ERROR + # ACTIVE:NoMgmtIP (Active but any of its interface has an IP address + # + error_msg: #Text with VIM error message, if any. Or the VIM connection ERROR + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + interfaces: list with interface info. Each item a dictionary with: + vim_info: #Text with plain information obtained from vim (yaml.safe_dump) + mac_address: #Text format XX:XX:XX:XX:XX:XX + vim_net_id: #network id where this interface is connected, if provided at creation + vim_interface_id: #interface/port VIM id + ip_address: #null, or text with IPv4, IPv6 address + compute_node: #identification of compute node where PF,VF interface is allocated + pci: #PCI address of the NIC that hosts the PF,VF + vlan: #physical VLAN used for VF """ vm_dict = {} try: for vm_id in vm_list: vm = {} + if self.get_vminstance(vm_id) is not None: vm_element = self.get_vminstance(vm_id) else: self.logger.info("The vm " + str(vm_id) + " does not exist.") - vm['status'] = "DELETED" - vm['error_msg'] = ("The vm " + str(vm_id) + " does not exist.") + vm["status"] = "DELETED" + vm["error_msg"] = "The vm " + str(vm_id) + " does not exist." 
continue + vm["vim_info"] = None vm_status = vm_element.LCM_STATE + if vm_status == 3: - vm['status'] = "ACTIVE" + vm["status"] = "ACTIVE" elif vm_status == 36: - vm['status'] = "ERROR" - vm['error_msg'] = "VM failure" + vm["status"] = "ERROR" + vm["error_msg"] = "VM failure" else: - vm['status'] = "BUILD" + vm["status"] = "BUILD" if vm_element is not None: interfaces = self._get_networks_vm(vm_element) vm["interfaces"] = interfaces + vm_dict[vm_id] = vm + return vm_dict except Exception as e: self.logger.error(e) for k in vm_dict: vm_dict[k]["status"] = "VIM_ERROR" vm_dict[k]["error_msg"] = str(e) + return vm_dict def _get_networks_vm(self, vm_element): @@ -674,24 +807,40 @@ class vimconnector(vimconn.VimConnector): try: if isinstance(vm_element.TEMPLATE["NIC"], list): for net in vm_element.TEMPLATE["NIC"]: - interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]), - "vim_interface_id": str(net["NETWORK_ID"])} + interface = { + "vim_info": None, + "mac_address": str(net["MAC"]), + "vim_net_id": str(net["NETWORK_ID"]), + "vim_interface_id": str(net["NETWORK_ID"]), + } + # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6 - if 'IP' in net: + if "IP" in net: interface["ip_address"] = str(net["IP"]) - if 'IP6_GLOBAL' in net: + + if "IP6_GLOBAL" in net: interface["ip_address"] = str(net["IP6_GLOBAL"]) + interfaces.append(interface) else: net = vm_element.TEMPLATE["NIC"] - interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]), - "vim_interface_id": str(net["NETWORK_ID"])} + interface = { + "vim_info": None, + "mac_address": str(net["MAC"]), + "vim_net_id": str(net["NETWORK_ID"]), + "vim_interface_id": str(net["NETWORK_ID"]), + } + # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6 - if 'IP' in net: + if "IP" in net: interface["ip_address"] = str(net["IP"]) - if 'IP6_GLOBAL' in net: + + if "IP6_GLOBAL" in net: interface["ip_address"] = str(net["IP6_GLOBAL"]) + interfaces.append(interface) return interfaces except Exception: - self.logger.error("Error getting vm interface_information of vm_id: " + str(vm_element.ID)) + self.logger.error( + "Error getting vm interface_information of vm_id: " + str(vm_element.ID) + ) diff --git a/RO-VIM-opennebula/setup.py b/RO-VIM-opennebula/setup.py index 4b3ec179..29fd37c2 100644 --- a/RO-VIM-opennebula/setup.py +++ b/RO-VIM-opennebula/setup.py @@ -31,28 +31,32 @@ osm-ro pluging for opennebula VIM setup( name=_name, - description='OSM ro vim plugin for opennebula', + description="OSM ro vim plugin for opennebula", long_description=README, - version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'), + version_command=( + "git describe --match v* --tags --long --dirty", + "pep440-git-full", + ), # version=VERSION, - # python_requires='>3.5.0', - author='ETSI OSM', - author_email='OSM_TECH@LIST.ETSI.ORG', - maintainer='ETSI OSM', - maintainer_email='OSM_TECH@LIST.ETSI.ORG', - url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary', - license='Apache 2.0', - + # python_requires=">3.5.0", + author="ETSI OSM", + author_email="OSM_TECH@LIST.ETSI.ORG", + maintainer="ETSI OSM", + maintainer_email="OSM_TECH@LIST.ETSI.ORG", + url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary", + license="Apache 2.0", packages=[_name], include_package_data=True, install_requires=[ "requests", "netaddr", "PyYAML", - "osm-ro-plugin @ 
git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin" + "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin", ], - setup_requires=['setuptools-version-command'], + setup_requires=["setuptools-version-command"], entry_points={ - 'osm_rovim.plugins': ['rovim_opennebula = osm_rovim_opennebula.vimconn_opennebula:vimconnector'], + "osm_rovim.plugins": [ + "rovim_opennebula = osm_rovim_opennebula.vimconn_opennebula:vimconnector" + ], }, ) diff --git a/RO-VIM-opennebula/tox.ini b/RO-VIM-opennebula/tox.ini index b6993f5a..874eaa08 100644 --- a/RO-VIM-opennebula/tox.ini +++ b/RO-VIM-opennebula/tox.ini @@ -28,7 +28,7 @@ commands=python3 -m unittest discover -v basepython = python3 deps = flake8 commands = flake8 osm_rovim_opennebula --max-line-length 120 \ - --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504 + --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241 [testenv:unittest] basepython = python3 diff --git a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py index f78c5b69..19c9d521 100644 --- a/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py +++ b/RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py @@ -42,89 +42,112 @@ class TestSfcOperations(unittest.TestCase): def setUp(self): # instantiate dummy VIM connector so we can test it self.vimconn = vimconnector( - '123', 'openstackvim', '456', '789', 'http://dummy.url', None, - 'user', 'pass') - - def _test_new_sfi(self, create_sfc_port_pair, sfc_encap, - ingress_ports=['5311c75d-d718-4369-bbda-cdcc6da60fcc'], - egress_ports=['230cdf1b-de37-4891-bc07-f9010cf1f967']): + "123", + "openstackvim", + "456", + "789", + "http://dummy.url", + None, + "user", + "pass", + ) + + def _test_new_sfi( + self, + create_sfc_port_pair, + sfc_encap, + ingress_ports=["5311c75d-d718-4369-bbda-cdcc6da60fcc"], + egress_ports=["230cdf1b-de37-4891-bc07-f9010cf1f967"], + ): # input to VIM connector - name = 'osm_sfi' + name = "osm_sfi" # + ingress_ports # + egress_ports # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround) - correlation = 'nsh' + correlation = "nsh" if sfc_encap is not None: if not sfc_encap: correlation = None - # what OpenStack is assumed to respond (patch OpenStack's return value) - dict_from_neutron = {'port_pair': { - 'id': '3d7ddc13-923c-4332-971e-708ed82902ce', - 'name': name, - 'description': '', - 'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', - 'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', - 'ingress': ingress_ports[0] if len(ingress_ports) else None, - 'egress': egress_ports[0] if len(egress_ports) else None, - 'service_function_parameters': {'correlation': correlation} - }} + # what OpenStack is assumed to respond (patch OpenStack"s return value) + dict_from_neutron = { + "port_pair": { + "id": "3d7ddc13-923c-4332-971e-708ed82902ce", + "name": name, + "description": "", + "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c", + "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c", + "ingress": ingress_ports[0] if len(ingress_ports) else None, + "egress": egress_ports[0] if len(egress_ports) else None, + "service_function_parameters": {"correlation": correlation}, + } + } create_sfc_port_pair.return_value = dict_from_neutron # what the VIM connector is expected to # send to OpenStack based on the input - dict_to_neutron = 
{'port_pair': { - 'name': name, - 'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'egress': '230cdf1b-de37-4891-bc07-f9010cf1f967', - 'service_function_parameters': {'correlation': correlation} - }} + dict_to_neutron = { + "port_pair": { + "name": name, + "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "egress": "230cdf1b-de37-4891-bc07-f9010cf1f967", + "service_function_parameters": {"correlation": correlation}, + } + } # call the VIM connector if sfc_encap is None: result = self.vimconn.new_sfi(name, ingress_ports, egress_ports) else: - result = self.vimconn.new_sfi(name, ingress_ports, egress_ports, - sfc_encap) + result = self.vimconn.new_sfi(name, ingress_ports, egress_ports, sfc_encap) # assert that the VIM connector made the expected call to OpenStack create_sfc_port_pair.assert_called_with(dict_to_neutron) # assert that the VIM connector had the expected result / return value - self.assertEqual(result, dict_from_neutron['port_pair']['id']) + self.assertEqual(result, dict_from_neutron["port_pair"]["id"]) def _test_new_sf(self, create_sfc_port_pair_group): # input to VIM connector - name = 'osm_sf' - instances = ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd', - '12ba215e-3987-4892-bd3a-d0fd91eecf98', - 'e25a7c79-14c8-469a-9ae1-f601c9371ffd'] - - # what OpenStack is assumed to respond (patch OpenStack's return value) - dict_from_neutron = {'port_pair_group': { - 'id': '3d7ddc13-923c-4332-971e-708ed82902ce', - 'name': name, - 'description': '', - 'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', - 'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', - 'port_pairs': instances, - 'group_id': 1, - 'port_pair_group_parameters': { - "lb_fields": [], - "ppg_n_tuple_mapping": { - "ingress_n_tuple": {}, - "egress_n_tuple": {} - }} - }} + name = "osm_sf" + instances = [ + "bbd01220-cf72-41f2-9e70-0669c2e5c4cd", + "12ba215e-3987-4892-bd3a-d0fd91eecf98", + "e25a7c79-14c8-469a-9ae1-f601c9371ffd", + ] + + # what OpenStack is assumed to respond (patch OpenStack"s return value) + dict_from_neutron = { + "port_pair_group": { + "id": "3d7ddc13-923c-4332-971e-708ed82902ce", + "name": name, + "description": "", + "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c", + "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c", + "port_pairs": instances, + "group_id": 1, + "port_pair_group_parameters": { + "lb_fields": [], + "ppg_n_tuple_mapping": { + "ingress_n_tuple": {}, + "egress_n_tuple": {}, + }, + }, + } + } create_sfc_port_pair_group.return_value = dict_from_neutron # what the VIM connector is expected to # send to OpenStack based on the input - dict_to_neutron = {'port_pair_group': { - 'name': name, - 'port_pairs': ['bbd01220-cf72-41f2-9e70-0669c2e5c4cd', - '12ba215e-3987-4892-bd3a-d0fd91eecf98', - 'e25a7c79-14c8-469a-9ae1-f601c9371ffd'] - }} + dict_to_neutron = { + "port_pair_group": { + "name": name, + "port_pairs": [ + "bbd01220-cf72-41f2-9e70-0669c2e5c4cd", + "12ba215e-3987-4892-bd3a-d0fd91eecf98", + "e25a7c79-14c8-469a-9ae1-f601c9371ffd", + ], + } + } # call the VIM connector result = self.vimconn.new_sf(name, instances) @@ -132,96 +155,111 @@ class TestSfcOperations(unittest.TestCase): # assert that the VIM connector made the expected call to OpenStack create_sfc_port_pair_group.assert_called_with(dict_to_neutron) # assert that the VIM connector had the expected result / return value - self.assertEqual(result, dict_from_neutron['port_pair_group']['id']) + self.assertEqual(result, dict_from_neutron["port_pair_group"]["id"]) def _test_new_sfp(self, create_sfc_port_chain, sfc_encap, spi): # 
input to VIM connector - name = 'osm_sfp' - classifications = ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19', - '00f23389-bdfa-43c2-8b16-5815f2582fa8'] - sfs = ['2314daec-c262-414a-86e3-69bb6fa5bc16', - 'd8bfdb5d-195e-4f34-81aa-6135705317df'] + name = "osm_sfp" + classifications = [ + "2bd2a2e5-c5fd-4eac-a297-d5e255c35c19", + "00f23389-bdfa-43c2-8b16-5815f2582fa8", + ] + sfs = [ + "2314daec-c262-414a-86e3-69bb6fa5bc16", + "d8bfdb5d-195e-4f34-81aa-6135705317df", + ] # TODO(igordc): must be changed to NSH in Queens (MPLS is a workaround) - correlation = 'nsh' + correlation = "nsh" chain_id = 33 if spi: chain_id = spi - # what OpenStack is assumed to respond (patch OpenStack's return value) - dict_from_neutron = {'port_chain': { - 'id': '5bc05721-079b-4b6e-a235-47cac331cbb6', - 'name': name, - 'description': '', - 'tenant_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', - 'project_id': '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c', - 'chain_id': chain_id, - 'flow_classifiers': classifications, - 'port_pair_groups': sfs, - 'chain_parameters': {'correlation': correlation} - }} + # what OpenStack is assumed to respond (patch OpenStack"s return value) + dict_from_neutron = { + "port_chain": { + "id": "5bc05721-079b-4b6e-a235-47cac331cbb6", + "name": name, + "description": "", + "tenant_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c", + "project_id": "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c", + "chain_id": chain_id, + "flow_classifiers": classifications, + "port_pair_groups": sfs, + "chain_parameters": {"correlation": correlation}, + } + } create_sfc_port_chain.return_value = dict_from_neutron # what the VIM connector is expected to # send to OpenStack based on the input - dict_to_neutron = {'port_chain': { - 'name': name, - 'flow_classifiers': ['2bd2a2e5-c5fd-4eac-a297-d5e255c35c19', - '00f23389-bdfa-43c2-8b16-5815f2582fa8'], - 'port_pair_groups': ['2314daec-c262-414a-86e3-69bb6fa5bc16', - 'd8bfdb5d-195e-4f34-81aa-6135705317df'], - 'chain_parameters': {'correlation': correlation} - }} + dict_to_neutron = { + "port_chain": { + "name": name, + "flow_classifiers": [ + "2bd2a2e5-c5fd-4eac-a297-d5e255c35c19", + "00f23389-bdfa-43c2-8b16-5815f2582fa8", + ], + "port_pair_groups": [ + "2314daec-c262-414a-86e3-69bb6fa5bc16", + "d8bfdb5d-195e-4f34-81aa-6135705317df", + ], + "chain_parameters": {"correlation": correlation}, + } + } if spi: - dict_to_neutron['port_chain']['chain_id'] = spi + dict_to_neutron["port_chain"]["chain_id"] = spi # call the VIM connector if sfc_encap is None: if spi is None: result = self.vimconn.new_sfp(name, classifications, sfs) else: - result = self.vimconn.new_sfp(name, classifications, sfs, - spi=spi) + result = self.vimconn.new_sfp(name, classifications, sfs, spi=spi) else: if spi is None: - result = self.vimconn.new_sfp(name, classifications, sfs, - sfc_encap) + result = self.vimconn.new_sfp(name, classifications, sfs, sfc_encap) else: - result = self.vimconn.new_sfp(name, classifications, sfs, - sfc_encap, spi) + result = self.vimconn.new_sfp( + name, classifications, sfs, sfc_encap, spi + ) # assert that the VIM connector made the expected call to OpenStack create_sfc_port_chain.assert_called_with(dict_to_neutron) # assert that the VIM connector had the expected result / return value - self.assertEqual(result, dict_from_neutron['port_chain']['id']) + self.assertEqual(result, dict_from_neutron["port_chain"]["id"]) def _test_new_classification(self, create_sfc_flow_classifier, ctype): # input to VIM connector - name = 'osm_classification' - definition = {'ethertype': 'IPv4', - 'logical_source_port': 
- 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', - 'protocol': 'tcp', - 'source_ip_prefix': '192.168.2.0/24', - 'source_port_range_max': 99, - 'source_port_range_min': 50} - - # what OpenStack is assumed to respond (patch OpenStack's return value) - dict_from_neutron = {'flow_classifier': copy.copy(definition)} - dict_from_neutron['flow_classifier'][ - 'id'] = '7735ec2c-fddf-4130-9712-32ed2ab6a372' - dict_from_neutron['flow_classifier']['name'] = name - dict_from_neutron['flow_classifier']['description'] = '' - dict_from_neutron['flow_classifier'][ - 'tenant_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c' - dict_from_neutron['flow_classifier'][ - 'project_id'] = '130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c' + name = "osm_classification" + definition = { + "ethertype": "IPv4", + "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b", + "protocol": "tcp", + "source_ip_prefix": "192.168.2.0/24", + "source_port_range_max": 99, + "source_port_range_min": 50, + } + + # what OpenStack is assumed to respond (patch OpenStack"s return value) + dict_from_neutron = {"flow_classifier": copy.copy(definition)} + dict_from_neutron["flow_classifier"][ + "id" + ] = "7735ec2c-fddf-4130-9712-32ed2ab6a372" + dict_from_neutron["flow_classifier"]["name"] = name + dict_from_neutron["flow_classifier"]["description"] = "" + dict_from_neutron["flow_classifier"][ + "tenant_id" + ] = "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c" + dict_from_neutron["flow_classifier"][ + "project_id" + ] = "130b1e97-b0f1-40a8-8804-b6ad9b8c3e0c" create_sfc_flow_classifier.return_value = dict_from_neutron # what the VIM connector is expected to # send to OpenStack based on the input - dict_to_neutron = {'flow_classifier': copy.copy(definition)} - dict_to_neutron['flow_classifier']['name'] = 'osm_classification' + dict_to_neutron = {"flow_classifier": copy.copy(definition)} + dict_to_neutron["flow_classifier"]["name"] = "osm_classification" # call the VIM connector result = self.vimconn.new_classification(name, ctype, definition) @@ -229,626 +267,786 @@ class TestSfcOperations(unittest.TestCase): # assert that the VIM connector made the expected call to OpenStack create_sfc_flow_classifier.assert_called_with(dict_to_neutron) # assert that the VIM connector had the expected result / return value - self.assertEqual(result, dict_from_neutron['flow_classifier']['id']) + self.assertEqual(result, dict_from_neutron["flow_classifier"]["id"]) - @mock.patch.object(Client, 'create_sfc_flow_classifier') + @mock.patch.object(Client, "create_sfc_flow_classifier") def test_new_classification(self, create_sfc_flow_classifier): - self._test_new_classification(create_sfc_flow_classifier, - 'legacy_flow_classifier') + self._test_new_classification( + create_sfc_flow_classifier, "legacy_flow_classifier" + ) - @mock.patch.object(Client, 'create_sfc_flow_classifier') + @mock.patch.object(Client, "create_sfc_flow_classifier") def test_new_classification_unsupported_type(self, create_sfc_flow_classifier): - self.assertRaises(vimconn.VimConnNotSupportedException, - self._test_new_classification, - create_sfc_flow_classifier, 'h265') - - @mock.patch.object(Client, 'create_sfc_port_pair') + self.assertRaises( + vimconn.VimConnNotSupportedException, + self._test_new_classification, + create_sfc_flow_classifier, + "h265", + ) + + @mock.patch.object(Client, "create_sfc_port_pair") def test_new_sfi_with_sfc_encap(self, create_sfc_port_pair): self._test_new_sfi(create_sfc_port_pair, True) - @mock.patch.object(Client, 'create_sfc_port_pair') + @mock.patch.object(Client, 
"create_sfc_port_pair") def test_new_sfi_without_sfc_encap(self, create_sfc_port_pair): self._test_new_sfi(create_sfc_port_pair, False) - @mock.patch.object(Client, 'create_sfc_port_pair') + @mock.patch.object(Client, "create_sfc_port_pair") def test_new_sfi_default_sfc_encap(self, create_sfc_port_pair): self._test_new_sfi(create_sfc_port_pair, None) - @mock.patch.object(Client, 'create_sfc_port_pair') + @mock.patch.object(Client, "create_sfc_port_pair") def test_new_sfi_bad_ingress_ports(self, create_sfc_port_pair): - ingress_ports = ['5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'a0273f64-82c9-11e7-b08f-6328e53f0fa7'] - self.assertRaises(vimconn.VimConnNotSupportedException, - self._test_new_sfi, - create_sfc_port_pair, True, ingress_ports=ingress_ports) + ingress_ports = [ + "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "a0273f64-82c9-11e7-b08f-6328e53f0fa7", + ] + self.assertRaises( + vimconn.VimConnNotSupportedException, + self._test_new_sfi, + create_sfc_port_pair, + True, + ingress_ports=ingress_ports, + ) ingress_ports = [] - self.assertRaises(vimconn.VimConnNotSupportedException, - self._test_new_sfi, - create_sfc_port_pair, True, ingress_ports=ingress_ports) - - @mock.patch.object(Client, 'create_sfc_port_pair') + self.assertRaises( + vimconn.VimConnNotSupportedException, + self._test_new_sfi, + create_sfc_port_pair, + True, + ingress_ports=ingress_ports, + ) + + @mock.patch.object(Client, "create_sfc_port_pair") def test_new_sfi_bad_egress_ports(self, create_sfc_port_pair): - egress_ports = ['230cdf1b-de37-4891-bc07-f9010cf1f967', - 'b41228fe-82c9-11e7-9b44-17504174320b'] - self.assertRaises(vimconn.VimConnNotSupportedException, - self._test_new_sfi, - create_sfc_port_pair, True, egress_ports=egress_ports) + egress_ports = [ + "230cdf1b-de37-4891-bc07-f9010cf1f967", + "b41228fe-82c9-11e7-9b44-17504174320b", + ] + self.assertRaises( + vimconn.VimConnNotSupportedException, + self._test_new_sfi, + create_sfc_port_pair, + True, + egress_ports=egress_ports, + ) egress_ports = [] - self.assertRaises(vimconn.VimConnNotSupportedException, - self._test_new_sfi, - create_sfc_port_pair, True, egress_ports=egress_ports) - - @mock.patch.object(vimconnector, 'get_sfi') - @mock.patch.object(Client, 'create_sfc_port_pair_group') + self.assertRaises( + vimconn.VimConnNotSupportedException, + self._test_new_sfi, + create_sfc_port_pair, + True, + egress_ports=egress_ports, + ) + + @mock.patch.object(vimconnector, "get_sfi") + @mock.patch.object(Client, "create_sfc_port_pair_group") def test_new_sf(self, create_sfc_port_pair_group, get_sfi): - get_sfi.return_value = {'sfc_encap': True} + get_sfi.return_value = {"sfc_encap": True} self._test_new_sf(create_sfc_port_pair_group) - @mock.patch.object(vimconnector, 'get_sfi') - @mock.patch.object(Client, 'create_sfc_port_pair_group') - def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group, - get_sfi): - get_sfi.return_value = {'sfc_encap': 'nsh'} - self.assertRaises(vimconn.VimConnNotSupportedException, - self._test_new_sf, create_sfc_port_pair_group) - - @mock.patch.object(Client, 'create_sfc_port_chain') + @mock.patch.object(vimconnector, "get_sfi") + @mock.patch.object(Client, "create_sfc_port_pair_group") + def test_new_sf_inconsistent_sfc_encap(self, create_sfc_port_pair_group, get_sfi): + get_sfi.return_value = {"sfc_encap": "nsh"} + self.assertRaises( + vimconn.VimConnNotSupportedException, + self._test_new_sf, + create_sfc_port_pair_group, + ) + + @mock.patch.object(Client, "create_sfc_port_chain") def test_new_sfp_with_sfc_encap(self, 
create_sfc_port_chain): self._test_new_sfp(create_sfc_port_chain, True, None) - @mock.patch.object(Client, 'create_sfc_port_chain') + @mock.patch.object(Client, "create_sfc_port_chain") def test_new_sfp_without_sfc_encap(self, create_sfc_port_chain): self._test_new_sfp(create_sfc_port_chain, False, None) self._test_new_sfp(create_sfc_port_chain, False, 25) - @mock.patch.object(Client, 'create_sfc_port_chain') + @mock.patch.object(Client, "create_sfc_port_chain") def test_new_sfp_default_sfc_encap(self, create_sfc_port_chain): self._test_new_sfp(create_sfc_port_chain, None, None) - @mock.patch.object(Client, 'create_sfc_port_chain') + @mock.patch.object(Client, "create_sfc_port_chain") def test_new_sfp_with_sfc_encap_spi(self, create_sfc_port_chain): self._test_new_sfp(create_sfc_port_chain, True, 25) - @mock.patch.object(Client, 'create_sfc_port_chain') + @mock.patch.object(Client, "create_sfc_port_chain") def test_new_sfp_default_sfc_encap_spi(self, create_sfc_port_chain): self._test_new_sfp(create_sfc_port_chain, None, 25) - @mock.patch.object(Client, 'list_sfc_flow_classifiers') + @mock.patch.object(Client, "list_sfc_flow_classifiers") def test_get_classification_list(self, list_sfc_flow_classifiers): # what OpenStack is assumed to return to the VIM connector - list_sfc_flow_classifiers.return_value = {'flow_classifiers': [ - {'source_port_range_min': 2000, - 'destination_ip_prefix': '192.168.3.0/24', - 'protocol': 'udp', - 'description': '', - 'ethertype': 'IPv4', - 'l7_parameters': {}, - 'source_port_range_max': 2000, - 'destination_port_range_min': 3000, - 'source_ip_prefix': '192.168.2.0/24', - 'logical_destination_port': None, - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'destination_port_range_max': None, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', - 'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', - 'name': 'fc1'}]} + list_sfc_flow_classifiers.return_value = { + "flow_classifiers": [ + { + "source_port_range_min": 2000, + "destination_ip_prefix": "192.168.3.0/24", + "protocol": "udp", + "description": "", + "ethertype": "IPv4", + "l7_parameters": {}, + "source_port_range_max": 2000, + "destination_port_range_min": 3000, + "source_ip_prefix": "192.168.2.0/24", + "logical_destination_port": None, + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "destination_port_range_max": None, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b", + "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d", + "name": "fc1", + } + ] + } # call the VIM connector - filter_dict = {'protocol': 'tcp', 'ethertype': 'IPv4'} + filter_dict = {"protocol": "tcp", "ethertype": "IPv4"} result = self.vimconn.get_classification_list(filter_dict.copy()) # assert that VIM connector called OpenStack with the expected filter list_sfc_flow_classifiers.assert_called_with(**filter_dict) # assert that the VIM connector successfully # translated and returned the OpenStack result - self.assertEqual(result, [ - {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', - 'name': 'fc1', - 'description': '', - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'ctype': 'legacy_flow_classifier', - 'definition': { - 'source_port_range_min': 2000, - 'destination_ip_prefix': '192.168.3.0/24', - 'protocol': 'udp', - 'ethertype': 'IPv4', - 'l7_parameters': {}, - 'source_port_range_max': 2000, - 'destination_port_range_min': 3000, - 'source_ip_prefix': 
'192.168.2.0/24', - 'logical_destination_port': None, - 'destination_port_range_max': None, - 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b'} - }]) + self.assertEqual( + result, + [ + { + "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d", + "name": "fc1", + "description": "", + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "ctype": "legacy_flow_classifier", + "definition": { + "source_port_range_min": 2000, + "destination_ip_prefix": "192.168.3.0/24", + "protocol": "udp", + "ethertype": "IPv4", + "l7_parameters": {}, + "source_port_range_max": 2000, + "destination_port_range_min": 3000, + "source_ip_prefix": "192.168.2.0/24", + "logical_destination_port": None, + "destination_port_range_max": None, + "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b", + }, + } + ], + ) def _test_get_sfi_list(self, list_port_pair, correlation, sfc_encap): # what OpenStack is assumed to return to the VIM connector - list_port_pair.return_value = {'port_pairs': [ - {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'service_function_parameters': {'correlation': correlation}, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', - 'name': 'osm_sfi'}]} + list_port_pair.return_value = { + "port_pairs": [ + { + "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "service_function_parameters": {"correlation": correlation}, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "c121ebdd-7f2d-4213-b933-3325298a6966", + "name": "osm_sfi", + } + ] + } # call the VIM connector - filter_dict = {'name': 'osm_sfi', 'description': ''} + filter_dict = {"name": "osm_sfi", "description": ""} result = self.vimconn.get_sfi_list(filter_dict.copy()) # assert that VIM connector called OpenStack with the expected filter list_port_pair.assert_called_with(**filter_dict) # assert that the VIM connector successfully # translated and returned the OpenStack result - self.assertEqual(result, [ - {'ingress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'egress_ports': ['5311c75d-d718-4369-bbda-cdcc6da60fcc'], - 'sfc_encap': sfc_encap, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', - 'name': 'osm_sfi'}]) - - @mock.patch.object(Client, 'list_sfc_port_pairs') + self.assertEqual( + result, + [ + { + "ingress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "egress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"], + "sfc_encap": sfc_encap, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "c121ebdd-7f2d-4213-b933-3325298a6966", + "name": "osm_sfi", + } + ], + ) + + @mock.patch.object(Client, "list_sfc_port_pairs") def test_get_sfi_list_with_sfc_encap(self, list_sfc_port_pairs): - self._test_get_sfi_list(list_sfc_port_pairs, 'nsh', True) + self._test_get_sfi_list(list_sfc_port_pairs, "nsh", True) - @mock.patch.object(Client, 'list_sfc_port_pairs') + @mock.patch.object(Client, "list_sfc_port_pairs") def test_get_sfi_list_without_sfc_encap(self, list_sfc_port_pairs): self._test_get_sfi_list(list_sfc_port_pairs, None, False) - @mock.patch.object(Client, 
'list_sfc_port_pair_groups') + @mock.patch.object(Client, "list_sfc_port_pair_groups") def test_get_sf_list(self, list_sfc_port_pair_groups): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_pair_groups.return_value = {'port_pair_groups': [ - {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2', - '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'port_pair_group_parameters': {}, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f', - 'name': 'osm_sf'}]} + list_sfc_port_pair_groups.return_value = { + "port_pair_groups": [ + { + "port_pairs": [ + "08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2", + "0d63799c-82d6-11e7-8deb-a746bb3ae9f5", + ], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "port_pair_group_parameters": {}, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "f4a0bde8-82d5-11e7-90e1-a72b762fa27f", + "name": "osm_sf", + } + ] + } # call the VIM connector - filter_dict = {'name': 'osm_sf', 'description': ''} + filter_dict = {"name": "osm_sf", "description": ""} result = self.vimconn.get_sf_list(filter_dict.copy()) # assert that VIM connector called OpenStack with the expected filter list_sfc_port_pair_groups.assert_called_with(**filter_dict) # assert that the VIM connector successfully # translated and returned the OpenStack result - self.assertEqual(result, [ - {'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2', - '0d63799c-82d6-11e7-8deb-a746bb3ae9f5'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'f4a0bde8-82d5-11e7-90e1-a72b762fa27f', - 'name': 'osm_sf'}]) + self.assertEqual( + result, + [ + { + "sfis": [ + "08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2", + "0d63799c-82d6-11e7-8deb-a746bb3ae9f5", + ], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "f4a0bde8-82d5-11e7-90e1-a72b762fa27f", + "name": "osm_sf", + } + ], + ) def _test_get_sfp_list(self, list_sfc_port_chains, correlation, sfc_encap): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_chains.return_value = {'port_chains': [ - {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25', - '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'], - 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e', - '1387ab44-82d7-11e7-9bb0-476337183905'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'chain_parameters': {'correlation': correlation}, - 'chain_id': 40, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', - 'name': 'osm_sfp'}]} + list_sfc_port_chains.return_value = { + "port_chains": [ + { + "port_pair_groups": [ + "7d8e3bf8-82d6-11e7-a032-8ff028839d25", + "7dc9013e-82d6-11e7-a5a6-a3a8d78a5518", + ], + "flow_classifiers": [ + "1333c2f4-82d7-11e7-a5df-9327f33d104e", + "1387ab44-82d7-11e7-9bb0-476337183905", + ], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "chain_parameters": {"correlation": correlation}, + "chain_id": 40, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47", + "name": "osm_sfp", + } + ] + } # call the VIM connector - filter_dict = {'name': 'osm_sfp', 'description': ''} + filter_dict = {"name": "osm_sfp", "description": ""} result = self.vimconn.get_sfp_list(filter_dict.copy()) # assert that VIM connector called 
OpenStack with the expected filter list_sfc_port_chains.assert_called_with(**filter_dict) # assert that the VIM connector successfully # translated and returned the OpenStack result - self.assertEqual(result, [ - {'service_functions': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25', - '7dc9013e-82d6-11e7-a5a6-a3a8d78a5518'], - 'classifications': ['1333c2f4-82d7-11e7-a5df-9327f33d104e', - '1387ab44-82d7-11e7-9bb0-476337183905'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'sfc_encap': sfc_encap, - 'spi': 40, - 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', - 'name': 'osm_sfp'}]) - - @mock.patch.object(Client, 'list_sfc_port_chains') + self.assertEqual( + result, + [ + { + "service_functions": [ + "7d8e3bf8-82d6-11e7-a032-8ff028839d25", + "7dc9013e-82d6-11e7-a5a6-a3a8d78a5518", + ], + "classifications": [ + "1333c2f4-82d7-11e7-a5df-9327f33d104e", + "1387ab44-82d7-11e7-9bb0-476337183905", + ], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "sfc_encap": sfc_encap, + "spi": 40, + "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47", + "name": "osm_sfp", + } + ], + ) + + @mock.patch.object(Client, "list_sfc_port_chains") def test_get_sfp_list_with_sfc_encap(self, list_sfc_port_chains): - self._test_get_sfp_list(list_sfc_port_chains, 'nsh', True) + self._test_get_sfp_list(list_sfc_port_chains, "nsh", True) - @mock.patch.object(Client, 'list_sfc_port_chains') + @mock.patch.object(Client, "list_sfc_port_chains") def test_get_sfp_list_without_sfc_encap(self, list_sfc_port_chains): self._test_get_sfp_list(list_sfc_port_chains, None, False) - @mock.patch.object(Client, 'list_sfc_flow_classifiers') + @mock.patch.object(Client, "list_sfc_flow_classifiers") def test_get_classification(self, list_sfc_flow_classifiers): # what OpenStack is assumed to return to the VIM connector - list_sfc_flow_classifiers.return_value = {'flow_classifiers': [ - {'source_port_range_min': 2000, - 'destination_ip_prefix': '192.168.3.0/24', - 'protocol': 'udp', - 'description': '', - 'ethertype': 'IPv4', - 'l7_parameters': {}, - 'source_port_range_max': 2000, - 'destination_port_range_min': 3000, - 'source_ip_prefix': '192.168.2.0/24', - 'logical_destination_port': None, - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'destination_port_range_max': None, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', - 'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', - 'name': 'fc1'} - ]} + list_sfc_flow_classifiers.return_value = { + "flow_classifiers": [ + { + "source_port_range_min": 2000, + "destination_ip_prefix": "192.168.3.0/24", + "protocol": "udp", + "description": "", + "ethertype": "IPv4", + "l7_parameters": {}, + "source_port_range_max": 2000, + "destination_port_range_min": 3000, + "source_ip_prefix": "192.168.2.0/24", + "logical_destination_port": None, + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "destination_port_range_max": None, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b", + "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d", + "name": "fc1", + } + ] + } # call the VIM connector - result = self.vimconn.get_classification( - '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d') + result = self.vimconn.get_classification("22198366-d4e8-4d6b-b4d2-637d5d6cbb7d") # assert that VIM connector called OpenStack with the expected filter 
list_sfc_flow_classifiers.assert_called_with( - id='22198366-d4e8-4d6b-b4d2-637d5d6cbb7d') + id="22198366-d4e8-4d6b-b4d2-637d5d6cbb7d" + ) # assert that VIM connector successfully returned the OpenStack result - self.assertEqual(result, - {'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', - 'name': 'fc1', - 'description': '', - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'ctype': 'legacy_flow_classifier', - 'definition': { - 'source_port_range_min': 2000, - 'destination_ip_prefix': '192.168.3.0/24', - 'protocol': 'udp', - 'ethertype': 'IPv4', - 'l7_parameters': {}, - 'source_port_range_max': 2000, - 'destination_port_range_min': 3000, - 'source_ip_prefix': '192.168.2.0/24', - 'logical_destination_port': None, - 'destination_port_range_max': None, - 'logical_source_port': - 'aaab0ab0-1452-4636-bb3b-11dca833fa2b'} - }) - - @mock.patch.object(Client, 'list_sfc_flow_classifiers') + self.assertEqual( + result, + { + "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d", + "name": "fc1", + "description": "", + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "ctype": "legacy_flow_classifier", + "definition": { + "source_port_range_min": 2000, + "destination_ip_prefix": "192.168.3.0/24", + "protocol": "udp", + "ethertype": "IPv4", + "l7_parameters": {}, + "source_port_range_max": 2000, + "destination_port_range_min": 3000, + "source_ip_prefix": "192.168.2.0/24", + "logical_destination_port": None, + "destination_port_range_max": None, + "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b", + }, + }, + ) + + @mock.patch.object(Client, "list_sfc_flow_classifiers") def test_get_classification_many_results(self, list_sfc_flow_classifiers): # what OpenStack is assumed to return to the VIM connector - list_sfc_flow_classifiers.return_value = {'flow_classifiers': [ - {'source_port_range_min': 2000, - 'destination_ip_prefix': '192.168.3.0/24', - 'protocol': 'udp', - 'description': '', - 'ethertype': 'IPv4', - 'l7_parameters': {}, - 'source_port_range_max': 2000, - 'destination_port_range_min': 3000, - 'source_ip_prefix': '192.168.2.0/24', - 'logical_destination_port': None, - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'destination_port_range_max': None, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', - 'id': '22198366-d4e8-4d6b-b4d2-637d5d6cbb7d', - 'name': 'fc1'}, - {'source_port_range_min': 1000, - 'destination_ip_prefix': '192.168.3.0/24', - 'protocol': 'udp', - 'description': '', - 'ethertype': 'IPv4', - 'l7_parameters': {}, - 'source_port_range_max': 1000, - 'destination_port_range_min': 3000, - 'source_ip_prefix': '192.168.2.0/24', - 'logical_destination_port': None, - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'destination_port_range_max': None, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'logical_source_port': 'aaab0ab0-1452-4636-bb3b-11dca833fa2b', - 'id': '3196bafc-82dd-11e7-a205-9bf6c14b0721', - 'name': 'fc2'} - ]} + list_sfc_flow_classifiers.return_value = { + "flow_classifiers": [ + { + "source_port_range_min": 2000, + "destination_ip_prefix": "192.168.3.0/24", + "protocol": "udp", + "description": "", + "ethertype": "IPv4", + "l7_parameters": {}, + "source_port_range_max": 2000, + "destination_port_range_min": 3000, + "source_ip_prefix": "192.168.2.0/24", + "logical_destination_port": None, + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "destination_port_range_max": None, + 
"project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b", + "id": "22198366-d4e8-4d6b-b4d2-637d5d6cbb7d", + "name": "fc1", + }, + { + "source_port_range_min": 1000, + "destination_ip_prefix": "192.168.3.0/24", + "protocol": "udp", + "description": "", + "ethertype": "IPv4", + "l7_parameters": {}, + "source_port_range_max": 1000, + "destination_port_range_min": 3000, + "source_ip_prefix": "192.168.2.0/24", + "logical_destination_port": None, + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "destination_port_range_max": None, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "logical_source_port": "aaab0ab0-1452-4636-bb3b-11dca833fa2b", + "id": "3196bafc-82dd-11e7-a205-9bf6c14b0721", + "name": "fc2", + }, + ] + } # call the VIM connector - self.assertRaises(vimconn.VimConnConflictException, - self.vimconn.get_classification, - '3196bafc-82dd-11e7-a205-9bf6c14b0721') + self.assertRaises( + vimconn.VimConnConflictException, + self.vimconn.get_classification, + "3196bafc-82dd-11e7-a205-9bf6c14b0721", + ) # assert the VIM connector called OpenStack with the expected filter list_sfc_flow_classifiers.assert_called_with( - id='3196bafc-82dd-11e7-a205-9bf6c14b0721') + id="3196bafc-82dd-11e7-a205-9bf6c14b0721" + ) - @mock.patch.object(Client, 'list_sfc_flow_classifiers') + @mock.patch.object(Client, "list_sfc_flow_classifiers") def test_get_classification_no_results(self, list_sfc_flow_classifiers): # what OpenStack is assumed to return to the VIM connector - list_sfc_flow_classifiers.return_value = {'flow_classifiers': []} + list_sfc_flow_classifiers.return_value = {"flow_classifiers": []} # call the VIM connector - self.assertRaises(vimconn.VimConnNotFoundException, - self.vimconn.get_classification, - '3196bafc-82dd-11e7-a205-9bf6c14b0721') + self.assertRaises( + vimconn.VimConnNotFoundException, + self.vimconn.get_classification, + "3196bafc-82dd-11e7-a205-9bf6c14b0721", + ) # assert the VIM connector called OpenStack with the expected filter list_sfc_flow_classifiers.assert_called_with( - id='3196bafc-82dd-11e7-a205-9bf6c14b0721') + id="3196bafc-82dd-11e7-a205-9bf6c14b0721" + ) - @mock.patch.object(Client, 'list_sfc_port_pairs') + @mock.patch.object(Client, "list_sfc_port_pairs") def test_get_sfi(self, list_sfc_port_pairs): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_pairs.return_value = {'port_pairs': [ - {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'service_function_parameters': {'correlation': 'nsh'}, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', - 'name': 'osm_sfi1'}, - ]} + list_sfc_port_pairs.return_value = { + "port_pairs": [ + { + "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "service_function_parameters": {"correlation": "nsh"}, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "c121ebdd-7f2d-4213-b933-3325298a6966", + "name": "osm_sfi1", + }, + ] + } # call the VIM connector - result = self.vimconn.get_sfi('c121ebdd-7f2d-4213-b933-3325298a6966') + result = self.vimconn.get_sfi("c121ebdd-7f2d-4213-b933-3325298a6966") # assert the VIM connector called OpenStack with the expected filter list_sfc_port_pairs.assert_called_with( - id='c121ebdd-7f2d-4213-b933-3325298a6966') + 
id="c121ebdd-7f2d-4213-b933-3325298a6966" + ) # assert the VIM connector successfully returned the OpenStack result - self.assertEqual(result, - {'ingress_ports': [ - '5311c75d-d718-4369-bbda-cdcc6da60fcc'], - 'egress_ports': [ - '5311c75d-d718-4369-bbda-cdcc6da60fcc'], - 'sfc_encap': True, - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', - 'name': 'osm_sfi1'}) - - @mock.patch.object(Client, 'list_sfc_port_pairs') + self.assertEqual( + result, + { + "ingress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"], + "egress_ports": ["5311c75d-d718-4369-bbda-cdcc6da60fcc"], + "sfc_encap": True, + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "c121ebdd-7f2d-4213-b933-3325298a6966", + "name": "osm_sfi1", + }, + ) + + @mock.patch.object(Client, "list_sfc_port_pairs") def test_get_sfi_many_results(self, list_sfc_port_pairs): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_pairs.return_value = {'port_pairs': [ - {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'service_function_parameters': {'correlation': 'nsh'}, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'c121ebdd-7f2d-4213-b933-3325298a6966', - 'name': 'osm_sfi1'}, - {'ingress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'egress': '5311c75d-d718-4369-bbda-cdcc6da60fcc', - 'service_function_parameters': {'correlation': 'nsh'}, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'c0436d92-82db-11e7-8f9c-5fa535f1261f', - 'name': 'osm_sfi2'} - ]} + list_sfc_port_pairs.return_value = { + "port_pairs": [ + { + "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "service_function_parameters": {"correlation": "nsh"}, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "c121ebdd-7f2d-4213-b933-3325298a6966", + "name": "osm_sfi1", + }, + { + "ingress": "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "egress": "5311c75d-d718-4369-bbda-cdcc6da60fcc", + "service_function_parameters": {"correlation": "nsh"}, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "c0436d92-82db-11e7-8f9c-5fa535f1261f", + "name": "osm_sfi2", + }, + ] + } # call the VIM connector - self.assertRaises(vimconn.VimConnConflictException, - self.vimconn.get_sfi, - 'c0436d92-82db-11e7-8f9c-5fa535f1261f') + self.assertRaises( + vimconn.VimConnConflictException, + self.vimconn.get_sfi, + "c0436d92-82db-11e7-8f9c-5fa535f1261f", + ) # assert that VIM connector called OpenStack with the expected filter list_sfc_port_pairs.assert_called_with( - id='c0436d92-82db-11e7-8f9c-5fa535f1261f') + id="c0436d92-82db-11e7-8f9c-5fa535f1261f" + ) - @mock.patch.object(Client, 'list_sfc_port_pairs') + @mock.patch.object(Client, "list_sfc_port_pairs") def test_get_sfi_no_results(self, list_sfc_port_pairs): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_pairs.return_value = {'port_pairs': []} + list_sfc_port_pairs.return_value = {"port_pairs": []} # call the VIM connector - self.assertRaises(vimconn.VimConnNotFoundException, - 
self.vimconn.get_sfi, - 'b22892fc-82d9-11e7-ae85-0fea6a3b3757') + self.assertRaises( + vimconn.VimConnNotFoundException, + self.vimconn.get_sfi, + "b22892fc-82d9-11e7-ae85-0fea6a3b3757", + ) # assert that VIM connector called OpenStack with the expected filter list_sfc_port_pairs.assert_called_with( - id='b22892fc-82d9-11e7-ae85-0fea6a3b3757') + id="b22892fc-82d9-11e7-ae85-0fea6a3b3757" + ) - @mock.patch.object(Client, 'list_sfc_port_pair_groups') + @mock.patch.object(Client, "list_sfc_port_pair_groups") def test_get_sf(self, list_sfc_port_pair_groups): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_pair_groups.return_value = {'port_pair_groups': [ - {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'port_pair_group_parameters': {}, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d', - 'name': 'osm_sf1'} - ]} + list_sfc_port_pair_groups.return_value = { + "port_pair_groups": [ + { + "port_pairs": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "port_pair_group_parameters": {}, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d", + "name": "osm_sf1", + } + ] + } # call the VIM connector - result = self.vimconn.get_sf('b22892fc-82d9-11e7-ae85-0fea6a3b3757') + result = self.vimconn.get_sf("b22892fc-82d9-11e7-ae85-0fea6a3b3757") # assert that VIM connector called OpenStack with the expected filter list_sfc_port_pair_groups.assert_called_with( - id='b22892fc-82d9-11e7-ae85-0fea6a3b3757') + id="b22892fc-82d9-11e7-ae85-0fea6a3b3757" + ) # assert that VIM connector successfully returned the OpenStack result - self.assertEqual(result, - {'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'sfis': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'], - 'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d', - 'name': 'osm_sf1'}) - - @mock.patch.object(Client, 'list_sfc_port_pair_groups') + self.assertEqual( + result, + { + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "sfis": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"], + "id": "aabba8a6-82d9-11e7-a18a-d3c7719b742d", + "name": "osm_sf1", + }, + ) + + @mock.patch.object(Client, "list_sfc_port_pair_groups") def test_get_sf_many_results(self, list_sfc_port_pair_groups): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_pair_groups.return_value = {'port_pair_groups': [ - {'port_pairs': ['08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'port_pair_group_parameters': {}, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'aabba8a6-82d9-11e7-a18a-d3c7719b742d', - 'name': 'osm_sf1'}, - {'port_pairs': ['0d63799c-82d6-11e7-8deb-a746bb3ae9f5'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'port_pair_group_parameters': {}, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': 'b22892fc-82d9-11e7-ae85-0fea6a3b3757', - 'name': 'osm_sf2'} - ]} + list_sfc_port_pair_groups.return_value = { + "port_pair_groups": [ + { + "port_pairs": ["08fbdbb0-82d6-11e7-ad95-9bb52fbec2f2"], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "port_pair_group_parameters": {}, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": 
"aabba8a6-82d9-11e7-a18a-d3c7719b742d", + "name": "osm_sf1", + }, + { + "port_pairs": ["0d63799c-82d6-11e7-8deb-a746bb3ae9f5"], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "port_pair_group_parameters": {}, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "b22892fc-82d9-11e7-ae85-0fea6a3b3757", + "name": "osm_sf2", + }, + ] + } # call the VIM connector - self.assertRaises(vimconn.VimConnConflictException, - self.vimconn.get_sf, - 'b22892fc-82d9-11e7-ae85-0fea6a3b3757') + self.assertRaises( + vimconn.VimConnConflictException, + self.vimconn.get_sf, + "b22892fc-82d9-11e7-ae85-0fea6a3b3757", + ) # assert that VIM connector called OpenStack with the expected filter list_sfc_port_pair_groups.assert_called_with( - id='b22892fc-82d9-11e7-ae85-0fea6a3b3757') + id="b22892fc-82d9-11e7-ae85-0fea6a3b3757" + ) - @mock.patch.object(Client, 'list_sfc_port_pair_groups') + @mock.patch.object(Client, "list_sfc_port_pair_groups") def test_get_sf_no_results(self, list_sfc_port_pair_groups): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_pair_groups.return_value = {'port_pair_groups': []} + list_sfc_port_pair_groups.return_value = {"port_pair_groups": []} # call the VIM connector - self.assertRaises(vimconn.VimConnNotFoundException, - self.vimconn.get_sf, - 'b22892fc-82d9-11e7-ae85-0fea6a3b3757') + self.assertRaises( + vimconn.VimConnNotFoundException, + self.vimconn.get_sf, + "b22892fc-82d9-11e7-ae85-0fea6a3b3757", + ) # assert that VIM connector called OpenStack with the expected filter list_sfc_port_pair_groups.assert_called_with( - id='b22892fc-82d9-11e7-ae85-0fea6a3b3757') + id="b22892fc-82d9-11e7-ae85-0fea6a3b3757" + ) - @mock.patch.object(Client, 'list_sfc_port_chains') + @mock.patch.object(Client, "list_sfc_port_chains") def test_get_sfp(self, list_sfc_port_chains): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_chains.return_value = {'port_chains': [ - {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'], - 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'chain_parameters': {'correlation': 'nsh'}, - 'chain_id': 40, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', - 'name': 'osm_sfp1'}]} + list_sfc_port_chains.return_value = { + "port_chains": [ + { + "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"], + "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "chain_parameters": {"correlation": "nsh"}, + "chain_id": 40, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47", + "name": "osm_sfp1", + } + ] + } # call the VIM connector - result = self.vimconn.get_sfp('821bc9be-82d7-11e7-8ce3-23a08a27ab47') + result = self.vimconn.get_sfp("821bc9be-82d7-11e7-8ce3-23a08a27ab47") # assert that VIM connector called OpenStack with the expected filter list_sfc_port_chains.assert_called_with( - id='821bc9be-82d7-11e7-8ce3-23a08a27ab47') + id="821bc9be-82d7-11e7-8ce3-23a08a27ab47" + ) # assert that VIM connector successfully returned the OpenStack result - self.assertEqual(result, - {'service_functions': [ - '7d8e3bf8-82d6-11e7-a032-8ff028839d25'], - 'classifications': [ - '1333c2f4-82d7-11e7-a5df-9327f33d104e'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 
'sfc_encap': True, - 'spi': 40, - 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', - 'name': 'osm_sfp1'}) - - @mock.patch.object(Client, 'list_sfc_port_chains') + self.assertEqual( + result, + { + "service_functions": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"], + "classifications": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "sfc_encap": True, + "spi": 40, + "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47", + "name": "osm_sfp1", + }, + ) + + @mock.patch.object(Client, "list_sfc_port_chains") def test_get_sfp_many_results(self, list_sfc_port_chains): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_chains.return_value = {'port_chains': [ - {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'], - 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'chain_parameters': {'correlation': 'nsh'}, - 'chain_id': 40, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': '821bc9be-82d7-11e7-8ce3-23a08a27ab47', - 'name': 'osm_sfp1'}, - {'port_pair_groups': ['7d8e3bf8-82d6-11e7-a032-8ff028839d25'], - 'flow_classifiers': ['1333c2f4-82d7-11e7-a5df-9327f33d104e'], - 'description': '', - 'tenant_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'chain_parameters': {'correlation': 'nsh'}, - 'chain_id': 50, - 'project_id': '8f3019ef06374fa880a0144ad4bc1d7b', - 'id': '5d002f38-82de-11e7-a770-f303f11ce66a', - 'name': 'osm_sfp2'} - ]} + list_sfc_port_chains.return_value = { + "port_chains": [ + { + "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"], + "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "chain_parameters": {"correlation": "nsh"}, + "chain_id": 40, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "821bc9be-82d7-11e7-8ce3-23a08a27ab47", + "name": "osm_sfp1", + }, + { + "port_pair_groups": ["7d8e3bf8-82d6-11e7-a032-8ff028839d25"], + "flow_classifiers": ["1333c2f4-82d7-11e7-a5df-9327f33d104e"], + "description": "", + "tenant_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "chain_parameters": {"correlation": "nsh"}, + "chain_id": 50, + "project_id": "8f3019ef06374fa880a0144ad4bc1d7b", + "id": "5d002f38-82de-11e7-a770-f303f11ce66a", + "name": "osm_sfp2", + }, + ] + } # call the VIM connector - self.assertRaises(vimconn.VimConnConflictException, - self.vimconn.get_sfp, - '5d002f38-82de-11e7-a770-f303f11ce66a') + self.assertRaises( + vimconn.VimConnConflictException, + self.vimconn.get_sfp, + "5d002f38-82de-11e7-a770-f303f11ce66a", + ) # assert that VIM connector called OpenStack with the expected filter list_sfc_port_chains.assert_called_with( - id='5d002f38-82de-11e7-a770-f303f11ce66a') + id="5d002f38-82de-11e7-a770-f303f11ce66a" + ) - @mock.patch.object(Client, 'list_sfc_port_chains') + @mock.patch.object(Client, "list_sfc_port_chains") def test_get_sfp_no_results(self, list_sfc_port_chains): # what OpenStack is assumed to return to the VIM connector - list_sfc_port_chains.return_value = {'port_chains': []} + list_sfc_port_chains.return_value = {"port_chains": []} # call the VIM connector - self.assertRaises(vimconn.VimConnNotFoundException, - self.vimconn.get_sfp, - '5d002f38-82de-11e7-a770-f303f11ce66a') + self.assertRaises( + vimconn.VimConnNotFoundException, + self.vimconn.get_sfp, + "5d002f38-82de-11e7-a770-f303f11ce66a", + ) # assert that VIM connector called OpenStack 
with the expected filter list_sfc_port_chains.assert_called_with( - id='5d002f38-82de-11e7-a770-f303f11ce66a') + id="5d002f38-82de-11e7-a770-f303f11ce66a" + ) - @mock.patch.object(Client, 'delete_sfc_flow_classifier') + @mock.patch.object(Client, "delete_sfc_flow_classifier") def test_delete_classification(self, delete_sfc_flow_classifier): result = self.vimconn.delete_classification( - '638f957c-82df-11e7-b7c8-132706021464') + "638f957c-82df-11e7-b7c8-132706021464" + ) delete_sfc_flow_classifier.assert_called_with( - '638f957c-82df-11e7-b7c8-132706021464') - self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464') + "638f957c-82df-11e7-b7c8-132706021464" + ) + self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464") - @mock.patch.object(Client, 'delete_sfc_port_pair') + @mock.patch.object(Client, "delete_sfc_port_pair") def test_delete_sfi(self, delete_sfc_port_pair): - result = self.vimconn.delete_sfi( - '638f957c-82df-11e7-b7c8-132706021464') - delete_sfc_port_pair.assert_called_with( - '638f957c-82df-11e7-b7c8-132706021464') - self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464') + result = self.vimconn.delete_sfi("638f957c-82df-11e7-b7c8-132706021464") + delete_sfc_port_pair.assert_called_with("638f957c-82df-11e7-b7c8-132706021464") + self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464") - @mock.patch.object(Client, 'delete_sfc_port_pair_group') + @mock.patch.object(Client, "delete_sfc_port_pair_group") def test_delete_sf(self, delete_sfc_port_pair_group): - result = self.vimconn.delete_sf('638f957c-82df-11e7-b7c8-132706021464') + result = self.vimconn.delete_sf("638f957c-82df-11e7-b7c8-132706021464") delete_sfc_port_pair_group.assert_called_with( - '638f957c-82df-11e7-b7c8-132706021464') - self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464') + "638f957c-82df-11e7-b7c8-132706021464" + ) + self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464") - @mock.patch.object(Client, 'delete_sfc_port_chain') + @mock.patch.object(Client, "delete_sfc_port_chain") def test_delete_sfp(self, delete_sfc_port_chain): - result = self.vimconn.delete_sfp( - '638f957c-82df-11e7-b7c8-132706021464') - delete_sfc_port_chain.assert_called_with( - '638f957c-82df-11e7-b7c8-132706021464') - self.assertEqual(result, '638f957c-82df-11e7-b7c8-132706021464') + result = self.vimconn.delete_sfp("638f957c-82df-11e7-b7c8-132706021464") + delete_sfc_port_chain.assert_called_with("638f957c-82df-11e7-b7c8-132706021464") + self.assertEqual(result, "638f957c-82df-11e7-b7c8-132706021464") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py index bd3955ac..c59bf904 100644 --- a/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py +++ b/RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py @@ -31,6 +31,7 @@ to the VIM connector's SFC resources as follows: """ from osm_ro_plugin import vimconn + # import json import logging import netaddr @@ -40,7 +41,6 @@ import random import re import copy from pprint import pformat - from novaclient import client as nClient, exceptions as nvExceptions from keystoneauth1.identity import v2, v3 from keystoneauth1 import session @@ -50,7 +50,9 @@ import keystoneclient.v2_0.client as ksClient_v2 from glanceclient import client as glClient import glanceclient.exc as gl1Exceptions from cinderclient import client as cClient -from http.client import HTTPException # TODO py3 check that 
this base exception matches python2 httplib.HTTPException + +# TODO py3 check that this base exception matches python2 httplib.HTTPException +from http.client import HTTPException from neutronclient.neutron import client as neClient from neutronclient.common import exceptions as neExceptions from requests.exceptions import ConnectionError @@ -59,23 +61,25 @@ __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor __date__ = "$22-sep-2017 23:59:59$" """contain the openstack virtual machine status to openmano status""" -vmStatus2manoFormat = {'ACTIVE': 'ACTIVE', - 'PAUSED': 'PAUSED', - 'SUSPENDED': 'SUSPENDED', - 'SHUTOFF': 'INACTIVE', - 'BUILD': 'BUILD', - 'ERROR': 'ERROR', - 'DELETED': 'DELETED' - } -netStatus2manoFormat = {'ACTIVE': 'ACTIVE', - 'PAUSED': 'PAUSED', - 'INACTIVE': 'INACTIVE', - 'BUILD': 'BUILD', - 'ERROR': 'ERROR', - 'DELETED': 'DELETED' - } - -supportedClassificationTypes = ['legacy_flow_classifier'] +vmStatus2manoFormat = { + "ACTIVE": "ACTIVE", + "PAUSED": "PAUSED", + "SUSPENDED": "SUSPENDED", + "SHUTOFF": "INACTIVE", + "BUILD": "BUILD", + "ERROR": "ERROR", + "DELETED": "DELETED", +} +netStatus2manoFormat = { + "ACTIVE": "ACTIVE", + "PAUSED": "PAUSED", + "INACTIVE": "INACTIVE", + "BUILD": "BUILD", + "ERROR": "ERROR", + "DELETED": "DELETED", +} + +supportedClassificationTypes = ["legacy_flow_classifier"] # global var to have a timeout creating and deleting volumes volume_timeout = 1800 @@ -94,74 +98,117 @@ class SafeDumper(yaml.SafeDumper): class vimconnector(vimconn.VimConnector): - def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, - log_level=None, config={}, persistent_info={}): + def __init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin=None, + user=None, + passwd=None, + log_level=None, + config={}, + persistent_info={}, + ): """using common constructor parameters. In this case 'url' is the keystone authorization url, 'url_admin' is not used """ - api_version = config.get('APIversion') - if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'): - raise vimconn.VimConnException("Invalid value '{}' for config:APIversion. " - "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version)) - vim_type = config.get('vim_type') - if vim_type and vim_type not in ('vio', 'VIO'): - raise vimconn.VimConnException("Invalid value '{}' for config:vim_type." - "Allowed values are 'vio' or 'VIO'".format(vim_type)) - - if config.get('dataplane_net_vlan_range') is not None: - # validate vlan ranges provided by user - self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'), 'dataplane_net_vlan_range') + api_version = config.get("APIversion") + + if api_version and api_version not in ("v3.3", "v2.0", "2", "3"): + raise vimconn.VimConnException( + "Invalid value '{}' for config:APIversion. " + "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version) + ) - vim_type = config.get("vim_type") + + if vim_type and vim_type not in ("vio", "VIO"): + raise vimconn.VimConnException( + "Invalid value '{}' for config:vim_type. " 
+ "Allowed values are 'vio' or 'VIO'".format(vim_type) + ) + + if config.get("dataplane_net_vlan_range") is not None: # validate vlan ranges provided by user - self._validate_vlan_ranges(config.get('multisegment_vlan_range'), 'multisegment_vlan_range') + self._validate_vlan_ranges( + config.get("dataplane_net_vlan_range"), "dataplane_net_vlan_range" + ) - vimconn.VimConnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, - config) + if config.get("multisegment_vlan_range") is not None: + # validate vlan ranges provided by user + self._validate_vlan_ranges( + config.get("multisegment_vlan_range"), "multisegment_vlan_range" + ) + + vimconn.VimConnector.__init__( + self, + uuid, + name, + tenant_id, + tenant_name, + url, + url_admin, + user, + passwd, + log_level, + config, + ) if self.config.get("insecure") and self.config.get("ca_cert"): - raise vimconn.VimConnException("options insecure and ca_cert are mutually exclusive") + raise vimconn.VimConnException( + "options insecure and ca_cert are mutually exclusive" + ) + self.verify = True + if self.config.get("insecure"): self.verify = False + if self.config.get("ca_cert"): self.verify = self.config.get("ca_cert") if not url: - raise TypeError('url param can not be NoneType') + raise TypeError("url param can not be NoneType") + self.persistent_info = persistent_info - self.availability_zone = persistent_info.get('availability_zone', None) - self.session = persistent_info.get('session', {'reload_client': True}) - self.my_tenant_id = self.session.get('my_tenant_id') - self.nova = self.session.get('nova') - self.neutron = self.session.get('neutron') - self.cinder = self.session.get('cinder') - self.glance = self.session.get('glance') - # self.glancev1 = self.session.get('glancev1') - self.keystone = self.session.get('keystone') - self.api_version3 = self.session.get('api_version3') + self.availability_zone = persistent_info.get("availability_zone", None) + self.session = persistent_info.get("session", {"reload_client": True}) + self.my_tenant_id = self.session.get("my_tenant_id") + self.nova = self.session.get("nova") + self.neutron = self.session.get("neutron") + self.cinder = self.session.get("cinder") + self.glance = self.session.get("glance") + # self.glancev1 = self.session.get("glancev1") + self.keystone = self.session.get("keystone") + self.api_version3 = self.session.get("api_version3") self.vim_type = self.config.get("vim_type") + if self.vim_type: self.vim_type = self.vim_type.upper() + if self.config.get("use_internal_endpoint"): self.endpoint_type = "internalURL" else: self.endpoint_type = None - logging.getLogger('urllib3').setLevel(logging.WARNING) - logging.getLogger('keystoneauth').setLevel(logging.WARNING) - logging.getLogger('novaclient').setLevel(logging.WARNING) - self.logger = logging.getLogger('ro.vim.openstack') + logging.getLogger("urllib3").setLevel(logging.WARNING) + logging.getLogger("keystoneauth").setLevel(logging.WARNING) + logging.getLogger("novaclient").setLevel(logging.WARNING) + self.logger = logging.getLogger("ro.vim.openstack") # allow security_groups to be a list or a single string - if isinstance(self.config.get('security_groups'), str): - self.config['security_groups'] = [self.config['security_groups']] + if isinstance(self.config.get("security_groups"), str): + self.config["security_groups"] = [self.config["security_groups"]] + self.security_groups_id = None # ###### VIO Specific Changes ######### if self.vim_type == "VIO": - self.logger = 
logging.getLogger('ro.vim.vio') + self.logger = logging.getLogger("ro.vim.vio") if log_level: self.logger.setLevel(getattr(logging, log_level)) @@ -169,9 +216,9 @@ class vimconnector(vimconn.VimConnector): def __getitem__(self, index): """Get individual parameters. Throw KeyError""" - if index == 'project_domain_id': + if index == "project_domain_id": return self.config.get("project_domain_id") - elif index == 'user_domain_id': + elif index == "user_domain_id": return self.config.get("user_domain_id") else: return vimconn.VimConnector.__getitem__(self, index) @@ -179,13 +226,14 @@ class vimconnector(vimconn.VimConnector): def __setitem__(self, index, value): """Set individual parameters and mark the session as dirty to force a connection reload. Throw KeyError""" - if index == 'project_domain_id': + if index == "project_domain_id": self.config["project_domain_id"] = value - elif index == 'user_domain_id': + elif index == "user_domain_id": self.config["user_domain_id"] = value else: vimconn.VimConnector.__setitem__(self, index, value) - self.session['reload_client'] = True + + self.session["reload_client"] = True def serialize(self, value): """Serialization of python basic types. @@ -198,11 +246,16 @@ class vimconnector(vimconn.VimConnector): return value try: - return yaml.dump(value, Dumper=SafeDumper, - default_flow_style=True, width=256) + return yaml.dump( + value, Dumper=SafeDumper, default_flow_style=True, width=256 + ) except yaml.representer.RepresenterError: - self.logger.debug('The following entity cannot be serialized in YAML:\n\n%s\n\n', pformat(value), - exc_info=True) + self.logger.debug( + "The following entity cannot be serialized in YAML:\n\n%s\n\n", + pformat(value), + exc_info=True, + ) + return str(value) def _reload_connection(self): @@ -210,83 +263,132 @@ class vimconnector(vimconn.VimConnector): Throw keystoneclient.apiclient.exceptions.AuthorizationFailure """ # TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-) - if self.session['reload_client']: - if self.config.get('APIversion'): - self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3' + if self.session["reload_client"]: + if self.config.get("APIversion"): + self.api_version3 = ( + self.config["APIversion"] == "v3.3" + or self.config["APIversion"] == "3" + ) else: # get from ending auth_url that ends with v3 or with v2.0 - self.api_version3 = self.url.endswith("/v3") or self.url.endswith("/v3/") - self.session['api_version3'] = self.api_version3 + self.api_version3 = self.url.endswith("/v3") or self.url.endswith( + "/v3/" + ) + + self.session["api_version3"] = self.api_version3 + if self.api_version3: - if self.config.get('project_domain_id') or self.config.get('project_domain_name'): + if self.config.get("project_domain_id") or self.config.get( + "project_domain_name" + ): project_domain_id_default = None else: - project_domain_id_default = 'default' - if self.config.get('user_domain_id') or self.config.get('user_domain_name'): + project_domain_id_default = "default" + + if self.config.get("user_domain_id") or self.config.get( + "user_domain_name" + ): user_domain_id_default = None else: - user_domain_id_default = 'default' - auth = v3.Password(auth_url=self.url, - username=self.user, - password=self.passwd, - project_name=self.tenant_name, - project_id=self.tenant_id, - project_domain_id=self.config.get('project_domain_id', project_domain_id_default), - user_domain_id=self.config.get('user_domain_id', 
user_domain_id_default), - project_domain_name=self.config.get('project_domain_name'), - user_domain_name=self.config.get('user_domain_name')) + user_domain_id_default = "default" + auth = v3.Password( + auth_url=self.url, + username=self.user, + password=self.passwd, + project_name=self.tenant_name, + project_id=self.tenant_id, + project_domain_id=self.config.get( + "project_domain_id", project_domain_id_default + ), + user_domain_id=self.config.get( + "user_domain_id", user_domain_id_default + ), + project_domain_name=self.config.get("project_domain_name"), + user_domain_name=self.config.get("user_domain_name"), + ) else: - auth = v2.Password(auth_url=self.url, - username=self.user, - password=self.passwd, - tenant_name=self.tenant_name, - tenant_id=self.tenant_id) + auth = v2.Password( + auth_url=self.url, + username=self.user, + password=self.passwd, + tenant_name=self.tenant_name, + tenant_id=self.tenant_id, + ) + sess = session.Session(auth=auth, verify=self.verify) # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River # Titanium cloud and StarlingX - region_name = self.config.get('region_name') + region_name = self.config.get("region_name") + if self.api_version3: - self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, - region_name=region_name) + self.keystone = ksClient_v3.Client( + session=sess, + endpoint_type=self.endpoint_type, + region_name=region_name, + ) else: - self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type) - self.session['keystone'] = self.keystone - # In order to enable microversion functionality an explicit microversion must be specified in 'config'. + self.keystone = ksClient_v2.Client( + session=sess, endpoint_type=self.endpoint_type + ) + + self.session["keystone"] = self.keystone + # In order to enable microversion functionality an explicit microversion must be specified in "config". # This implementation approach is due to the warning message in # https://developer.openstack.org/api-guide/compute/microversions.html # where it is stated that microversion backwards compatibility is not guaranteed and clients should # always require a specific microversion. 
- # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config + # To be able to use "device role tagging" functionality define "microversion: 2.32" in datacenter config version = self.config.get("microversion") + if not version: version = "2.1" + # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River # Titanium cloud and StarlingX - self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, - endpoint_type=self.endpoint_type, region_name=region_name) - self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, - endpoint_type=self.endpoint_type, - region_name=region_name) - self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, - region_name=region_name) + self.nova = self.session["nova"] = nClient.Client( + str(version), + session=sess, + endpoint_type=self.endpoint_type, + region_name=region_name, + ) + self.neutron = self.session["neutron"] = neClient.Client( + "2.0", + session=sess, + endpoint_type=self.endpoint_type, + region_name=region_name, + ) + self.cinder = self.session["cinder"] = cClient.Client( + 2, + session=sess, + endpoint_type=self.endpoint_type, + region_name=region_name, + ) + try: - self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id() + self.my_tenant_id = self.session["my_tenant_id"] = sess.get_project_id() except Exception: self.logger.error("Cannot get project_id from session", exc_info=True) + if self.endpoint_type == "internalURL": glance_service_id = self.keystone.services.list(name="glance")[0].id - glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url + glance_endpoint = self.keystone.endpoints.list( + glance_service_id, interface="internal" + )[0].url else: glance_endpoint = None - self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint) + + self.glance = self.session["glance"] = glClient.Client( + 2, session=sess, endpoint=glance_endpoint + ) # using version 1 of glance client in new_image() - # self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess, + # self.glancev1 = self.session["glancev1"] = glClient.Client("1", session=sess, # endpoint=glance_endpoint) - self.session['reload_client'] = False - self.persistent_info['session'] = self.session + self.session["reload_client"] = False + self.persistent_info["session"] = self.session # add availability zone info inside self.persistent_info self._set_availablity_zones() - self.persistent_info['availability_zone'] = self.availability_zone + self.persistent_info["availability_zone"] = self.availability_zone - self.security_groups_id = None # force to get again security_groups_ids next time they are needed + # force security_groups_ids to be fetched again the next time they are needed + self.security_groups_id = None def __net_os2mano(self, net_list_dict): """Transform the net openstack format to mano format @@ -298,10 +400,10 @@ class vimconnector(vimconn.VimConnector): else: raise TypeError("param net_list_dict must be a list or a dictionary") for net in net_list_: - if net.get('provider:network_type') == "vlan": - net['type'] = 'data' + if net.get("provider:network_type") == "vlan": + net["type"] = "data" else: - net['type'] = 'bridge' + net["type"] = "bridge" def __classification_os2mano(self, class_list_dict): """Transform the openstack format (Flow Classifier) to mano format @@ -314,20 +416,20 @@ class 
vimconnector(vimconn.VimConnector): else: raise TypeError("param class_list_dict must be a list or a dictionary") for classification in class_list_: - id = classification.pop('id') - name = classification.pop('name') - description = classification.pop('description') - project_id = classification.pop('project_id') - tenant_id = classification.pop('tenant_id') + id = classification.pop("id") + name = classification.pop("name") + description = classification.pop("description") + project_id = classification.pop("project_id") + tenant_id = classification.pop("tenant_id") original_classification = copy.deepcopy(classification) classification.clear() - classification['ctype'] = 'legacy_flow_classifier' - classification['definition'] = original_classification - classification['id'] = id - classification['name'] = name - classification['description'] = description - classification['project_id'] = project_id - classification['tenant_id'] = tenant_id + classification["ctype"] = "legacy_flow_classifier" + classification["definition"] = original_classification + classification["id"] = id + classification["name"] = name + classification["description"] = description + classification["project_id"] = project_id + classification["tenant_id"] = tenant_id def __sfi_os2mano(self, sfi_list_dict): """Transform the openstack format (Port Pair) to mano format (SFI) @@ -338,25 +440,31 @@ class vimconnector(vimconn.VimConnector): elif isinstance(sfi_list_dict, list): sfi_list_ = sfi_list_dict else: - raise TypeError( - "param sfi_list_dict must be a list or a dictionary") + raise TypeError("param sfi_list_dict must be a list or a dictionary") + for sfi in sfi_list_: - sfi['ingress_ports'] = [] - sfi['egress_ports'] = [] - if sfi.get('ingress'): - sfi['ingress_ports'].append(sfi['ingress']) - if sfi.get('egress'): - sfi['egress_ports'].append(sfi['egress']) - del sfi['ingress'] - del sfi['egress'] - params = sfi.get('service_function_parameters') + sfi["ingress_ports"] = [] + sfi["egress_ports"] = [] + + if sfi.get("ingress"): + sfi["ingress_ports"].append(sfi["ingress"]) + + if sfi.get("egress"): + sfi["egress_ports"].append(sfi["egress"]) + + del sfi["ingress"] + del sfi["egress"] + params = sfi.get("service_function_parameters") sfc_encap = False + if params: - correlation = params.get('correlation') + correlation = params.get("correlation") + if correlation: sfc_encap = True - sfi['sfc_encap'] = sfc_encap - del sfi['service_function_parameters'] + + sfi["sfc_encap"] = sfc_encap + del sfi["service_function_parameters"] def __sf_os2mano(self, sf_list_dict): """Transform the openstack format (Port Pair Group) to mano format (SF) @@ -367,12 +475,12 @@ class vimconnector(vimconn.VimConnector): elif isinstance(sf_list_dict, list): sf_list_ = sf_list_dict else: - raise TypeError( - "param sf_list_dict must be a list or a dictionary") + raise TypeError("param sf_list_dict must be a list or a dictionary") + for sf in sf_list_: - del sf['port_pair_group_parameters'] - sf['sfis'] = sf['port_pairs'] - del sf['port_pairs'] + del sf["port_pair_group_parameters"] + sf["sfis"] = sf["port_pairs"] + del sf["port_pairs"] def __sfp_os2mano(self, sfp_list_dict): """Transform the openstack format (Port Chain) to mano format (SFP) @@ -383,19 +491,22 @@ class vimconnector(vimconn.VimConnector): elif isinstance(sfp_list_dict, list): sfp_list_ = sfp_list_dict else: - raise TypeError( - "param sfp_list_dict must be a list or a dictionary") + raise TypeError("param sfp_list_dict must be a list or a dictionary") + for sfp in sfp_list_: - 
params = sfp.pop('chain_parameters') + params = sfp.pop("chain_parameters") sfc_encap = False + if params: - correlation = params.get('correlation') + correlation = params.get("correlation") + if correlation: sfc_encap = True - sfp['sfc_encap'] = sfc_encap - sfp['spi'] = sfp.pop('chain_id') - sfp['classifications'] = sfp.pop('flow_classifiers') - sfp['service_functions'] = sfp.pop('port_pair_groups') + + sfp["sfc_encap"] = sfc_encap + sfp["spi"] = sfp.pop("chain_id") + sfp["classifications"] = sfp.pop("flow_classifiers") + sfp["service_functions"] = sfp.pop("port_pair_groups") # placeholder for now; read TODO note below def _validate_classification(self, type, definition): @@ -408,30 +519,72 @@ class vimconnector(vimconn.VimConnector): def _format_exception(self, exception): """Transform a keystone, nova, neutron exception into a vimconn exception discovering the cause""" - message_error = str(exception) tip = "" - if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound, - gl1Exceptions.HTTPNotFound)): - raise vimconn.VimConnNotFoundException(type(exception).__name__ + ": " + message_error) - elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError, - ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)): + if isinstance( + exception, + ( + neExceptions.NetworkNotFoundClient, + nvExceptions.NotFound, + ksExceptions.NotFound, + gl1Exceptions.HTTPNotFound, + ), + ): + raise vimconn.VimConnNotFoundException( + type(exception).__name__ + ": " + message_error + ) + elif isinstance( + exception, + ( + HTTPException, + gl1Exceptions.HTTPException, + gl1Exceptions.CommunicationError, + ConnectionError, + ksExceptions.ConnectionError, + neExceptions.ConnectionFailed, + ), + ): if type(exception).__name__ == "SSLError": tip = " (maybe option 'insecure' must be added to the VIM)" - raise vimconn.VimConnConnectionException("Invalid URL or credentials{}: {}".format(tip, message_error)) - elif isinstance(exception, (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)): - raise vimconn.VimConnException(type(exception).__name__ + ": " + message_error) - elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException, - neExceptions.NeutronException)): - raise vimconn.VimConnUnexpectedResponse(type(exception).__name__ + ": " + message_error) + + raise vimconn.VimConnConnectionException( + "Invalid URL or credentials{}: {}".format(tip, message_error) + ) + elif isinstance( + exception, + ( + KeyError, + nvExceptions.BadRequest, + ksExceptions.BadRequest, + ), + ): + raise vimconn.VimConnException( + type(exception).__name__ + ": " + message_error + ) + elif isinstance( + exception, + ( + nvExceptions.ClientException, + ksExceptions.ClientException, + neExceptions.NeutronException, + ), + ): + raise vimconn.VimConnUnexpectedResponse( + type(exception).__name__ + ": " + message_error + ) elif isinstance(exception, nvExceptions.Conflict): - raise vimconn.VimConnConflictException(type(exception).__name__ + ": " + message_error) + raise vimconn.VimConnConflictException( + type(exception).__name__ + ": " + message_error + ) elif isinstance(exception, vimconn.VimConnException): raise exception else: # () self.logger.error("General Exception " + message_error, exc_info=True) - raise vimconn.VimConnConnectionException(type(exception).__name__ + ": " + message_error) + + raise vimconn.VimConnConnectionException( + type(exception).__name__ + ": " + message_error + ) 
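The method above is the single funnel that turns neutron/nova/keystone/glance client errors into vimconn exceptions, which is why the public methods in this connector end their try blocks by delegating to it. A minimal sketch of that calling pattern (hypothetical names: `conn` stands for any connected vimconnector instance; the real methods catch specific client-exception tuples rather than a bare Exception):

    try:
        conn._reload_connection()
        nets = conn.neutron.list_networks()  # any OpenStack client call goes here
    except Exception as e:
        conn._format_exception(e)  # always re-raises as a vimconn.VimConn* subclass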
def _get_ids_from_name(self): """ @@ -440,22 +593,32 @@ vimconnector(vimconn.VimConnector): """ # get tenant_id if only tenant_name is supplied self._reload_connection() + if not self.my_tenant_id: - raise vimconn.VimConnConnectionException("Error getting tenant information from name={} id={}". - format(self.tenant_name, self.tenant_id)) + raise vimconn.VimConnConnectionException( + "Error getting tenant information from name={} id={}".format( + self.tenant_name, self.tenant_id + ) + ) + if self.config.get('security_groups') and not self.security_groups_id: + if self.config.get("security_groups") and not self.security_groups_id: # convert from name to id - neutron_sg_list = self.neutron.list_security_groups(tenant_id=self.my_tenant_id)["security_groups"] + neutron_sg_list = self.neutron.list_security_groups( + tenant_id=self.my_tenant_id + )["security_groups"] self.security_groups_id = [] - for sg in self.config.get('security_groups'): + for sg in self.config.get("security_groups"): for neutron_sg in neutron_sg_list: if sg in (neutron_sg["id"], neutron_sg["name"]): self.security_groups_id.append(neutron_sg["id"]) break else: self.security_groups_id = None - raise vimconn.VimConnConnectionException("Not found security group {} for this tenant".format(sg)) + + raise vimconn.VimConnConnectionException( + "Not found security group {} for this tenant".format(sg) + ) def check_vim_connectivity(self): # just get network list to check connectivity and credentials @@ -470,51 +633,88 @@ class vimconnector(vimconn.VimConnector): Returns the tenant list of dictionaries: [{'name':'<name>', 'id':'<id>', ...}, ...] """ self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict)) + try: self._reload_connection() + if self.api_version3: - project_class_list = self.keystone.projects.list(name=filter_dict.get("name")) + project_class_list = self.keystone.projects.list( + name=filter_dict.get("name") + ) else: project_class_list = self.keystone.tenants.findall(**filter_dict) + project_list = [] + for project in project_class_list: - if filter_dict.get('id') and filter_dict["id"] != project.id: + if filter_dict.get("id") and filter_dict["id"] != project.id: continue + project_list.append(project.to_dict()) + return project_list - except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e: + except ( + ksExceptions.ConnectionError, + ksExceptions.ClientException, + ConnectionError, + ) as e: self._format_exception(e) def new_tenant(self, tenant_name, tenant_description): """Adds a new tenant to openstack VIM. Returns the tenant identifier""" self.logger.debug("Adding a new tenant name: %s", tenant_name) + try: self._reload_connection() + if self.api_version3: - project = self.keystone.projects.create(tenant_name, self.config.get("project_domain_id", "default"), - description=tenant_description, is_domain=False) + project = self.keystone.projects.create( + tenant_name, + self.config.get("project_domain_id", "default"), + description=tenant_description, + is_domain=False, + ) else: project = self.keystone.tenants.create(tenant_name, tenant_description) + return project.id - except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError)\ - as e: + except ( + ksExceptions.ConnectionError, + ksExceptions.ClientException, + ksExceptions.BadRequest, + ConnectionError, + ) as e: self._format_exception(e) def delete_tenant(self, tenant_id): """Delete a tenant from openstack VIM. 
Returns the old tenant identifier""" self.logger.debug("Deleting tenant %s from VIM", tenant_id) + try: self._reload_connection() + if self.api_version3: self.keystone.projects.delete(tenant_id) else: self.keystone.tenants.delete(tenant_id) + return tenant_id - except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError)\ - as e: + except ( + ksExceptions.ConnectionError, + ksExceptions.ClientException, + ksExceptions.NotFound, + ConnectionError, + ) as e: self._format_exception(e) - def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): + def new_network( + self, + net_name, + net_type, + ip_profile=None, + shared=False, + provider_network_profile=None, + ): """Adds a tenant network to VIM Params: 'net_name': name of the network @@ -539,126 +739,190 @@ class vimconnector(vimconn.VimConnector): Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same as not present. """ - self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type) + self.logger.debug( + "Adding a new network to VIM name '%s', type '%s'", net_name, net_type + ) # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile)) try: vlan = None + if provider_network_profile: vlan = provider_network_profile.get("segmentation-id") + new_net = None created_items = {} self._reload_connection() - network_dict = {'name': net_name, 'admin_state_up': True} + network_dict = {"name": net_name, "admin_state_up": True} + if net_type in ("data", "ptp"): provider_physical_network = None - if provider_network_profile and provider_network_profile.get("physical-network"): - provider_physical_network = provider_network_profile.get("physical-network") + + if provider_network_profile and provider_network_profile.get( + "physical-network" + ): + provider_physical_network = provider_network_profile.get( + "physical-network" + ) + # provider-network must be one of the dataplane_physical_net if this is a list. If it is a string # or not declared, just ignore the checking - if isinstance(self.config.get('dataplane_physical_net'), (tuple, list)) and \ - provider_physical_network not in self.config['dataplane_physical_net']: + if ( + isinstance( + self.config.get("dataplane_physical_net"), (tuple, list) + ) + and provider_physical_network + not in self.config["dataplane_physical_net"] + ): raise vimconn.VimConnConflictException( - "Invalid parameter 'provider-network:physical-network' for network creation. '{}' is not " - "one of the declared list at VIM_config:dataplane_physical_net".format( - provider_physical_network)) - if not provider_physical_network: # use the default dataplane_physical_net - provider_physical_network = self.config.get('dataplane_physical_net') + "Invalid parameter 'provider-network:physical-network' " + "for network creation. '{}' is not one of the declared " + "list at VIM_config:dataplane_physical_net".format( + provider_physical_network + ) + ) + + # use the default dataplane_physical_net + if not provider_physical_network: + provider_physical_network = self.config.get( + "dataplane_physical_net" + ) + # if it is a non-empty list, use the first value. 
If it is a string use the value directly - if isinstance(provider_physical_network, (tuple, list)) and provider_physical_network: + if ( + isinstance(provider_physical_network, (tuple, list)) + and provider_physical_network + ): provider_physical_network = provider_physical_network[0] if not provider_physical_network: raise vimconn.VimConnConflictException( - "missing information needed for underlay networks. Provide 'dataplane_physical_net' " - "configuration at VIM or use the NS instantiation parameter 'provider-network.physical-network'" - " for the VLD") - - if not self.config.get('multisegment_support'): - network_dict["provider:physical_network"] = provider_physical_network - if provider_network_profile and "network-type" in provider_network_profile: - network_dict["provider:network_type"] = provider_network_profile["network-type"] + "missing information needed for underlay networks. Provide " + "'dataplane_physical_net' configuration at VIM or use the NS " + "instantiation parameter 'provider-network.physical-network'" + " for the VLD" + ) + + if not self.config.get("multisegment_support"): + network_dict[ + "provider:physical_network" + ] = provider_physical_network + + if ( + provider_network_profile + and "network-type" in provider_network_profile + ): + network_dict[ + "provider:network_type" + ] = provider_network_profile["network-type"] else: - network_dict["provider:network_type"] = self.config.get('dataplane_network_type', 'vlan') + network_dict["provider:network_type"] = self.config.get( + "dataplane_network_type", "vlan" + ) + if vlan: network_dict["provider:segmentation_id"] = vlan else: # Multi-segment case segment_list = [] segment1_dict = { - "provider:physical_network": '', - "provider:network_type": 'vxlan' + "provider:physical_network": "", + "provider:network_type": "vxlan", } segment_list.append(segment1_dict) segment2_dict = { "provider:physical_network": provider_physical_network, - "provider:network_type": "vlan" + "provider:network_type": "vlan", } + if vlan: segment2_dict["provider:segmentation_id"] = vlan - elif self.config.get('multisegment_vlan_range'): + elif self.config.get("multisegment_vlan_range"): vlanID = self._generate_multisegment_vlanID() segment2_dict["provider:segmentation_id"] = vlanID + # else # raise vimconn.VimConnConflictException( - # "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment + # "You must provide "multisegment_vlan_range" at config dict before creating a multisegment # network") segment_list.append(segment2_dict) network_dict["segments"] = segment_list # VIO Specific Changes. 
It needs a concrete VLAN if self.vim_type == "VIO" and vlan is None: - if self.config.get('dataplane_net_vlan_range') is None: + if self.config.get("dataplane_net_vlan_range") is None: raise vimconn.VimConnConflictException( - "You must provide 'dataplane_net_vlan_range' in format [start_ID - end_ID] at VIM_config " - "for creating underlay networks") + "You must provide 'dataplane_net_vlan_range' in format " + "[start_ID - end_ID] at VIM_config for creating underlay " + "networks" + ) + network_dict["provider:segmentation_id"] = self._generate_vlanID() network_dict["shared"] = shared + if self.config.get("disable_network_port_security"): network_dict["port_security_enabled"] = False - new_net = self.neutron.create_network({'network': network_dict}) + + new_net = self.neutron.create_network({"network": network_dict}) # print new_net # create subnetwork, even if there is no profile + if not ip_profile: ip_profile = {} - if not ip_profile.get('subnet_address'): + + if not ip_profile.get("subnet_address"): # Fake subnet is required subnet_rand = random.randint(0, 255) - ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand) - if 'ip_version' not in ip_profile: - ip_profile['ip_version'] = "IPv4" - subnet = {"name": net_name+"-subnet", - "network_id": new_net["network"]["id"], - "ip_version": 4 if ip_profile['ip_version'] == "IPv4" else 6, - "cidr": ip_profile['subnet_address'] - } + ip_profile["subnet_address"] = "192.168.{}.0/24".format(subnet_rand) + + if "ip_version" not in ip_profile: + ip_profile["ip_version"] = "IPv4" + + subnet = { + "name": net_name + "-subnet", + "network_id": new_net["network"]["id"], + "ip_version": 4 if ip_profile["ip_version"] == "IPv4" else 6, + "cidr": ip_profile["subnet_address"], + } + # Gateway should be set to None if not needed. 
Otherwise openstack assigns one by default - if ip_profile.get('gateway_address'): - subnet['gateway_ip'] = ip_profile['gateway_address'] + if ip_profile.get("gateway_address"): + subnet["gateway_ip"] = ip_profile["gateway_address"] else: - subnet['gateway_ip'] = None - if ip_profile.get('dns_address'): - subnet['dns_nameservers'] = ip_profile['dns_address'].split(";") - if 'dhcp_enabled' in ip_profile: - subnet['enable_dhcp'] = False if \ - ip_profile['dhcp_enabled'] == "false" or ip_profile['dhcp_enabled'] is False else True - if ip_profile.get('dhcp_start_address'): - subnet['allocation_pools'] = [] - subnet['allocation_pools'].append(dict()) - subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address'] - if ip_profile.get('dhcp_count'): - # parts = ip_profile['dhcp_start_address'].split('.') + subnet["gateway_ip"] = None + + if ip_profile.get("dns_address"): + subnet["dns_nameservers"] = ip_profile["dns_address"].split(";") + + if "dhcp_enabled" in ip_profile: + subnet["enable_dhcp"] = ( + False + if ip_profile["dhcp_enabled"] == "false" + or ip_profile["dhcp_enabled"] is False + else True + ) + + if ip_profile.get("dhcp_start_address"): + subnet["allocation_pools"] = [] + subnet["allocation_pools"].append(dict()) + subnet["allocation_pools"][0]["start"] = ip_profile[ + "dhcp_start_address" + ] + + if ip_profile.get("dhcp_count"): + # parts = ip_profile["dhcp_start_address"].split(".") # ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3]) - ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address'])) - ip_int += ip_profile['dhcp_count'] - 1 + ip_int = int(netaddr.IPAddress(ip_profile["dhcp_start_address"])) + ip_int += ip_profile["dhcp_count"] - 1 ip_str = str(netaddr.IPAddress(ip_int)) - subnet['allocation_pools'][0]['end'] = ip_str + subnet["allocation_pools"][0]["end"] = ip_str + # self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet)) self.neutron.create_subnet({"subnet": subnet}) - if net_type == "data" and self.config.get('multisegment_support'): - if self.config.get('l2gw_support'): + if net_type == "data" and self.config.get("multisegment_support"): + if self.config.get("l2gw_support"): l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ()) for l2gw in l2gw_list: l2gw_conn = { @@ -666,22 +930,36 @@ class vimconnector(vimconn.VimConnector): "network_id": new_net["network"]["id"], "segmentation_id": str(vlanID), } - new_l2gw_conn = self.neutron.create_l2_gateway_connection({"l2_gateway_connection": l2gw_conn}) - created_items["l2gwconn:" + str(new_l2gw_conn["l2_gateway_connection"]["id"])] = True + new_l2gw_conn = self.neutron.create_l2_gateway_connection( + {"l2_gateway_connection": l2gw_conn} + ) + created_items[ + "l2gwconn:" + + str(new_l2gw_conn["l2_gateway_connection"]["id"]) + ] = True + return new_net["network"]["id"], created_items except Exception as e: # delete l2gw connections (if any) before deleting the network for k, v in created_items.items(): if not v: # skip already deleted continue + try: k_item, _, k_id = k.partition(":") + if k_item == "l2gwconn": self.neutron.delete_l2_gateway_connection(k_id) except Exception as e2: - self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e2).__name__, e2)) + self.logger.error( + "Error deleting l2 gateway connection: {}: {}".format( + type(e2).__name__, e2 + ) + ) + if new_net: - self.neutron.delete_network(new_net['network']['id']) + self.neutron.delete_network(new_net["network"]["id"]) + self._format_exception(e) def 
get_network_list(self, filter_dict={}): @@ -696,17 +974,26 @@ class vimconnector(vimconn.VimConnector): Returns the network list of dictionaries """ self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict)) + try: self._reload_connection() filter_dict_os = filter_dict.copy() + if self.api_version3 and "tenant_id" in filter_dict_os: - filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id') # TODO check + # TODO check + filter_dict_os["project_id"] = filter_dict_os.pop("tenant_id") + net_dict = self.neutron.list_networks(**filter_dict_os) net_list = net_dict["networks"] self.__net_os2mano(net_list) + return net_list - except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, - ConnectionError) as e: + except ( + neExceptions.ConnectionFailed, + ksExceptions.ClientException, + neExceptions.NeutronException, + ConnectionError, + ) as e: self._format_exception(e) def get_network(self, net_id): @@ -715,24 +1002,36 @@ class vimconnector(vimconn.VimConnector): self.logger.debug(" Getting tenant network %s from VIM", net_id) filter_dict = {"id": net_id} net_list = self.get_network_list(filter_dict) + if len(net_list) == 0: - raise vimconn.VimConnNotFoundException("Network '{}' not found".format(net_id)) + raise vimconn.VimConnNotFoundException( + "Network '{}' not found".format(net_id) + ) elif len(net_list) > 1: - raise vimconn.VimConnConflictException("Found more than one network with this criteria") + raise vimconn.VimConnConflictException( + "Found more than one network with this criteria" + ) + net = net_list[0] subnets = [] for subnet_id in net.get("subnets", ()): try: subnet = self.neutron.show_subnet(subnet_id) except Exception as e: - self.logger.error("osconnector.get_network(): Error getting subnet %s %s" % (net_id, str(e))) + self.logger.error( + "osconnector.get_network(): Error getting subnet %s %s" + % (net_id, str(e)) + ) subnet = {"id": subnet_id, "fault": str(e)} + subnets.append(subnet) + net["subnets"] = subnets - net["encapsulation"] = net.get('provider:network_type') - net["encapsulation_type"] = net.get('provider:network_type') - net["segmentation_id"] = net.get('provider:segmentation_id') - net["encapsulation_id"] = net.get('provider:segmentation_id') + net["encapsulation"] = net.get("provider:network_type") + net["encapsulation_type"] = net.get("provider:network_type") + net["segmentation_id"] = net.get("provider:segmentation_id") + net["encapsulation_id"] = net.get("provider:segmentation_id") + return net def delete_network(self, net_id, created_items=None): @@ -743,130 +1042,174 @@ class vimconnector(vimconn.VimConnector): Returns the network identifier or raises an exception upon error or when network is not found """ self.logger.debug("Deleting network '%s' from VIM", net_id) + if created_items is None: created_items = {} + try: self._reload_connection() # delete l2gw connections (if any) before deleting the network for k, v in created_items.items(): if not v: # skip already deleted continue + try: k_item, _, k_id = k.partition(":") if k_item == "l2gwconn": self.neutron.delete_l2_gateway_connection(k_id) except Exception as e: - self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e)) + self.logger.error( + "Error deleting l2 gateway connection: {}: {}".format( + type(e).__name__, e + ) + ) + # delete VM ports attached to this networks before the network ports = self.neutron.list_ports(network_id=net_id) - for p in ports['ports']: + for p in ports["ports"]: try: 

     def delete_network(self, net_id, created_items=None):
@@ -743,130 +1042,174 @@ class vimconnector(vimconn.VimConnector):
         Returns the network identifier or raises an exception upon error or when network is not found
         """
         self.logger.debug("Deleting network '%s' from VIM", net_id)
+
         if created_items is None:
             created_items = {}
+
         try:
             self._reload_connection()
             # delete l2gw connections (if any) before deleting the network
             for k, v in created_items.items():
                 if not v:  # skip already deleted
                     continue
+
                 try:
                     k_item, _, k_id = k.partition(":")
                     if k_item == "l2gwconn":
                         self.neutron.delete_l2_gateway_connection(k_id)
                 except Exception as e:
-                    self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e))
+                    self.logger.error(
+                        "Error deleting l2 gateway connection: {}: {}".format(
+                            type(e).__name__, e
+                        )
                    )
+
             # delete VM ports attached to this networks before the network
             ports = self.neutron.list_ports(network_id=net_id)
-            for p in ports['ports']:
+
+            for p in ports["ports"]:
                 try:
                     self.neutron.delete_port(p["id"])
                 except Exception as e:
                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
+
             self.neutron.delete_network(net_id)
+
             return net_id
-        except (neExceptions.ConnectionFailed, neExceptions.NetworkNotFoundClient, neExceptions.NeutronException,
-                ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+        except (
+            neExceptions.ConnectionFailed,
+            neExceptions.NetworkNotFoundClient,
+            neExceptions.NeutronException,
+            ksExceptions.ClientException,
+            neExceptions.NeutronException,
+            ConnectionError,
+        ) as e:
             self._format_exception(e)

     def refresh_nets_status(self, net_list):
         """Get the status of the networks
-            Params: the list of network identifiers
-            Returns a dictionary with:
-                net_id:         #VIM id of this network
-                status:         #Mandatory. Text with one of:
-                                #  DELETED (not found at vim)
-                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
-                                #  OTHER (Vim reported other status not understood)
-                                #  ERROR (VIM indicates an ERROR status)
-                                #  ACTIVE, INACTIVE, DOWN (admin down),
-                                #  BUILD (on building process)
-                                #
-                error_msg:      #Text with VIM error message, if any. Or the VIM connection ERROR
-                vim_info:       #Text with plain information obtained from vim (yaml.safe_dump)
-
+        Params: the list of network identifiers
+        Returns a dictionary with:
+            net_id:         #VIM id of this network
+            status:         #Mandatory. Text with one of:
+                            #  DELETED (not found at vim)
+                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                            #  OTHER (Vim reported other status not understood)
+                            #  ERROR (VIM indicates an ERROR status)
+                            #  ACTIVE, INACTIVE, DOWN (admin down),
+                            #  BUILD (on building process)
+                            #
+            error_msg:      #Text with VIM error message, if any. Or the VIM connection ERROR
+            vim_info:       #Text with plain information obtained from vim (yaml.safe_dump)
         """
         net_dict = {}
+
         for net_id in net_list:
             net = {}
+
             try:
                 net_vim = self.get_network(net_id)
-                if net_vim['status'] in netStatus2manoFormat:
-                    net["status"] = netStatus2manoFormat[net_vim['status']]
+
+                if net_vim["status"] in netStatus2manoFormat:
+                    net["status"] = netStatus2manoFormat[net_vim["status"]]
                 else:
                     net["status"] = "OTHER"
-                    net["error_msg"] = "VIM status reported " + net_vim['status']
+                    net["error_msg"] = "VIM status reported " + net_vim["status"]

-                if net['status'] == "ACTIVE" and not net_vim['admin_state_up']:
-                    net['status'] = 'DOWN'
+                if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
+                    net["status"] = "DOWN"

-                net['vim_info'] = self.serialize(net_vim)
+                net["vim_info"] = self.serialize(net_vim)

-                if net_vim.get('fault'):  # TODO
-                    net['error_msg'] = str(net_vim['fault'])
+                if net_vim.get("fault"):  # TODO
+                    net["error_msg"] = str(net_vim["fault"])
             except vimconn.VimConnNotFoundException as e:
                 self.logger.error("Exception getting net status: %s", str(e))
-                net['status'] = "DELETED"
-                net['error_msg'] = str(e)
+                net["status"] = "DELETED"
+                net["error_msg"] = str(e)
             except vimconn.VimConnException as e:
                 self.logger.error("Exception getting net status: %s", str(e))
-                net['status'] = "VIM_ERROR"
-                net['error_msg'] = str(e)
+                net["status"] = "VIM_ERROR"
+                net["error_msg"] = str(e)
+
             net_dict[net_id] = net
+
         return net_dict
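[Note: refresh_nets_status() folds the raw Neutron status into the MANO vocabulary: unknown statuses become OTHER with an explanatory error_msg, and an ACTIVE network that is administratively down is reported as DOWN. A self-contained sketch of that mapping; the netStatus2manoFormat table below is an assumed, reduced stand-in for the module-level dict defined elsewhere in this file:]

netStatus2manoFormat = {"ACTIVE": "ACTIVE", "DOWN": "DOWN", "BUILD": "BUILD", "ERROR": "ERROR"}  # assumed subset

def map_net_status(net_vim):
    net = {}
    if net_vim["status"] in netStatus2manoFormat:
        net["status"] = netStatus2manoFormat[net_vim["status"]]
    else:
        net["status"] = "OTHER"
        net["error_msg"] = "VIM status reported " + net_vim["status"]
    # Admin-down wins over ACTIVE, as in the method above.
    if net["status"] == "ACTIVE" and not net_vim["admin_state_up"]:
        net["status"] = "DOWN"
    return net

print(map_net_status({"status": "ACTIVE", "admin_state_up": False}))  # {'status': 'DOWN'}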

     def get_flavor(self, flavor_id):
         """Obtain flavor details from the VIM.
         Returns the flavor dict details"""
         self.logger.debug("Getting flavor '%s'", flavor_id)
+
         try:
             self._reload_connection()
             flavor = self.nova.flavors.find(id=flavor_id)
             # TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+
             return flavor.to_dict()
-        except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException,
-                ConnectionError) as e:
+        except (
+            nvExceptions.NotFound,
+            nvExceptions.ClientException,
+            ksExceptions.ClientException,
+            ConnectionError,
+        ) as e:
             self._format_exception(e)

     def get_flavor_id_from_data(self, flavor_dict):
         """Obtain flavor id that match the flavor description
-           Returns the flavor_id or raises a vimconnNotFoundException
-           flavor_dict: contains the required ram, vcpus, disk
-           If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
-           and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
-           vimconnNotFoundException is raised
+        Returns the flavor_id or raises a vimconnNotFoundException
+        flavor_dict: contains the required ram, vcpus, disk
+        If 'use_existing_flavors' is set to True at config, the closer flavor that provides same or more ram, vcpus
+        and disk is returned. Otherwise a flavor with exactly same ram, vcpus and disk is returned or a
+        vimconnNotFoundException is raised
         """
-        exact_match = False if self.config.get('use_existing_flavors') else True
+        exact_match = False if self.config.get("use_existing_flavors") else True
+
         try:
             self._reload_connection()
             flavor_candidate_id = None
             flavor_candidate_data = (10000, 10000, 10000)
-            flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
+            flavor_target = (
+                flavor_dict["ram"],
+                flavor_dict["vcpus"],
+                flavor_dict["disk"],
+            )
             # numa=None
             extended = flavor_dict.get("extended", {})
+
             if extended:
                 # TODO
-                raise vimconn.VimConnNotFoundException("Flavor with EPA still not implemented")
+                raise vimconn.VimConnNotFoundException(
+                    "Flavor with EPA still not implemented"
+                )
                 # if len(numas) > 1:
                 #     raise vimconn.VimConnNotFoundException("Cannot find any flavor with more than one numa")
                 # numa=numas[0]
                 # numas = extended.get("numas")
+
             for flavor in self.nova.flavors.list():
                 epa = flavor.get_keys()
+
                 if epa:
                     continue
                     # TODO
+
                 flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
                 if flavor_data == flavor_target:
                     return flavor.id
-                elif not exact_match and flavor_target < flavor_data < flavor_candidate_data:
+                elif (
+                    not exact_match
+                    and flavor_target < flavor_data < flavor_candidate_data
+                ):
                     flavor_candidate_id = flavor.id
                     flavor_candidate_data = flavor_data
+
             if not exact_match and flavor_candidate_id:
                 return flavor_candidate_id
-            raise vimconn.VimConnNotFoundException("Cannot find any flavor matching '{}'".format(flavor_dict))
-        except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException,
-                ConnectionError) as e:
+
+            raise vimconn.VimConnNotFoundException(
+                "Cannot find any flavor matching '{}'".format(flavor_dict)
+            )
+        except (
+            nvExceptions.NotFound,
+            nvExceptions.ClientException,
+            ksExceptions.ClientException,
+            ConnectionError,
+        ) as e:
             self._format_exception(e)
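[Note: get_flavor_id_from_data() relies on Python's lexicographic tuple comparison: (ram, vcpus, disk) triples are ordered first by ram, then vcpus, then disk, and the candidate kept is the smallest flavor strictly above the target. A sketch with hypothetical flavors standing in for nova.flavors.list():]

flavors = {"small": (2048, 2, 10), "medium": (4096, 2, 20), "large": (8192, 4, 40)}  # hypothetical
target = (3000, 2, 15)

candidate_id, candidate_data = None, (10000, 10000, 10000)  # same sentinel as above
for flavor_id, data in flavors.items():
    if data == target:
        candidate_id = flavor_id  # exact match wins immediately
        break
    if target < data < candidate_data:  # lexicographic tuple comparison
        candidate_id, candidate_data = flavor_id, data

print(candidate_id)  # medium

[Because the comparison is lexicographic rather than per-component, ram dominates the ordering; that is the behaviour of the code above, not an approximation introduced here.]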

     def process_resource_quota(self, quota, prefix, extra_specs):
@@ -875,13 +1218,15 @@ class vimconnector(vimconn.VimConnector):
         :param extra_specs:
         :return:
         """
-        if 'limit' in quota:
-            extra_specs["quota:" + prefix + "_limit"] = quota['limit']
-        if 'reserve' in quota:
-            extra_specs["quota:" + prefix + "_reservation"] = quota['reserve']
-        if 'shares' in quota:
+        if "limit" in quota:
+            extra_specs["quota:" + prefix + "_limit"] = quota["limit"]
+
+        if "reserve" in quota:
+            extra_specs["quota:" + prefix + "_reservation"] = quota["reserve"]
+
+        if "shares" in quota:
             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
-            extra_specs["quota:" + prefix + "_shares_share"] = quota['shares']
+            extra_specs["quota:" + prefix + "_shares_share"] = quota["shares"]

     def new_flavor(self, flavor_data, change_name_if_used=True):
         """Adds a tenant flavor to openstack VIM
@@ -893,62 +1238,74 @@ class vimconnector(vimconn.VimConnector):
         retry = 0
         max_retries = 3
         name_suffix = 0
+
         try:
-            name = flavor_data['name']
+            name = flavor_data["name"]
+
             while retry < max_retries:
                 retry += 1
                 try:
                     self._reload_connection()
+
                     if change_name_if_used:
                         # get used names
                         fl_names = []
                         fl = self.nova.flavors.list()
+
                         for f in fl:
                             fl_names.append(f.name)
+
                         while name in fl_names:
                             name_suffix += 1
-                            name = flavor_data['name']+"-" + str(name_suffix)
+                            name = flavor_data["name"] + "-" + str(name_suffix)

-                    ram = flavor_data.get('ram', 64)
-                    vcpus = flavor_data.get('vcpus', 1)
+                    ram = flavor_data.get("ram", 64)
+                    vcpus = flavor_data.get("vcpus", 1)
                     extra_specs = {}
                     extended = flavor_data.get("extended")
+
                     if extended:
                         numas = extended.get("numas")
+
                         if numas:
                             numa_nodes = len(numas)
+
                             if numa_nodes > 1:
                                 return -1, "Can not add flavor with more than one numa"
+
                             extra_specs["hw:numa_nodes"] = str(numa_nodes)
                             extra_specs["hw:mem_page_size"] = "large"
                             extra_specs["hw:cpu_policy"] = "dedicated"
                             extra_specs["hw:numa_mempolicy"] = "strict"
+
                             if self.vim_type == "VIO":
-                                extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+                                extra_specs[
+                                    "vmware:extra_config"
+                                ] = '{"numa.nodeAffinity":"0"}'
                                 extra_specs["vmware:latency_sensitivity_level"] = "high"
+
                             for numa in numas:
                                 # overwrite ram and vcpus
-                                # check if key 'memory' is present in numa else use ram value at flavor
-                                if 'memory' in numa:
-                                    ram = numa['memory']*1024
+                                # check if key "memory" is present in numa else use ram value at flavor
+                                if "memory" in numa:
+                                    ram = numa["memory"] * 1024
                                 # See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/
                                 # implemented/virt-driver-cpu-thread-pinning.html
                                 extra_specs["hw:cpu_sockets"] = 1
-                                if 'paired-threads' in numa:
-                                    vcpus = numa['paired-threads']*2
-                                    # cpu_thread_policy "require" implies that the compute node must have an
+
+                                if "paired-threads" in numa:
+                                    vcpus = numa["paired-threads"] * 2
+                                    # cpu_thread_policy "require" implies that the compute node must have an
                                     # SMT architecture
                                     extra_specs["hw:cpu_thread_policy"] = "require"
                                     extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif 'cores' in numa:
-                                    vcpus = numa['cores']
-                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
+                                elif "cores" in numa:
+                                    vcpus = numa["cores"]
+                                    # cpu_thread_policy "prefer" implies that the host must not have an SMT
                                     # architecture, or a non-SMT architecture will be emulated
                                     extra_specs["hw:cpu_thread_policy"] = "isolate"
                                     extra_specs["hw:cpu_policy"] = "dedicated"
-                                elif 'threads' in numa:
-                                    vcpus = numa['threads']
+                                elif "threads" in numa:
+                                    vcpus = numa["threads"]
                                     # cpu_thread_policy "prefer" implies that the host may or may not have an SMT
                                     # architecture
                                     extra_specs["hw:cpu_thread_policy"] = "prefer"
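[Note: the NUMA branch of new_flavor() above turns an OSM NUMA descriptor into Nova extra_specs and recomputes ram/vcpus. A condensed, hedged sketch of that translation; the numa dict is a hypothetical descriptor, the helper name is introduced here for illustration, and the GiB-to-MiB reading of the * 1024 factor is an assumption:]

def numa_to_extra_specs(numa, ram=64, vcpus=1):
    extra_specs = {
        "hw:numa_nodes": "1",
        "hw:mem_page_size": "large",
        "hw:cpu_policy": "dedicated",
        "hw:numa_mempolicy": "strict",
        "hw:cpu_sockets": 1,
    }
    if "memory" in numa:
        ram = numa["memory"] * 1024  # assumed: NUMA memory in GiB, flavor RAM in MiB
    if "paired-threads" in numa:
        vcpus = numa["paired-threads"] * 2
        extra_specs["hw:cpu_thread_policy"] = "require"  # needs an SMT host
    elif "cores" in numa:
        vcpus = numa["cores"]
        extra_specs["hw:cpu_thread_policy"] = "isolate"  # non-SMT host (or emulated)
    elif "threads" in numa:
        vcpus = numa["threads"]
        extra_specs["hw:cpu_thread_policy"] = "prefer"  # SMT optional
    return ram, vcpus, extra_specs

print(numa_to_extra_specs({"memory": 4, "paired-threads": 2}))
# (4096, 4, {..., 'hw:cpu_thread_policy': 'require'})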
@@ -957,45 +1314,69 @@ class vimconnector(vimconn.VimConnector):
                                 # if interface["dedicated"]=="yes":
                                 #     raise vimconn.VimConnException("Passthrough interfaces are not supported
                                 #     for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
-                                # #TODO, add the key 'pci_passthrough:alias"=