Reformatting RO 34/10334/3
author sousaedu <eduardo.sousa@canonical.com>
Wed, 17 Feb 2021 14:05:18 +0000 (15:05 +0100)
committer sousaedu <eduardo.sousa@canonical.com>
Wed, 17 Feb 2021 14:52:46 +0000 (15:52 +0100)
Change-Id: I86e6a102b5bf2e0221b29096bbb132ca656844c5
Signed-off-by: sousaedu <eduardo.sousa@canonical.com>
74 files changed:
NG-RO/osm_ng_ro/__init__.py
NG-RO/osm_ng_ro/html_out.py
NG-RO/osm_ng_ro/ns.py
NG-RO/osm_ng_ro/ns_thread.py
NG-RO/osm_ng_ro/ro_main.py
NG-RO/osm_ng_ro/validation.py
NG-RO/osm_ng_ro/vim_admin.py
NG-RO/setup.py
NG-RO/tox.ini
RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaConfigLet.py
RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/aristaTask.py
RO-SDN-arista_cloudvision/osm_rosdn_arista_cloudvision/wimconn_arista.py
RO-SDN-arista_cloudvision/setup.py
RO-SDN-arista_cloudvision/tox.ini
RO-SDN-dpb/osm_rosdn_dpb/wimconn_dpb.py
RO-SDN-dpb/setup.py
RO-SDN-dpb/tox.ini
RO-SDN-dynpac/osm_rosdn_dynpac/wimconn_dynpac.py
RO-SDN-dynpac/setup.py
RO-SDN-dynpac/tox.ini
RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/floodlight_of.py
RO-SDN-floodlight_openflow/osm_rosdn_floodlightof/sdnconn_floodlightof.py
RO-SDN-floodlight_openflow/setup.py
RO-SDN-floodlight_openflow/tox.ini
RO-SDN-ietfl2vpn/osm_rosdn_ietfl2vpn/wimconn_ietfl2vpn.py
RO-SDN-ietfl2vpn/setup.py
RO-SDN-ietfl2vpn/tox.ini
RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/rest_lib.py
RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_api.py
RO-SDN-juniper_contrail/osm_rosdn_juniper_contrail/sdn_assist_juniper_contrail.py
RO-SDN-juniper_contrail/setup.py
RO-SDN-juniper_contrail/tox.ini
RO-SDN-odl_openflow/osm_rosdn_odlof/odl_of.py
RO-SDN-odl_openflow/osm_rosdn_odlof/sdnconn_odlof.py
RO-SDN-odl_openflow/setup.py
RO-SDN-odl_openflow/tox.ini
RO-SDN-onos_openflow/osm_rosdn_onosof/onos_of.py
RO-SDN-onos_openflow/osm_rosdn_onosof/sdnconn_onosof.py
RO-SDN-onos_openflow/setup.py
RO-SDN-onos_openflow/tox.ini
RO-SDN-onos_vpls/osm_rosdn_onos_vpls/sdn_assist_onos_vpls.py
RO-SDN-onos_vpls/setup.py
RO-SDN-onos_vpls/tox.ini
RO-VIM-aws/osm_rovim_aws/vimconn_aws.py
RO-VIM-aws/setup.py
RO-VIM-aws/tox.ini
RO-VIM-azure/osm_rovim_azure/vimconn_azure.py
RO-VIM-azure/setup.py
RO-VIM-azure/tox.ini
RO-VIM-fos/osm_rovim_fos/vimconn_fos.py
RO-VIM-fos/setup.py
RO-VIM-fos/tox.ini
RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py
RO-VIM-opennebula/setup.py
RO-VIM-opennebula/tox.ini
RO-VIM-openstack/osm_rovim_openstack/tests/test_vimconn_openstack.py
RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
RO-VIM-openstack/setup.py
RO-VIM-openstack/tox.ini
RO-VIM-openvim/osm_rovim_openvim/vimconn_openvim.py
RO-VIM-openvim/setup.py
RO-VIM-openvim/tox.ini
RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware.py
RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
RO-VIM-vmware/setup.py
RO-VIM-vmware/tox.ini
RO-plugin/osm_ro_plugin/openflow_conn.py
RO-plugin/osm_ro_plugin/sdn_dummy.py
RO-plugin/osm_ro_plugin/sdn_failing.py
RO-plugin/osm_ro_plugin/sdnconn.py
RO-plugin/osm_ro_plugin/vim_dummy.py
RO-plugin/osm_ro_plugin/vimconn.py
RO-plugin/setup.py
RO-plugin/tox.ini

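The hunks below apply a single uniform style across every listed module: single-quoted strings become double-quoted, long calls are wrapped with one argument per line plus a trailing comma, and blank lines are added between logical blocks. The commit message does not name a tool, but the conventions match an automated formatter such as black. As a minimal sketch only, the hypothetical check_driver helper below (not code from this change) shows the before/after style:

    # Hypothetical example: it is not part of the commit and only illustrates
    # the formatting conventions visible in the hunks below (assumed to match
    # black's defaults).


    def check_driver(config):
        """Return the configured database driver, raising on unknown values."""
        driver = config["database"]["driver"]

        # Pre-reformatting style would be a single long line with single quotes:
        #   raise ValueError('Invalid driver {}'.format(driver))
        # Post-reformatting style: double quotes and wrapped arguments.
        if driver not in ("mongo", "memory"):
            raise ValueError(
                "Invalid configuration param '{}' at '[database]':'driver'".format(
                    driver
                )
            )

        return driver


    if __name__ == "__main__":
        print(check_driver({"database": {"driver": "mongo"}}))
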
diff --git a/NG-RO/osm_ng_ro/__init__.py b/NG-RO/osm_ng_ro/__init__.py
index d2ac4c4..3f15629 100644
 # under the License.
 ##
 
-version = '8.0.1.post0'
-version_date = '2020-06-29'
+version = "8.0.1.post0"
+version_date = "2020-06-29"
 
 # Obtain installed package version. Ignore if error, e.g. pkg_resources not installed
 try:
     from pkg_resources import get_distribution
+
     version = get_distribution("osm_ng_ro").version
 except Exception:
     pass
diff --git a/NG-RO/osm_ng_ro/html_out.py b/NG-RO/osm_ng_ro/html_out.py
index 4059400..132bf68 100644
@@ -118,18 +118,37 @@ def format(data, request, response, toke_info):
     :param response: cherrypy response
     :return: string with the html response
     """
-    response.headers["Content-Type"] = 'text/html'
+    response.headers["Content-Type"] = "text/html"
+
     if response.status == HTTPStatus.UNAUTHORIZED.value:
-        if response.headers.get("WWW-Authenticate") and request.config.get("auth.allow_basic_authentication"):
-            response.headers["WWW-Authenticate"] = "Basic" + response.headers["WWW-Authenticate"][6:]
+        if response.headers.get("WWW-Authenticate") and request.config.get(
+            "auth.allow_basic_authentication"
+        ):
+            response.headers["WWW-Authenticate"] = (
+                "Basic" + response.headers["WWW-Authenticate"][6:]
+            )
+
             return
         else:
             return html_auth2.format(error=data)
+
     if request.path_info in ("/version", "/system"):
-        return "<pre>" + yaml.safe_dump(data, explicit_start=False, indent=4, default_flow_style=False) + "</pre>"
+        return (
+            "<pre>"
+            + yaml.safe_dump(
+                data, explicit_start=False, indent=4, default_flow_style=False
+            )
+            + "</pre>"
+        )
+
     body = html_body.format(item=request.path_info)
+
     if response.status and response.status > 202:
-        body += html_body_error.format(yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False))
+        body += html_body_error.format(
+            yaml.safe_dump(
+                data, explicit_start=True, indent=4, default_flow_style=False
+            )
+        )
     elif isinstance(data, (list, tuple)):
         # if request.path_info == "/ns/v1/deploy":
         #     body += html_upload_body.format(request.path_info + "_content", "VNFD")
@@ -142,40 +161,61 @@ def format(data, request, response, toke_info):
                 data_id = k.pop("_id", None)
             elif isinstance(k, str):
                 data_id = k
+
             if request.path_info == "/ns/v1/deploy":
-                body += '<p> <a href="/ro/{url}/{id}?METHOD=DELETE"> <img src="/ro/static/delete.png" height="25"' \
-                        ' width="25"> </a><a href="/ro/{url}/{id}">{id}</a>: {t} </p>' \
-                    .format(url=request.path_info, id=data_id, t=html_escape(str(k)))
+                body += (
+                    '<p> <a href="/ro/{url}/{id}?METHOD=DELETE"> <img src="/ro/static/delete.png" height="25"'
+                    ' width="25"> </a><a href="/ro/{url}/{id}">{id}</a>: {t} </p>'.format(
+                        url=request.path_info, id=data_id, t=html_escape(str(k))
+                    )
+                )
             else:
-                body += '<p> <a href="/ro/{url}/{id}">{id}</a>: {t} </p>'.format(url=request.path_info, id=data_id,
-                                                                                 t=html_escape(str(k)))
+                body += '<p> <a href="/ro/{url}/{id}">{id}</a>: {t} </p>'.format(
+                    url=request.path_info, id=data_id, t=html_escape(str(k))
+                )
     elif isinstance(data, dict):
         if "Location" in response.headers:
             body += '<a href="{}"> show </a>'.format(response.headers["Location"])
         else:
-            body += '<a href="/ro/{}?METHOD=DELETE"> <img src="/ro/static/delete.png" height="25" width="25"> </a>'\
-                .format(request.path_info[:request.path_info.rfind("/")])
-            if request.path_info.startswith("/nslcm/v1/ns_instances_content/") or \
-                    request.path_info.startswith("/nslcm/v1/ns_instances/"):
-                _id = request.path_info[request.path_info.rfind("/")+1:]
+            body += (
+                '<a href="/ro/{}?METHOD=DELETE"> <img src="/ro/static/delete.png" height="25" width="25"> </a>'
+            ).format(request.path_info[: request.path_info.rfind("/")])
+
+            if request.path_info.startswith(
+                "/nslcm/v1/ns_instances_content/"
+            ) or request.path_info.startswith("/nslcm/v1/ns_instances/"):
+                _id = request.path_info[request.path_info.rfind("/") + 1 :]
                 body += html_nslcmop_body.format(id=_id)
-            elif request.path_info.startswith("/nsilcm/v1/netslice_instances_content/") or \
-                    request.path_info.startswith("/nsilcm/v1/netslice_instances/"):
-                _id = request.path_info[request.path_info.rfind("/")+1:]
+            elif request.path_info.startswith(
+                "/nsilcm/v1/netslice_instances_content/"
+            ) or request.path_info.startswith("/nsilcm/v1/netslice_instances/"):
+                _id = request.path_info[request.path_info.rfind("/") + 1 :]
                 body += html_nsilcmop_body.format(id=_id)
-        body += "<pre>" + html_escape(yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False)) + \
-                "</pre>"
+
+        body += (
+            "<pre>"
+            + html_escape(
+                yaml.safe_dump(
+                    data, explicit_start=True, indent=4, default_flow_style=False
+                )
+            )
+            + "</pre>"
+        )
     elif data is None:
         if request.method == "DELETE" or "METHOD=DELETE" in request.query_string:
             body += "<pre> deleted </pre>"
     else:
         body = html_escape(str(data))
+
     user_text = "    "
+
     if toke_info:
         if toke_info.get("username"):
             user_text += "user: {}".format(toke_info.get("username"))
+
         if toke_info.get("project_id"):
             user_text += ", project: {}".format(toke_info.get("project_name"))
+
     return html_start.format(user_text) + body + html_end
     # yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False)
     # tags=False,
diff --git a/NG-RO/osm_ng_ro/ns.py b/NG-RO/osm_ng_ro/ns.py
index 1c2e960..660a5f5 100644
 # limitations under the License.
 ##
 
-import logging
 # import yaml
+import logging
 from traceback import format_exc as traceback_format_exc
 from osm_ng_ro.ns_thread import NsWorker, NsWorkerException, deep_get
 from osm_ng_ro.validation import validate_input, deploy_schema
-from osm_common import dbmongo, dbmemory, fslocal, fsmongo, msglocal, msgkafka, version as common_version
+from osm_common import (
+    dbmongo,
+    dbmemory,
+    fslocal,
+    fsmongo,
+    msglocal,
+    msgkafka,
+    version as common_version,
+)
 from osm_common.dbbase import DbException
 from osm_common.fsbase import FsException
 from osm_common.msgbase import MsgException
@@ -30,7 +38,13 @@ from uuid import uuid4
 from threading import Lock
 from random import choice as random_choice
 from time import time
-from jinja2 import Environment, TemplateError, TemplateNotFound, StrictUndefined, UndefinedError
+from jinja2 import (
+    Environment,
+    TemplateError,
+    TemplateNotFound,
+    StrictUndefined,
+    UndefinedError,
+)
 from cryptography.hazmat.primitives import serialization as crypto_serialization
 from cryptography.hazmat.primitives.asymmetric import rsa
 from cryptography.hazmat.backends import default_backend as crypto_default_backend
@@ -40,7 +54,6 @@ min_common_version = "0.1.16"
 
 
 class NsException(Exception):
-
     def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST):
         self.http_code = http_code
         super(Exception, self).__init__(message)
@@ -58,10 +71,12 @@ def get_process_id():
             text_id_ = f.readline()
             _, _, text_id = text_id_.rpartition("/")
             text_id = text_id.replace("\n", "")[:12]
+
             if text_id:
                 return text_id
     except Exception:
         pass
+
     # Return a random id
     return "".join(random_choice("0123456789abcdef") for _ in range(12))
 
@@ -69,13 +84,14 @@ def get_process_id():
 def versiontuple(v):
     """utility for compare dot separate versions. Fills with zeros to proper number comparison"""
     filled = []
+
     for point in v.split("."):
         filled.append(point.zfill(8))
+
     return tuple(filled)
 
 
 class Ns(object):
-
     def __init__(self):
         self.db = None
         self.fs = None
@@ -105,10 +121,14 @@ class Ns(object):
         self.config = config
         self.config["process_id"] = get_process_id()  # used for HA identity
         self.logger = logging.getLogger("ro.ns")
+
         # check right version of common
         if versiontuple(common_version) < versiontuple(min_common_version):
-            raise NsException("Not compatible osm/common version '{}'. Needed '{}' or higher".format(
-                common_version, min_common_version))
+            raise NsException(
+                "Not compatible osm/common version '{}'. Needed '{}' or higher".format(
+                    common_version, min_common_version
+                )
+            )
 
         try:
             if not self.db:
@@ -119,8 +139,12 @@ class Ns(object):
                     self.db = dbmemory.DbMemory()
                     self.db.db_connect(config["database"])
                 else:
-                    raise NsException("Invalid configuration param '{}' at '[database]':'driver'".format(
-                        config["database"]["driver"]))
+                    raise NsException(
+                        "Invalid configuration param '{}' at '[database]':'driver'".format(
+                            config["database"]["driver"]
+                        )
+                    )
+
             if not self.fs:
                 if config["storage"]["driver"] == "local":
                     self.fs = fslocal.FsLocal()
@@ -131,8 +155,12 @@ class Ns(object):
                 elif config["storage"]["driver"] is None:
                     pass
                 else:
-                    raise NsException("Invalid configuration param '{}' at '[storage]':'driver'".format(
-                        config["storage"]["driver"]))
+                    raise NsException(
+                        "Invalid configuration param '{}' at '[storage]':'driver'".format(
+                            config["storage"]["driver"]
+                        )
+                    )
+
             if not self.msg:
                 if config["message"]["driver"] == "local":
                     self.msg = msglocal.MsgLocal()
@@ -141,15 +169,18 @@ class Ns(object):
                     self.msg = msgkafka.MsgKafka()
                     self.msg.connect(config["message"])
                 else:
-                    raise NsException("Invalid configuration param '{}' at '[message]':'driver'".format(
-                        config["message"]["driver"]))
+                    raise NsException(
+                        "Invalid configuration param '{}' at '[message]':'driver'".format(
+                            config["message"]["driver"]
+                        )
+                    )
 
             # TODO load workers to deal with existing database tasks
 
             self.write_lock = Lock()
         except (DbException, FsException, MsgException) as e:
             raise NsException(str(e), http_code=e.http_code)
-    
+
     def get_assigned_vims(self):
         return list(self.vims_assigned.keys())
 
@@ -157,13 +188,17 @@ class Ns(object):
         try:
             if self.db:
                 self.db.db_disconnect()
+
             if self.fs:
                 self.fs.fs_disconnect()
+
             if self.msg:
                 self.msg.disconnect()
+
             self.write_lock = None
         except (DbException, FsException, MsgException) as e:
             raise NsException(str(e), http_code=e.http_code)
+
         for worker in self.workers:
             worker.insert_task(("terminate",))
 
@@ -174,20 +209,34 @@ class Ns(object):
         return the index of the assigned worker thread. Worker threads are stored at self.workers
         """
         # Look for a thread in idle status
-        worker_id = next((i for i in range(len(self.workers)) if self.workers[i] and self.workers[i].idle), None)
+        worker_id = next(
+            (
+                i
+                for i in range(len(self.workers))
+                if self.workers[i] and self.workers[i].idle
+            ),
+            None,
+        )
+
         if worker_id is not None:
             # unset idle status to avoid race conditions
             self.workers[worker_id].idle = False
         else:
             worker_id = len(self.workers)
+
             if worker_id < self.config["global"]["server.ns_threads"]:
                 # create a new worker
-                self.workers.append(NsWorker(worker_id, self.config, self.plugins, self.db))
+                self.workers.append(
+                    NsWorker(worker_id, self.config, self.plugins, self.db)
+                )
                 self.workers[worker_id].start()
             else:
                 # reached maximum number of threads, assign VIM to an existing one
                 worker_id = self.next_worker
-                self.next_worker = (self.next_worker + 1) % self.config["global"]["server.ns_threads"]
+                self.next_worker = (self.next_worker + 1) % self.config["global"][
+                    "server.ns_threads"
+                ]
+
         return worker_id
 
     def assign_vim(self, target_id):
@@ -230,12 +279,18 @@ class Ns(object):
     def unload_unused_vims(self):
         with self.write_lock:
             vims_to_unload = []
+
             for target_id in self.vims_assigned:
-                if not self.db.get_one("ro_tasks",
-                                       q_filter={"target_id": target_id,
-                                                 "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED']},
-                                       fail_on_empty=False):
+                if not self.db.get_one(
+                    "ro_tasks",
+                    q_filter={
+                        "target_id": target_id,
+                        "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+                    },
+                    fail_on_empty=False,
+                ):
                     vims_to_unload.append(target_id)
+
             for target_id in vims_to_unload:
                 self._unload_vim(target_id)
 
@@ -248,70 +303,86 @@ class Ns(object):
         vnfd_id, _, other = where.partition(":")
         _type, _, name = other.partition(":")
         vnfd = self.db.get_one("vnfds", {"_id": vnfd_id})
+
         if _type == "file":
             base_folder = vnfd["_admin"]["storage"]
-            cloud_init_file = "{}/{}/cloud_init/{}".format(base_folder["folder"], base_folder["pkg-dir"], name)
+            cloud_init_file = "{}/{}/cloud_init/{}".format(
+                base_folder["folder"], base_folder["pkg-dir"], name
+            )
+
             if not self.fs:
-                raise NsException("Cannot read file '{}'. Filesystem not loaded, change configuration at storage.driver"
-                                  .format(cloud_init_file))
+                raise NsException(
+                    "Cannot read file '{}'. Filesystem not loaded, change configuration at storage.driver".format(
+                        cloud_init_file
+                    )
+                )
+
             with self.fs.file_open(cloud_init_file, "r") as ci_file:
                 cloud_init_content = ci_file.read()
         elif _type == "vdu":
             cloud_init_content = vnfd["vdu"][int(name)]["cloud-init"]
         else:
             raise NsException("Mismatch descriptor for cloud init: {}".format(where))
+
         return cloud_init_content
 
     def _parse_jinja2(self, cloud_init_content, params, context):
-
         try:
             env = Environment(undefined=StrictUndefined)
             template = env.from_string(cloud_init_content)
+
             return template.render(params or {})
         except UndefinedError as e:
             raise NsException(
                 "Variable '{}' defined at vnfd='{}' must be provided in the instantiation parameters"
-                "inside the 'additionalParamsForVnf' block".format(e, context))
+                "inside the 'additionalParamsForVnf' block".format(e, context)
+            )
         except (TemplateError, TemplateNotFound) as e:
-            raise NsException("Error parsing Jinja2 to cloud-init content at vnfd='{}': {}".format(context, e))
+            raise NsException(
+                "Error parsing Jinja2 to cloud-init content at vnfd='{}': {}".format(
+                    context, e
+                )
+            )
 
     def _create_db_ro_nsrs(self, nsr_id, now):
         try:
             key = rsa.generate_private_key(
-                backend=crypto_default_backend(),
-                public_exponent=65537,
-                key_size=2048
+                backend=crypto_default_backend(), public_exponent=65537, key_size=2048
             )
             private_key = key.private_bytes(
                 crypto_serialization.Encoding.PEM,
                 crypto_serialization.PrivateFormat.PKCS8,
-                crypto_serialization.NoEncryption())
+                crypto_serialization.NoEncryption(),
+            )
             public_key = key.public_key().public_bytes(
                 crypto_serialization.Encoding.OpenSSH,
-                crypto_serialization.PublicFormat.OpenSSH
+                crypto_serialization.PublicFormat.OpenSSH,
             )
-            private_key = private_key.decode('utf8')
+            private_key = private_key.decode("utf8")
             # Change first line because Paramiko needs an explicit start with 'BEGIN RSA PRIVATE KEY'
             i = private_key.find("\n")
             private_key = "-----BEGIN RSA PRIVATE KEY-----" + private_key[i:]
-            public_key = public_key.decode('utf8')
+            public_key = public_key.decode("utf8")
         except Exception as e:
             raise NsException("Cannot create ssh-keys: {}".format(e))
 
         schema_version = "1.1"
-        private_key_encrypted = self.db.encrypt(private_key, schema_version=schema_version, salt=nsr_id)
+        private_key_encrypted = self.db.encrypt(
+            private_key, schema_version=schema_version, salt=nsr_id
+        )
         db_content = {
             "_id": nsr_id,
             "_admin": {
                 "created": now,
                 "modified": now,
-                "schema_version": schema_version
+                "schema_version": schema_version,
             },
             "public_key": public_key,
             "private_key": private_key_encrypted,
-            "actions": []
+            "actions": [],
         }
         self.db.create("ro_nsrs", db_content)
+
         return db_content
 
     def deploy(self, session, indata, version, nsr_id, *args, **kwargs):
@@ -320,14 +391,15 @@ class Ns(object):
         action_id = indata.get("action_id", str(uuid4()))
         task_index = 0
         # get current deployment
-        db_nsr_update = {}        # update operation on nsrs
+        db_nsr_update = {}  # update operation on nsrs
         db_vnfrs_update = {}
-        db_vnfrs = {}     # vnf's info indexed by _id
+        db_vnfrs = {}  # vnf's info indexed by _id
         nb_ro_tasks = 0  # for logging
         vdu2cloud_init = indata.get("cloud_init_content") or {}
-        step = ''
+        step = ""
         logging_text = "Task deploy nsr_id={} action_id={} ".format(nsr_id, action_id)
         self.logger.debug(logging_text + "Enter")
+
         try:
             step = "Getting ns and vnfr record from db"
             db_nsr = self.db.get_one("nsrs", {"_id": nsr_id})
@@ -336,29 +408,47 @@ class Ns(object):
             # read from db: vnf's of this ns
             step = "Getting vnfrs from db"
             db_vnfrs_list = self.db.get_list("vnfrs", {"nsr-id-ref": nsr_id})
+
             if not db_vnfrs_list:
                 raise NsException("Cannot obtain associated VNF for ns")
+
             for vnfr in db_vnfrs_list:
                 db_vnfrs[vnfr["_id"]] = vnfr
                 db_vnfrs_update[vnfr["_id"]] = {}
+
             now = time()
             db_ro_nsr = self.db.get_one("ro_nsrs", {"_id": nsr_id}, fail_on_empty=False)
+
             if not db_ro_nsr:
                 db_ro_nsr = self._create_db_ro_nsrs(nsr_id, now)
+
             ro_nsr_public_key = db_ro_nsr["public_key"]
 
             # check that action_id is not in the list of actions. Suffixed with :index
             if action_id in db_ro_nsr["actions"]:
                 index = 1
+
                 while True:
                     new_action_id = "{}:{}".format(action_id, index)
+
                     if new_action_id not in db_ro_nsr["actions"]:
                         action_id = new_action_id
-                        self.logger.debug(logging_text + "Changing action_id in use to {}".format(action_id))
+                        self.logger.debug(
+                            logging_text
+                            + "Changing action_id in use to {}".format(action_id)
+                        )
                         break
+
                     index += 1
 
-            def _create_task(target_id, item, action, target_record, target_record_id, extra_dict=None):
+            def _create_task(
+                target_id,
+                item,
+                action,
+                target_record,
+                target_record_id,
+                extra_dict=None,
+            ):
                 nonlocal task_index
                 nonlocal action_id
                 nonlocal nsr_id
@@ -374,9 +464,12 @@ class Ns(object):
                     "target_record": target_record,
                     "target_record_id": target_record_id,
                 }
+
                 if extra_dict:
-                    task.update(extra_dict)   # params, find_params, depends_on
+                    task.update(extra_dict)  # params, find_params, depends_on
+
                 task_index += 1
+
                 return task
 
             def _create_ro_task(target_id, task):
@@ -404,20 +497,28 @@ class Ns(object):
                     "to_check_at": now,
                     "tasks": [task],
                 }
+
                 return db_ro_task
 
             def _process_image_params(target_image, vim_info, target_record_id):
                 find_params = {}
+
                 if target_image.get("image"):
                     find_params["filter_dict"] = {"name": target_image.get("image")}
+
                 if target_image.get("vim_image_id"):
-                    find_params["filter_dict"] = {"id": target_image.get("vim_image_id")}
+                    find_params["filter_dict"] = {
+                        "id": target_image.get("vim_image_id")
+                    }
+
                 if target_image.get("image_checksum"):
-                    find_params["filter_dict"] = {"checksum": target_image.get("image_checksum")}
+                    find_params["filter_dict"] = {
+                        "checksum": target_image.get("image_checksum")
+                    }
+
                 return {"find_params": find_params}
 
             def _process_flavor_params(target_flavor, vim_info, target_record_id):
-
                 def _get_resource_allocation_params(quota_descriptor):
                     """
                     read the quota_descriptor from vnfd and fetch the resource allocation properties from the
@@ -426,12 +527,16 @@ class Ns(object):
                     :return: quota params for limit, reserve, shares from the descriptor object
                     """
                     quota = {}
+
                     if quota_descriptor.get("limit"):
                         quota["limit"] = int(quota_descriptor["limit"])
+
                     if quota_descriptor.get("reserve"):
                         quota["reserve"] = int(quota_descriptor["reserve"])
+
                     if quota_descriptor.get("shares"):
                         quota["shares"] = int(quota_descriptor["shares"])
+
                     return quota
 
                 flavor_data = {
@@ -441,59 +546,121 @@ class Ns(object):
                 }
                 numa = {}
                 extended = {}
+
                 if target_flavor.get("guest-epa"):
                     extended = {}
                     epa_vcpu_set = False
+
                     if target_flavor["guest-epa"].get("numa-node-policy"):
-                        numa_node_policy = target_flavor["guest-epa"].get("numa-node-policy")
+                        numa_node_policy = target_flavor["guest-epa"].get(
+                            "numa-node-policy"
+                        )
+
                         if numa_node_policy.get("node"):
                             numa_node = numa_node_policy["node"][0]
+
                             if numa_node.get("num-cores"):
                                 numa["cores"] = numa_node["num-cores"]
                                 epa_vcpu_set = True
+
                             if numa_node.get("paired-threads"):
-                                if numa_node["paired-threads"].get("num-paired-threads"):
-                                    numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
+                                if numa_node["paired-threads"].get(
+                                    "num-paired-threads"
+                                ):
+                                    numa["paired-threads"] = int(
+                                        numa_node["paired-threads"][
+                                            "num-paired-threads"
+                                        ]
+                                    )
                                     epa_vcpu_set = True
-                                if len(numa_node["paired-threads"].get("paired-thread-ids")):
+
+                                if len(
+                                    numa_node["paired-threads"].get("paired-thread-ids")
+                                ):
                                     numa["paired-threads-id"] = []
-                                    for pair in numa_node["paired-threads"]["paired-thread-ids"]:
+
+                                    for pair in numa_node["paired-threads"][
+                                        "paired-thread-ids"
+                                    ]:
                                         numa["paired-threads-id"].append(
-                                            (str(pair["thread-a"]), str(pair["thread-b"]))
+                                            (
+                                                str(pair["thread-a"]),
+                                                str(pair["thread-b"]),
+                                            )
                                         )
+
                             if numa_node.get("num-threads"):
                                 numa["threads"] = int(numa_node["num-threads"])
                                 epa_vcpu_set = True
+
                             if numa_node.get("memory-mb"):
-                                numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1)
+                                numa["memory"] = max(
+                                    int(numa_node["memory-mb"] / 1024), 1
+                                )
+
                     if target_flavor["guest-epa"].get("mempage-size"):
-                        extended["mempage-size"] = target_flavor["guest-epa"].get("mempage-size")
-                    if target_flavor["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set:
-                        if target_flavor["guest-epa"]["cpu-pinning-policy"] == "DEDICATED":
-                            if target_flavor["guest-epa"].get("cpu-thread-pinning-policy") and \
-                                    target_flavor["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER":
+                        extended["mempage-size"] = target_flavor["guest-epa"].get(
+                            "mempage-size"
+                        )
+
+                    if (
+                        target_flavor["guest-epa"].get("cpu-pinning-policy")
+                        and not epa_vcpu_set
+                    ):
+                        if (
+                            target_flavor["guest-epa"]["cpu-pinning-policy"]
+                            == "DEDICATED"
+                        ):
+                            if (
+                                target_flavor["guest-epa"].get(
+                                    "cpu-thread-pinning-policy"
+                                )
+                                and target_flavor["guest-epa"][
+                                    "cpu-thread-pinning-policy"
+                                ]
+                                != "PREFER"
+                            ):
                                 numa["cores"] = max(flavor_data["vcpus"], 1)
                             else:
                                 numa["threads"] = max(flavor_data["vcpus"], 1)
+
                             epa_vcpu_set = True
+
                     if target_flavor["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
-                        cpuquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("cpu-quota"))
+                        cpuquota = _get_resource_allocation_params(
+                            target_flavor["guest-epa"].get("cpu-quota")
+                        )
+
                         if cpuquota:
                             extended["cpu-quota"] = cpuquota
+
                     if target_flavor["guest-epa"].get("mem-quota"):
-                        vduquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("mem-quota"))
+                        vduquota = _get_resource_allocation_params(
+                            target_flavor["guest-epa"].get("mem-quota")
+                        )
+
                         if vduquota:
                             extended["mem-quota"] = vduquota
+
                     if target_flavor["guest-epa"].get("disk-io-quota"):
-                        diskioquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("disk-io-quota"))
+                        diskioquota = _get_resource_allocation_params(
+                            target_flavor["guest-epa"].get("disk-io-quota")
+                        )
+
                         if diskioquota:
                             extended["disk-io-quota"] = diskioquota
+
                     if target_flavor["guest-epa"].get("vif-quota"):
-                        vifquota = _get_resource_allocation_params(target_flavor["guest-epa"].get("vif-quota"))
+                        vifquota = _get_resource_allocation_params(
+                            target_flavor["guest-epa"].get("vif-quota")
+                        )
+
                         if vifquota:
                             extended["vif-quota"] = vifquota
+
                 if numa:
                     extended["numas"] = [numa]
+
                 if extended:
                     flavor_data["extended"] = extended
 
@@ -501,25 +668,38 @@ class Ns(object):
                 flavor_data_name = flavor_data.copy()
                 flavor_data_name["name"] = target_flavor["name"]
                 extra_dict["params"] = {"flavor_data": flavor_data_name}
+
                 return extra_dict
 
             def _ip_profile_2_ro(ip_profile):
                 if not ip_profile:
                     return None
+
                 ro_ip_profile = {
-                    "ip_version": "IPv4" if "v4" in ip_profile.get("ip-version", "ipv4") else "IPv6",
+                    "ip_version": "IPv4"
+                    if "v4" in ip_profile.get("ip-version", "ipv4")
+                    else "IPv6",
                     "subnet_address": ip_profile.get("subnet-address"),
                     "gateway_address": ip_profile.get("gateway-address"),
                     "dhcp_enabled": ip_profile["dhcp-params"].get("enabled", True)
-                    if "dhcp_params" in ip_profile else False,
+                    if "dhcp_params" in ip_profile
+                    else False,
                     "dhcp_start_address": ip_profile["dhcp-params"].get("start-address")
-                    if "dhcp_params" in ip_profile else None,
-                    "dhcp_count": ip_profile["dhcp-params"].get("count") if "dhcp_params" in ip_profile else None,
+                    if "dhcp_params" in ip_profile
+                    else None,
+                    "dhcp_count": ip_profile["dhcp-params"].get("count")
+                    if "dhcp_params" in ip_profile
+                    else None,
                 }
+
                 if ip_profile.get("dns-server"):
-                    ro_ip_profile["dns_address"] = ";".join([v["address"] for v in ip_profile["dns-server"]])
-                if ip_profile.get('security-group'):
-                    ro_ip_profile["security_group"] = ip_profile['security-group']
+                    ro_ip_profile["dns_address"] = ";".join(
+                        [v["address"] for v in ip_profile["dns-server"]]
+                    )
+
+                if ip_profile.get("security-group"):
+                    ro_ip_profile["security_group"] = ip_profile["security-group"]
+
                 return ro_ip_profile
 
             def _process_net_params(target_vld, vim_info, target_record_id):
@@ -529,31 +709,50 @@ class Ns(object):
                 if vim_info.get("sdn"):
                     # vnf_preffix = "vnfrs:{}".format(vnfr_id)
                     # ns_preffix = "nsrs:{}".format(nsr_id)
-                    vld_target_record_id, _, _ = target_record_id.rpartition(".")  # remove the ending ".sdn
-                    extra_dict["params"] = {k: vim_info[k] for k in ("sdn-ports", "target_vim", "vlds", "type")
-                                            if vim_info.get(k)}
+                    # remove the ending ".sdn"
+                    vld_target_record_id, _, _ = target_record_id.rpartition(".")
+                    extra_dict["params"] = {
+                        k: vim_info[k]
+                        for k in ("sdn-ports", "target_vim", "vlds", "type")
+                        if vim_info.get(k)
+                    }
+
                     # TODO needed to add target_id in the dependency.
                     if vim_info.get("target_vim"):
-                        extra_dict["depends_on"] = [vim_info.get("target_vim") + " " + vld_target_record_id]
+                        extra_dict["depends_on"] = [
+                            vim_info.get("target_vim") + " " + vld_target_record_id
+                        ]
+
                     return extra_dict
 
                 if vim_info.get("vim_network_name"):
-                    extra_dict["find_params"] = {"filter_dict": {"name": vim_info.get("vim_network_name")}}
+                    extra_dict["find_params"] = {
+                        "filter_dict": {"name": vim_info.get("vim_network_name")}
+                    }
                 elif vim_info.get("vim_network_id"):
-                    extra_dict["find_params"] = {"filter_dict": {"id": vim_info.get("vim_network_id")}}
+                    extra_dict["find_params"] = {
+                        "filter_dict": {"id": vim_info.get("vim_network_id")}
+                    }
                 elif target_vld.get("mgmt-network"):
                     extra_dict["find_params"] = {"mgmt": True, "name": target_vld["id"]}
                 else:
                     # create
                     extra_dict["params"] = {
-                        "net_name": "{}-{}".format(indata["name"][:16], target_vld.get("name", target_vld["id"])[:16]),
-                        "ip_profile": _ip_profile_2_ro(vim_info.get('ip_profile')),
-                        "provider_network_profile": vim_info.get('provider_network'),
+                        "net_name": "{}-{}".format(
+                            indata["name"][:16],
+                            target_vld.get("name", target_vld["id"])[:16],
+                        ),
+                        "ip_profile": _ip_profile_2_ro(vim_info.get("ip_profile")),
+                        "provider_network_profile": vim_info.get("provider_network"),
                     }
+
                     if not target_vld.get("underlay"):
                         extra_dict["params"]["net_type"] = "bridge"
                     else:
-                        extra_dict["params"]["net_type"] = "ptp" if target_vld.get("type") == "ELINE" else "data"
+                        extra_dict["params"]["net_type"] = (
+                            "ptp" if target_vld.get("type") == "ELINE" else "data"
+                        )
+
                 return extra_dict
 
             def _process_vdu_params(target_vdu, vim_info, target_record_id):
@@ -563,71 +762,119 @@ class Ns(object):
                 nonlocal vnfr
                 nonlocal vdu2cloud_init
                 nonlocal tasks_by_target_record_id
+
                 vnf_preffix = "vnfrs:{}".format(vnfr_id)
                 ns_preffix = "nsrs:{}".format(nsr_id)
                 image_text = ns_preffix + ":image." + target_vdu["ns-image-id"]
                 flavor_text = ns_preffix + ":flavor." + target_vdu["ns-flavor-id"]
                 extra_dict = {"depends_on": [image_text, flavor_text]}
                 net_list = []
+
                 for iface_index, interface in enumerate(target_vdu["interfaces"]):
                     if interface.get("ns-vld-id"):
                         net_text = ns_preffix + ":vld." + interface["ns-vld-id"]
                     elif interface.get("vnf-vld-id"):
                         net_text = vnf_preffix + ":vld." + interface["vnf-vld-id"]
                     else:
-                        self.logger.error("Interface {} from vdu {} not connected to any vld".format(
-                            iface_index, target_vdu["vdu-name"]))
-                        continue   # interface not connected to any vld
+                        self.logger.error(
+                            "Interface {} from vdu {} not connected to any vld".format(
+                                iface_index, target_vdu["vdu-name"]
+                            )
+                        )
+
+                        continue  # interface not connected to any vld
+
                     extra_dict["depends_on"].append(net_text)
-                    net_item = {x: v for x, v in interface.items() if x in
-                                ("name", "vpci", "port_security", "port_security_disable_strategy", "floating_ip")}
+                    net_item = {
+                        x: v
+                        for x, v in interface.items()
+                        if x
+                        in (
+                            "name",
+                            "vpci",
+                            "port_security",
+                            "port_security_disable_strategy",
+                            "floating_ip",
+                        )
+                    }
                     net_item["net_id"] = "TASK-" + net_text
                     net_item["type"] = "virtual"
+
                     # TODO mac_address: used for  SR-IOV ifaces #TODO for other types
                     # TODO floating_ip: True/False (or it can be None)
                     if interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
                         # mark the net create task as type data
-                        if deep_get(tasks_by_target_record_id, net_text, "params", "net_type"):
-                            tasks_by_target_record_id[net_text]["params"]["net_type"] = "data"
+                        if deep_get(
+                            tasks_by_target_record_id, net_text, "params", "net_type"
+                        ):
+                            tasks_by_target_record_id[net_text]["params"][
+                                "net_type"
+                            ] = "data"
+
                         net_item["use"] = "data"
                         net_item["model"] = interface["type"]
                         net_item["type"] = interface["type"]
-                    elif interface.get("type") == "OM-MGMT" or interface.get("mgmt-interface") or \
-                            interface.get("mgmt-vnf"):
+                    elif (
+                        interface.get("type") == "OM-MGMT"
+                        or interface.get("mgmt-interface")
+                        or interface.get("mgmt-vnf")
+                    ):
                         net_item["use"] = "mgmt"
-                    else:   # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
+                    else:
+                        # if interface.get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
                         net_item["use"] = "bridge"
                         net_item["model"] = interface.get("type")
+
                     if interface.get("ip-address"):
                         net_item["ip_address"] = interface["ip-address"]
+
                     if interface.get("mac-address"):
                         net_item["mac_address"] = interface["mac-address"]
+
                     net_list.append(net_item)
+
                     if interface.get("mgmt-vnf"):
                         extra_dict["mgmt_vnf_interface"] = iface_index
                     elif interface.get("mgmt-interface"):
                         extra_dict["mgmt_vdu_interface"] = iface_index
+
                 # cloud config
                 cloud_config = {}
+
                 if target_vdu.get("cloud-init"):
                     if target_vdu["cloud-init"] not in vdu2cloud_init:
-                        vdu2cloud_init[target_vdu["cloud-init"]] = self._get_cloud_init(target_vdu["cloud-init"])
+                        vdu2cloud_init[target_vdu["cloud-init"]] = self._get_cloud_init(
+                            target_vdu["cloud-init"]
+                        )
+
                     cloud_content_ = vdu2cloud_init[target_vdu["cloud-init"]]
-                    cloud_config["user-data"] = self._parse_jinja2(cloud_content_, target_vdu.get("additionalParams"),
-                                                                   target_vdu["cloud-init"])
+                    cloud_config["user-data"] = self._parse_jinja2(
+                        cloud_content_,
+                        target_vdu.get("additionalParams"),
+                        target_vdu["cloud-init"],
+                    )
+
                 if target_vdu.get("boot-data-drive"):
                     cloud_config["boot-data-drive"] = target_vdu.get("boot-data-drive")
+
                 ssh_keys = []
+
                 if target_vdu.get("ssh-keys"):
                     ssh_keys += target_vdu.get("ssh-keys")
+
                 if target_vdu.get("ssh-access-required"):
                     ssh_keys.append(ro_nsr_public_key)
+
                 if ssh_keys:
                     cloud_config["key-pairs"] = ssh_keys
 
                 extra_dict["params"] = {
-                    "name": "{}-{}-{}-{}".format(indata["name"][:16], vnfr["member-vnf-index-ref"][:16],
-                                                 target_vdu["vdu-name"][:32], target_vdu.get("count-index") or 0),
+                    "name": "{}-{}-{}-{}".format(
+                        indata["name"][:16],
+                        vnfr["member-vnf-index-ref"][:16],
+                        target_vdu["vdu-name"][:32],
+                        target_vdu.get("count-index") or 0,
+                    ),
                     "description": target_vdu["vdu-name"],
                     "start": True,
                     "image_id": "TASK-" + image_text,
@@ -638,9 +885,18 @@ class Ns(object):
                     "availability_zone_index": None,  # TODO
                     "availability_zone_list": None,  # TODO
                 }
+
                 return extra_dict
 
-            def _process_items(target_list, existing_list, db_record, db_update, db_path, item, process_params):
+            def _process_items(
+                target_list,
+                existing_list,
+                db_record,
+                db_update,
+                db_path,
+                item,
+                process_params,
+            ):
                 nonlocal db_new_tasks
                 nonlocal tasks_by_target_record_id
                 nonlocal task_index
@@ -652,27 +908,45 @@ class Ns(object):
 
                 # step 1 items (networks,vdus,...) to be deleted/updated
                 for item_index, existing_item in enumerate(existing_list):
-                    target_item = next((t for t in target_list if t["id"] == existing_item["id"]), None)
-                    for target_vim, existing_viminfo in existing_item.get("vim_info", {}).items():
+                    target_item = next(
+                        (t for t in target_list if t["id"] == existing_item["id"]), None
+                    )
+
+                    for target_vim, existing_viminfo in existing_item.get(
+                        "vim_info", {}
+                    ).items():
                         if existing_viminfo is None:
                             continue
+
                         if target_item:
-                            target_viminfo = target_item.get("vim_info", {}).get(target_vim)
+                            target_viminfo = target_item.get("vim_info", {}).get(
+                                target_vim
+                            )
                         else:
                             target_viminfo = None
+
                         if target_viminfo is None:
                             # must be deleted
                             self._assign_vim(target_vim)
-                            target_record_id = "{}.{}".format(db_record, existing_item["id"])
+                            target_record_id = "{}.{}".format(
+                                db_record, existing_item["id"]
+                            )
                             item_ = item
+
                             if target_vim.startswith("sdn"):
                                 # item must be sdn-net instead of net if target_vim is a sdn
                                 item_ = "sdn_net"
                                 target_record_id += ".sdn"
+
                             task = _create_task(
-                                target_vim, item_, "DELETE",
-                                target_record="{}.{}.vim_info.{}".format(db_record, item_index, target_vim),
-                                target_record_id=target_record_id)
+                                target_vim,
+                                item_,
+                                "DELETE",
+                                target_record="{}.{}.vim_info.{}".format(
+                                    db_record, item_index, target_vim
+                                ),
+                                target_record_id=target_record_id,
+                            )
                             tasks_by_target_record_id[target_record_id] = task
                             db_new_tasks.append(task)
                             # TODO delete
@@ -681,6 +955,7 @@ class Ns(object):
                 # step 2 items (networks,vdus,...) to be created
                 for target_item in target_list:
                     item_index = -1
+
                     for item_index, existing_item in enumerate(existing_list):
                         if existing_item["id"] == target_item["id"]:
                             break
@@ -690,10 +965,16 @@ class Ns(object):
                         existing_list.append(target_item)
                         existing_item = None
 
-                    for target_vim, target_viminfo in target_item.get("vim_info", {}).items():
+                    for target_vim, target_viminfo in target_item.get(
+                        "vim_info", {}
+                    ).items():
                         existing_viminfo = None
+
                         if existing_item:
-                            existing_viminfo = existing_item.get("vim_info", {}).get(target_vim)
+                            existing_viminfo = existing_item.get("vim_info", {}).get(
+                                target_vim
+                            )
+
                         # TODO check if different. Delete and create???
                         # TODO delete if not exist
                         if existing_viminfo is not None:
@@ -701,20 +982,29 @@ class Ns(object):
 
                         target_record_id = "{}.{}".format(db_record, target_item["id"])
                         item_ = item
+
                         if target_vim.startswith("sdn"):
                             # item must be sdn-net instead of net if target_vim is a sdn
                             item_ = "sdn_net"
                             target_record_id += ".sdn"
-                        extra_dict = process_params(target_item, target_viminfo, target_record_id)
 
+                        extra_dict = process_params(
+                            target_item, target_viminfo, target_record_id
+                        )
                         self._assign_vim(target_vim)
                         task = _create_task(
-                            target_vim, item_, "CREATE",
-                            target_record="{}.{}.vim_info.{}".format(db_record, item_index, target_vim),
+                            target_vim,
+                            item_,
+                            "CREATE",
+                            target_record="{}.{}.vim_info.{}".format(
+                                db_record, item_index, target_vim
+                            ),
                             target_record_id=target_record_id,
-                            extra_dict=extra_dict)
+                            extra_dict=extra_dict,
+                        )
                         tasks_by_target_record_id[target_record_id] = task
                         db_new_tasks.append(task)
+
                         if target_item.get("common_id"):
                             task["common_id"] = target_item["common_id"]
 
@@ -730,20 +1020,41 @@ class Ns(object):
                     key = indata["action"].get("key")
                     user = indata["action"].get("user")
                     password = indata["action"].get("password")
+
                     for vnf in indata.get("vnf", ()):
                         if vnf["_id"] not in db_vnfrs:
                             raise NsException("Invalid vnf={}".format(vnf["_id"]))
+
                         db_vnfr = db_vnfrs[vnf["_id"]]
+
                         for target_vdu in vnf.get("vdur", ()):
-                            vdu_index, vdur = next((i_v for i_v in enumerate(db_vnfr["vdur"]) if
-                                                    i_v[1]["id"] == target_vdu["id"]), (None, None))
+                            vdu_index, vdur = next(
+                                (
+                                    i_v
+                                    for i_v in enumerate(db_vnfr["vdur"])
+                                    if i_v[1]["id"] == target_vdu["id"]
+                                ),
+                                (None, None),
+                            )
+
                             if not vdur:
-                                raise NsException("Invalid vdu vnf={}.{}".format(vnf["_id"], target_vdu["id"]))
-                            target_vim, vim_info = next(k_v for k_v in vdur["vim_info"].items())
+                                raise NsException(
+                                    "Invalid vdu vnf={}.{}".format(
+                                        vnf["_id"], target_vdu["id"]
+                                    )
+                                )
+
+                            target_vim, vim_info = next(
+                                k_v for k_v in vdur["vim_info"].items()
+                            )
                             self._assign_vim(target_vim)
-                            target_record = "vnfrs:{}:vdur.{}.ssh_keys".format(vnf["_id"], vdu_index)
+                            target_record = "vnfrs:{}:vdur.{}.ssh_keys".format(
+                                vnf["_id"], vdu_index
+                            )
                             extra_dict = {
-                                "depends_on": ["vnfrs:{}:vdur.{}".format(vnf["_id"], vdur["id"])],
+                                "depends_on": [
+                                    "vnfrs:{}:vdur.{}".format(vnf["_id"], vdur["id"])
+                                ],
                                 "params": {
                                     "ip_address": vdur.get("ip-address"),
                                     "user": user,
@@ -751,13 +1062,19 @@ class Ns(object):
                                     "password": password,
                                     "private_key": db_ro_nsr["private_key"],
                                     "salt": db_ro_nsr["_id"],
-                                    "schema_version": db_ro_nsr["_admin"]["schema_version"]
-                                }
+                                    "schema_version": db_ro_nsr["_admin"][
+                                        "schema_version"
+                                    ],
+                                },
                             }
-                            task = _create_task(target_vim, "vdu", "EXEC",
-                                                target_record=target_record,
-                                                target_record_id=None,
-                                                extra_dict=extra_dict)
+                            task = _create_task(
+                                target_vim,
+                                "vdu",
+                                "EXEC",
+                                target_record=target_record,
+                                target_record_id=None,
+                                extra_dict=extra_dict,
+                            )
                             db_new_tasks.append(task)
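The loop above builds one EXEC task per target VDU to inject the operator's SSH key, and ties each task to the VM it needs through a "vnfrs:<id>:vdur.<id>" reference in depends_on. A minimal sketch of the resulting task shape, using only the keys visible in the hunk; the helper name and anything not shown here are illustrative, not the module's actual API.

    # Illustrative only: rough shape of the ssh-key EXEC task assembled above.
    # Fields not visible in the hunk are omitted rather than guessed.
    def build_ssh_key_task(vnf_id, vdu_index, vdur, user, password, db_ro_nsr):
        return {
            "action": "EXEC",
            "item": "vdu",
            "target_record": "vnfrs:{}:vdur.{}.ssh_keys".format(vnf_id, vdu_index),
            "target_record_id": None,
            # the key can only be injected once the referenced VM exists
            "depends_on": ["vnfrs:{}:vdur.{}".format(vnf_id, vdur["id"])],
            "params": {
                "ip_address": vdur.get("ip-address"),
                "user": user,
                "password": password,
                "private_key": db_ro_nsr["private_key"],
                "salt": db_ro_nsr["_id"],
                "schema_version": db_ro_nsr["_admin"]["schema_version"],
            },
        }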
 
             with self.write_lock:
@@ -767,88 +1084,152 @@ class Ns(object):
                     # compute network differences
                     # NS.vld
                     step = "process NS VLDs"
-                    _process_items(target_list=indata["ns"]["vld"] or [], existing_list=db_nsr.get("vld") or [],
-                                   db_record="nsrs:{}:vld".format(nsr_id), db_update=db_nsr_update,
-                                   db_path="vld", item="net", process_params=_process_net_params)
+                    _process_items(
+                        target_list=indata["ns"]["vld"] or [],
+                        existing_list=db_nsr.get("vld") or [],
+                        db_record="nsrs:{}:vld".format(nsr_id),
+                        db_update=db_nsr_update,
+                        db_path="vld",
+                        item="net",
+                        process_params=_process_net_params,
+                    )
 
                     step = "process NS images"
-                    _process_items(target_list=indata.get("image") or [], existing_list=db_nsr.get("image") or [],
-                                   db_record="nsrs:{}:image".format(nsr_id),
-                                   db_update=db_nsr_update, db_path="image", item="image",
-                                   process_params=_process_image_params)
+                    _process_items(
+                        target_list=indata.get("image") or [],
+                        existing_list=db_nsr.get("image") or [],
+                        db_record="nsrs:{}:image".format(nsr_id),
+                        db_update=db_nsr_update,
+                        db_path="image",
+                        item="image",
+                        process_params=_process_image_params,
+                    )
 
                     step = "process NS flavors"
-                    _process_items(target_list=indata.get("flavor") or [], existing_list=db_nsr.get("flavor") or [],
-                                   db_record="nsrs:{}:flavor".format(nsr_id),
-                                   db_update=db_nsr_update, db_path="flavor", item="flavor",
-                                   process_params=_process_flavor_params)
+                    _process_items(
+                        target_list=indata.get("flavor") or [],
+                        existing_list=db_nsr.get("flavor") or [],
+                        db_record="nsrs:{}:flavor".format(nsr_id),
+                        db_update=db_nsr_update,
+                        db_path="flavor",
+                        item="flavor",
+                        process_params=_process_flavor_params,
+                    )
 
                     # VNF.vld
                     for vnfr_id, vnfr in db_vnfrs.items():
                         # vnfr_id need to be set as global variable for among others nested method _process_vdu_params
                         step = "process VNF={} VLDs".format(vnfr_id)
-                        target_vnf = next((vnf for vnf in indata.get("vnf", ()) if vnf["_id"] == vnfr_id), None)
+                        target_vnf = next(
+                            (
+                                vnf
+                                for vnf in indata.get("vnf", ())
+                                if vnf["_id"] == vnfr_id
+                            ),
+                            None,
+                        )
                         target_list = target_vnf.get("vld") if target_vnf else None
-                        _process_items(target_list=target_list or [], existing_list=vnfr.get("vld") or [],
-                                       db_record="vnfrs:{}:vld".format(vnfr_id), db_update=db_vnfrs_update[vnfr["_id"]],
-                                       db_path="vld", item="net", process_params=_process_net_params)
+                        _process_items(
+                            target_list=target_list or [],
+                            existing_list=vnfr.get("vld") or [],
+                            db_record="vnfrs:{}:vld".format(vnfr_id),
+                            db_update=db_vnfrs_update[vnfr["_id"]],
+                            db_path="vld",
+                            item="net",
+                            process_params=_process_net_params,
+                        )
 
                         target_list = target_vnf.get("vdur") if target_vnf else None
                         step = "process VNF={} VDUs".format(vnfr_id)
-                        _process_items(target_list=target_list or [], existing_list=vnfr.get("vdur") or [],
-                                       db_record="vnfrs:{}:vdur".format(vnfr_id),
-                                       db_update=db_vnfrs_update[vnfr["_id"]], db_path="vdur", item="vdu",
-                                       process_params=_process_vdu_params)
+                        _process_items(
+                            target_list=target_list or [],
+                            existing_list=vnfr.get("vdur") or [],
+                            db_record="vnfrs:{}:vdur".format(vnfr_id),
+                            db_update=db_vnfrs_update[vnfr["_id"]],
+                            db_path="vdur",
+                            item="vdu",
+                            process_params=_process_vdu_params,
+                        )
 
                 for db_task in db_new_tasks:
                     step = "Updating database, Appending tasks to ro_tasks"
                     target_id = db_task.pop("target_id")
                     common_id = db_task.get("common_id")
+
                     if common_id:
-                        if self.db.set_one("ro_tasks",
-                                           q_filter={"target_id": target_id,
-                                                     "tasks.common_id": common_id},
-                                           update_dict={"to_check_at": now, "modified_at": now},
-                                           push={"tasks": db_task}, fail_on_empty=False):
+                        if self.db.set_one(
+                            "ro_tasks",
+                            q_filter={
+                                "target_id": target_id,
+                                "tasks.common_id": common_id,
+                            },
+                            update_dict={"to_check_at": now, "modified_at": now},
+                            push={"tasks": db_task},
+                            fail_on_empty=False,
+                        ):
                             continue
-                    if not self.db.set_one("ro_tasks",
-                                           q_filter={"target_id": target_id,
-                                                     "tasks.target_record": db_task["target_record"]},
-                                           update_dict={"to_check_at": now, "modified_at": now},
-                                           push={"tasks": db_task}, fail_on_empty=False):
+
+                    if not self.db.set_one(
+                        "ro_tasks",
+                        q_filter={
+                            "target_id": target_id,
+                            "tasks.target_record": db_task["target_record"],
+                        },
+                        update_dict={"to_check_at": now, "modified_at": now},
+                        push={"tasks": db_task},
+                        fail_on_empty=False,
+                    ):
                         # Create a ro_task
                         step = "Updating database, Creating ro_tasks"
                         db_ro_task = _create_ro_task(target_id, db_task)
                         nb_ro_tasks += 1
                         self.db.create("ro_tasks", db_ro_task)
+
                 step = "Updating database, nsrs"
                 if db_nsr_update:
                     self.db.set_one("nsrs", {"_id": nsr_id}, db_nsr_update)
+
                 for vnfr_id, db_vnfr_update in db_vnfrs_update.items():
                     if db_vnfr_update:
                         step = "Updating database, vnfrs={}".format(vnfr_id)
                         self.db.set_one("vnfrs", {"_id": vnfr_id}, db_vnfr_update)
 
-            self.logger.debug(logging_text + "Exit. Created {} ro_tasks; {} tasks".format(nb_ro_tasks,
-                                                                                          len(db_new_tasks)))
-            return {"status": "ok", "nsr_id": nsr_id, "action_id": action_id}, action_id, True
+            self.logger.debug(
+                logging_text
+                + "Exit. Created {} ro_tasks; {} tasks".format(
+                    nb_ro_tasks, len(db_new_tasks)
+                )
+            )
 
+            return (
+                {"status": "ok", "nsr_id": nsr_id, "action_id": action_id},
+                action_id,
+                True,
+            )
         except Exception as e:
             if isinstance(e, (DbException, NsException)):
-                self.logger.error(logging_text + "Exit Exception while '{}': {}".format(step, e))
+                self.logger.error(
+                    logging_text + "Exit Exception while '{}': {}".format(step, e)
+                )
             else:
                 e = traceback_format_exc()
-                self.logger.critical(logging_text + "Exit Exception while '{}': {}".format(step, e), exc_info=True)
+                self.logger.critical(
+                    logging_text + "Exit Exception while '{}': {}".format(step, e),
+                    exc_info=True,
+                )
+
             raise NsException(e)
 
     def delete(self, session, indata, version, nsr_id, *args, **kwargs):
         self.logger.debug("ns.delete version={} nsr_id={}".format(version, nsr_id))
         # self.db.del_list({"_id": ro_task["_id"], "tasks.nsr_id.ne": nsr_id})
+
         with self.write_lock:
             try:
                 NsWorker.delete_db_tasks(self.db, nsr_id, None)
             except NsWorkerException as e:
                 raise NsException(e)
+
         return None, None, True
 
     def status(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
@@ -860,47 +1241,64 @@ class Ns(object):
         ro_tasks = self.db.get_list("ro_tasks", {"tasks.action_id": action_id})
         global_status = "DONE"
         details = []
+
         for ro_task in ro_tasks:
             for task in ro_task["tasks"]:
                 if task and task["action_id"] == action_id:
                     task_list.append(task)
                     total += 1
+
                     if task["status"] == "FAILED":
                         global_status = "FAILED"
-                        error_text = "Error at {} {}: {}".format(task["action"].lower(), task["item"],
-                                                                 ro_task["vim_info"].get("vim_details") or "unknown")
+                        error_text = "Error at {} {}: {}".format(
+                            task["action"].lower(),
+                            task["item"],
+                            ro_task["vim_info"].get("vim_details") or "unknown",
+                        )
                         details.append(error_text)
                     elif task["status"] in ("SCHEDULED", "BUILD"):
                         if global_status != "FAILED":
                             global_status = "BUILD"
                     else:
                         done += 1
+
         return_data = {
             "status": global_status,
-            "details": ". ".join(details) if details else "progress {}/{}".format(done, total),
+            "details": ". ".join(details)
+            if details
+            else "progress {}/{}".format(done, total),
             "nsr_id": nsr_id,
             "action_id": action_id,
-            "tasks": task_list
+            "tasks": task_list,
         }
+
         return return_data, None, True
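status() above collapses the per-task states into one global value with a simple precedence: any FAILED task makes the whole action FAILED, otherwise any SCHEDULED or BUILD task keeps it in BUILD, and only when every task has finished does it report DONE. The same reduction in isolation; the function name and return tuple are illustrative.

    # Illustrative reduction mirroring the precedence used in status() above.
    def aggregate_status(task_statuses):
        global_status = "DONE"
        done = 0
        for status in task_statuses:
            if status == "FAILED":
                global_status = "FAILED"
            elif status in ("SCHEDULED", "BUILD"):
                if global_status != "FAILED":
                    global_status = "BUILD"
            else:
                done += 1
        return global_status, done

    # e.g. aggregate_status(["DONE", "BUILD", "DONE"]) -> ("BUILD", 2)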
 
     def cancel(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
-        print("ns.cancel session={} indata={} version={} nsr_id={}, action_id={}".format(session, indata, version,
-                                                                                         nsr_id, action_id))
+        print(
+            "ns.cancel session={} indata={} version={} nsr_id={}, action_id={}".format(
+                session, indata, version, nsr_id, action_id
+            )
+        )
+
         return None, None, True
 
     def get_deploy(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
         nsrs = self.db.get_list("nsrs", {})
         return_data = []
+
         for ns in nsrs:
             return_data.append({"_id": ns["_id"], "name": ns["name"]})
+
         return return_data, None, True
 
     def get_actions(self, session, indata, version, nsr_id, action_id, *args, **kwargs):
         ro_tasks = self.db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id})
         return_data = []
+
         for ro_task in ro_tasks:
             for task in ro_task["tasks"]:
                 if task["action_id"] not in return_data:
                     return_data.append(task["action_id"])
+
         return return_data, None, True
index f967f83..f15831a 100644 (file)
@@ -24,23 +24,25 @@ A single ro_task refers to a VIM element (flavor, image, network, ...).
 A ro_task can contain several 'tasks', each one with a target, where to store the results
 """
 
+import logging
+import queue
 import threading
 import time
-import queue
-import logging
 import yaml
+from copy import deepcopy
+from http import HTTPStatus
+from os import mkdir
 from pkg_resources import iter_entry_points
+from shutil import rmtree
+from unittest.mock import Mock
+
 # from osm_common import dbmongo, dbmemory, fslocal, fsmongo, msglocal, msgkafka, version as common_version
 from osm_common.dbbase import DbException
 from osm_ro_plugin.vim_dummy import VimDummyConnector
 from osm_ro_plugin.sdn_dummy import SdnDummyConnector
 from osm_ro_plugin import vimconn, sdnconn
 from osm_ng_ro.vim_admin import LockRenew
-from copy import deepcopy
-from unittest.mock import Mock
-from http import HTTPStatus
-from os import mkdir
-from shutil import rmtree
+
 
 __author__ = "Alfonso Tierno"
 __date__ = "$28-Sep-2017 12:07:15$"
@@ -69,12 +71,18 @@ class NsWorkerException(Exception):
 class FailingConnector:
     def __init__(self, error_msg):
         self.error_msg = error_msg
+
         for method in dir(vimconn.VimConnector):
             if method[0] != "_":
-                setattr(self, method, Mock(side_effect=vimconn.VimConnException(error_msg)))
+                setattr(
+                    self, method, Mock(side_effect=vimconn.VimConnException(error_msg))
+                )
+
         for method in dir(sdnconn.SdnConnectorBase):
             if method[0] != "_":
-                setattr(self, method, Mock(side_effect=sdnconn.SdnConnectorError(error_msg)))
+                setattr(
+                    self, method, Mock(side_effect=sdnconn.SdnConnectorError(error_msg))
+                )
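FailingConnector above is the stand-in used when a VIM/SDN plugin cannot be loaded: every public method of the connector interfaces is replaced by a unittest.mock.Mock whose side_effect raises the stored error, so any later call surfaces the original load failure instead of an AttributeError. A reduced sketch of the same idea against hypothetical stand-in classes (SomeInterface and SomeError are not OSM names).

    # Reduced sketch of the Mock(side_effect=...) stubbing used above.
    from unittest.mock import Mock

    class SomeError(Exception):
        pass

    class SomeInterface:
        def connect(self):
            pass

        def get_status(self):
            pass

    class AlwaysFailing:
        def __init__(self, error_msg):
            self.error_msg = error_msg
            for method in dir(SomeInterface):
                if not method.startswith("_"):
                    setattr(self, method, Mock(side_effect=SomeError(error_msg)))

    # AlwaysFailing("vim unreachable").connect() raises SomeError("vim unreachable")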
 
 
 class NsWorkerExceptionNotFound(NsWorkerException):
@@ -82,8 +90,9 @@ class NsWorkerExceptionNotFound(NsWorkerException):
 
 
 class VimInteractionBase:
-    """ Base class to call VIM/SDN for creating, deleting and refresh networks, VMs, flavors, ...
+    """Base class to call VIM/SDN for creating, deleting and refresh networks, VMs, flavors, ...
     It implements methods that does nothing and return ok"""
+
     def __init__(self, db, my_vims, db_vims, logger):
         self.db = db
         self.logger = logger
@@ -97,6 +106,7 @@ class VimInteractionBase:
         """skip calling VIM to get image, flavor status. Assumes ok"""
         if ro_task["vim_info"]["vim_status"] == "VIM_ERROR":
             return "FAILED", {}
+
         return "DONE", {}
 
     def delete(self, ro_task, task_index):
@@ -108,7 +118,6 @@ class VimInteractionBase:
 
 
 class VimInteractionNet(VimInteractionBase):
-
     def new(self, ro_task, task_index, task_depends):
         vim_net_id = None
         task = ro_task["tasks"][task_index]
@@ -116,29 +125,56 @@ class VimInteractionNet(VimInteractionBase):
         created = False
         created_items = {}
         target_vim = self.my_vims[ro_task["target_id"]]
+
         try:
             # FIND
             if task.get("find_params"):
                 # if management, get configuration of VIM
                 if task["find_params"].get("filter_dict"):
                     vim_filter = task["find_params"]["filter_dict"]
-                elif task["find_params"].get("mgmt"):   # mamagement network
-                    if deep_get(self.db_vims[ro_task["target_id"]], "config", "management_network_id"):
-                        vim_filter = {"id": self.db_vims[ro_task["target_id"]]["config"]["management_network_id"]}
-                    elif deep_get(self.db_vims[ro_task["target_id"]], "config", "management_network_name"):
-                        vim_filter = {"name": self.db_vims[ro_task["target_id"]]["config"]["management_network_name"]}
+                # management network
+                elif task["find_params"].get("mgmt"):
+                    if deep_get(
+                        self.db_vims[ro_task["target_id"]],
+                        "config",
+                        "management_network_id",
+                    ):
+                        vim_filter = {
+                            "id": self.db_vims[ro_task["target_id"]]["config"][
+                                "management_network_id"
+                            ]
+                        }
+                    elif deep_get(
+                        self.db_vims[ro_task["target_id"]],
+                        "config",
+                        "management_network_name",
+                    ):
+                        vim_filter = {
+                            "name": self.db_vims[ro_task["target_id"]]["config"][
+                                "management_network_name"
+                            ]
+                        }
                     else:
                         vim_filter = {"name": task["find_params"]["name"]}
                 else:
-                    raise NsWorkerExceptionNotFound("Invalid find_params for new_net {}".format(task["find_params"]))
+                    raise NsWorkerExceptionNotFound(
+                        "Invalid find_params for new_net {}".format(task["find_params"])
+                    )
 
                 vim_nets = target_vim.get_network_list(vim_filter)
                 if not vim_nets and not task.get("params"):
-                    raise NsWorkerExceptionNotFound("Network not found with this criteria: '{}'".format(
-                        task.get("find_params")))
+                    raise NsWorkerExceptionNotFound(
+                        "Network not found with this criteria: '{}'".format(
+                            task.get("find_params")
+                        )
+                    )
                 elif len(vim_nets) > 1:
                     raise NsWorkerException(
-                        "More than one network found with this criteria: '{}'".format(task["find_params"]))
+                        "More than one network found with this criteria: '{}'".format(
+                            task["find_params"]
+                        )
+                    )
+
                 if vim_nets:
                     vim_net_id = vim_nets[0]["id"]
             else:
@@ -147,31 +183,43 @@ class VimInteractionNet(VimInteractionBase):
                 vim_net_id, created_items = target_vim.new_network(**params)
                 created = True
 
-            ro_vim_item_update = {"vim_id": vim_net_id,
-                                  "vim_status": "BUILD",
-                                  "created": created,
-                                  "created_items": created_items,
-                                  "vim_details": None}
+            ro_vim_item_update = {
+                "vim_id": vim_net_id,
+                "vim_status": "BUILD",
+                "created": created,
+                "created_items": created_items,
+                "vim_details": None,
+            }
             self.logger.debug(
-                "task={} {} new-net={} created={}".format(task_id, ro_task["target_id"], vim_net_id, created))
+                "task={} {} new-net={} created={}".format(
+                    task_id, ro_task["target_id"], vim_net_id, created
+                )
+            )
+
             return "BUILD", ro_vim_item_update
         except (vimconn.VimConnException, NsWorkerException) as e:
-            self.logger.error("task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "created": created,
-                                  "vim_details": str(e)}
+            self.logger.error(
+                "task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e)
+            )
+            ro_vim_item_update = {
+                "vim_status": "VIM_ERROR",
+                "created": created,
+                "vim_details": str(e),
+            }
+
             return "FAILED", ro_vim_item_update
 
     def refresh(self, ro_task):
         """Call VIM to get network status"""
         ro_task_id = ro_task["_id"]
         target_vim = self.my_vims[ro_task["target_id"]]
-
         vim_id = ro_task["vim_info"]["vim_id"]
         net_to_refresh_list = [vim_id]
+
         try:
             vim_dict = target_vim.refresh_nets_status(net_to_refresh_list)
             vim_info = vim_dict[vim_id]
+
             if vim_info["status"] == "ACTIVE":
                 task_status = "DONE"
             elif vim_info["status"] == "BUILD":
@@ -180,15 +228,21 @@ class VimInteractionNet(VimInteractionBase):
                 task_status = "FAILED"
         except vimconn.VimConnException as e:
             # Mark all tasks at VIM_ERROR status
-            self.logger.error("ro_task={} vim={} get-net={}: {}".format(ro_task_id, ro_task["target_id"], vim_id, e))
+            self.logger.error(
+                "ro_task={} vim={} get-net={}: {}".format(
+                    ro_task_id, ro_task["target_id"], vim_id, e
+                )
+            )
             vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
             task_status = "FAILED"
 
         ro_vim_item_update = {}
         if ro_task["vim_info"]["vim_status"] != vim_info["status"]:
             ro_vim_item_update["vim_status"] = vim_info["status"]
+
         if ro_task["vim_info"]["vim_name"] != vim_info.get("name"):
             ro_vim_item_update["vim_name"] = vim_info.get("name")
+
         if vim_info["status"] in ("ERROR", "VIM_ERROR"):
             if ro_task["vim_info"]["vim_details"] != vim_info.get("error_msg"):
                 ro_vim_item_update["vim_details"] = vim_info.get("error_msg")
@@ -198,43 +252,69 @@ class VimInteractionNet(VimInteractionBase):
         else:
             if ro_task["vim_info"]["vim_details"] != vim_info["vim_info"]:
                 ro_vim_item_update["vim_details"] = vim_info["vim_info"]
+
         if ro_vim_item_update:
-            self.logger.debug("ro_task={} {} get-net={}: status={} {}".format(
-                ro_task_id, ro_task["target_id"], vim_id, ro_vim_item_update.get("vim_status"),
-                ro_vim_item_update.get("vim_details") if ro_vim_item_update.get("vim_status") != "ACTIVE" else ''))
+            self.logger.debug(
+                "ro_task={} {} get-net={}: status={} {}".format(
+                    ro_task_id,
+                    ro_task["target_id"],
+                    vim_id,
+                    ro_vim_item_update.get("vim_status"),
+                    ro_vim_item_update.get("vim_details")
+                    if ro_vim_item_update.get("vim_status") != "ACTIVE"
+                    else "",
+                )
+            )
+
         return task_status, ro_vim_item_update
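One pattern worth noting in refresh() above (and in the analogous VDU version further down): the freshly polled VIM state is compared field by field against what is already stored in ro_task["vim_info"], and only the fields that actually changed go into the update dict, so an unchanged element causes no database write. A minimal sketch of that diffing step, assuming the vim_status/vim_name/vim_details keys seen above; the function name is illustrative.

    # Sketch of the "emit only what changed" step used by refresh() above.
    def diff_vim_info(stored, polled):
        update = {}
        if stored.get("vim_status") != polled.get("status"):
            update["vim_status"] = polled.get("status")
        if stored.get("vim_name") != polled.get("name"):
            update["vim_name"] = polled.get("name")
        new_details = (
            polled.get("error_msg")
            if polled.get("status") in ("ERROR", "VIM_ERROR")
            else polled.get("vim_info")
        )
        if stored.get("vim_details") != new_details:
            update["vim_details"] = new_details
        return update  # an empty dict means nothing needs persisting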
 
     def delete(self, ro_task, task_index):
         task = ro_task["tasks"][task_index]
         task_id = task["task_id"]
         net_vim_id = ro_task["vim_info"]["vim_id"]
-        ro_vim_item_update_ok = {"vim_status": "DELETED",
-                                 "created": False,
-                                 "vim_details": "DELETED",
-                                 "vim_id": None}
+        ro_vim_item_update_ok = {
+            "vim_status": "DELETED",
+            "created": False,
+            "vim_details": "DELETED",
+            "vim_id": None,
+        }
+
         try:
             if net_vim_id or ro_task["vim_info"]["created_items"]:
                 target_vim = self.my_vims[ro_task["target_id"]]
-                target_vim.delete_network(net_vim_id, ro_task["vim_info"]["created_items"])
-
+                target_vim.delete_network(
+                    net_vim_id, ro_task["vim_info"]["created_items"]
+                )
         except vimconn.VimConnNotFoundException:
             ro_vim_item_update_ok["vim_details"] = "already deleted"
-
         except vimconn.VimConnException as e:
-            self.logger.error("ro_task={} vim={} del-net={}: {}".format(ro_task["_id"], ro_task["target_id"],
-                                                                        net_vim_id, e))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "vim_details": "Error while deleting: {}".format(e)}
+            self.logger.error(
+                "ro_task={} vim={} del-net={}: {}".format(
+                    ro_task["_id"], ro_task["target_id"], net_vim_id, e
+                )
+            )
+            ro_vim_item_update = {
+                "vim_status": "VIM_ERROR",
+                "vim_details": "Error while deleting: {}".format(e),
+            }
+
             return "FAILED", ro_vim_item_update
 
-        self.logger.debug("task={} {} del-net={} {}".format(task_id, ro_task["target_id"], net_vim_id,
-                                                            ro_vim_item_update_ok.get("vim_details", "")))
+        self.logger.debug(
+            "task={} {} del-net={} {}".format(
+                task_id,
+                ro_task["target_id"],
+                net_vim_id,
+                ro_vim_item_update_ok.get("vim_details", ""),
+            )
+        )
+
         return "DONE", ro_vim_item_update_ok
 
 
 class VimInteractionVdu(VimInteractionBase):
-    max_retries_inject_ssh_key = 20    # 20 times
-    time_retries_inject_ssh_key = 30   # wevery 30 seconds
+    max_retries_inject_ssh_key = 20  # 20 times
+    time_retries_inject_ssh_key = 30  # every 30 seconds
 
     def new(self, ro_task, task_index, task_depends):
         task = ro_task["tasks"][task_index]
@@ -242,89 +322,127 @@ class VimInteractionVdu(VimInteractionBase):
         created = False
         created_items = {}
         target_vim = self.my_vims[ro_task["target_id"]]
+
         try:
             created = True
             params = task["params"]
             params_copy = deepcopy(params)
             net_list = params_copy["net_list"]
+
             for net in net_list:
-                if "net_id" in net and net["net_id"].startswith("TASK-"):  # change task_id into network_id
+                # change task_id into network_id
+                if "net_id" in net and net["net_id"].startswith("TASK-"):
                     network_id = task_depends[net["net_id"]]
+
                     if not network_id:
-                        raise NsWorkerException("Cannot create VM because depends on a network not created or found "
-                                                "for {}".format(net["net_id"]))
+                        raise NsWorkerException(
+                            "Cannot create VM because depends on a network not created or found "
+                            "for {}".format(net["net_id"])
+                        )
+
                     net["net_id"] = network_id
+
             if params_copy["image_id"].startswith("TASK-"):
                 params_copy["image_id"] = task_depends[params_copy["image_id"]]
+
             if params_copy["flavor_id"].startswith("TASK-"):
                 params_copy["flavor_id"] = task_depends[params_copy["flavor_id"]]
 
             vim_vm_id, created_items = target_vim.new_vminstance(**params_copy)
             interfaces = [iface["vim_id"] for iface in params_copy["net_list"]]
 
-            ro_vim_item_update = {"vim_id": vim_vm_id,
-                                  "vim_status": "BUILD",
-                                  "created": created,
-                                  "created_items": created_items,
-                                  "vim_details": None,
-                                  "interfaces_vim_ids": interfaces,
-                                  "interfaces": [],
-                                  }
+            ro_vim_item_update = {
+                "vim_id": vim_vm_id,
+                "vim_status": "BUILD",
+                "created": created,
+                "created_items": created_items,
+                "vim_details": None,
+                "interfaces_vim_ids": interfaces,
+                "interfaces": [],
+            }
             self.logger.debug(
-                "task={} {} new-vm={} created={}".format(task_id, ro_task["target_id"], vim_vm_id, created))
+                "task={} {} new-vm={} created={}".format(
+                    task_id, ro_task["target_id"], vim_vm_id, created
+                )
+            )
+
             return "BUILD", ro_vim_item_update
         except (vimconn.VimConnException, NsWorkerException) as e:
-            self.logger.error("task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "created": created,
-                                  "vim_details": str(e)}
+            self.logger.error(
+                "task={} {} new-vm: {}".format(task_id, ro_task["target_id"], e)
+            )
+            ro_vim_item_update = {
+                "vim_status": "VIM_ERROR",
+                "created": created,
+                "vim_details": str(e),
+            }
+
             return "FAILED", ro_vim_item_update
 
     def delete(self, ro_task, task_index):
         task = ro_task["tasks"][task_index]
         task_id = task["task_id"]
         vm_vim_id = ro_task["vim_info"]["vim_id"]
-        ro_vim_item_update_ok = {"vim_status": "DELETED",
-                                 "created": False,
-                                 "vim_details": "DELETED",
-                                 "vim_id": None}
+        ro_vim_item_update_ok = {
+            "vim_status": "DELETED",
+            "created": False,
+            "vim_details": "DELETED",
+            "vim_id": None,
+        }
+
         try:
             if vm_vim_id or ro_task["vim_info"]["created_items"]:
                 target_vim = self.my_vims[ro_task["target_id"]]
-                target_vim.delete_vminstance(vm_vim_id, ro_task["vim_info"]["created_items"])
-
+                target_vim.delete_vminstance(
+                    vm_vim_id, ro_task["vim_info"]["created_items"]
+                )
         except vimconn.VimConnNotFoundException:
             ro_vim_item_update_ok["vim_details"] = "already deleted"
-
         except vimconn.VimConnException as e:
-            self.logger.error("ro_task={} vim={} del-vm={}: {}".format(ro_task["_id"], ro_task["target_id"],
-                                                                       vm_vim_id, e))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "vim_details": "Error while deleting: {}".format(e)}
+            self.logger.error(
+                "ro_task={} vim={} del-vm={}: {}".format(
+                    ro_task["_id"], ro_task["target_id"], vm_vim_id, e
+                )
+            )
+            ro_vim_item_update = {
+                "vim_status": "VIM_ERROR",
+                "vim_details": "Error while deleting: {}".format(e),
+            }
+
             return "FAILED", ro_vim_item_update
 
-        self.logger.debug("task={} {} del-vm={} {}".format(task_id, ro_task["target_id"], vm_vim_id,
-                                                           ro_vim_item_update_ok.get("vim_details", "")))
+        self.logger.debug(
+            "task={} {} del-vm={} {}".format(
+                task_id,
+                ro_task["target_id"],
+                vm_vim_id,
+                ro_vim_item_update_ok.get("vim_details", ""),
+            )
+        )
+
         return "DONE", ro_vim_item_update_ok
 
     def refresh(self, ro_task):
         """Call VIM to get vm status"""
         ro_task_id = ro_task["_id"]
         target_vim = self.my_vims[ro_task["target_id"]]
-
         vim_id = ro_task["vim_info"]["vim_id"]
+
         if not vim_id:
             return None, None
+
         vm_to_refresh_list = [vim_id]
         try:
             vim_dict = target_vim.refresh_vms_status(vm_to_refresh_list)
             vim_info = vim_dict[vim_id]
+
             if vim_info["status"] == "ACTIVE":
                 task_status = "DONE"
             elif vim_info["status"] == "BUILD":
                 task_status = "BUILD"
             else:
                 task_status = "FAILED"
+
             # try to load and parse vim_information
             try:
                 vim_info_info = yaml.safe_load(vim_info["vim_info"])
@@ -334,34 +452,57 @@ class VimInteractionVdu(VimInteractionBase):
                 pass
         except vimconn.VimConnException as e:
             # Mark all tasks at VIM_ERROR status
-            self.logger.error("ro_task={} vim={} get-vm={}: {}".format(ro_task_id, ro_task["target_id"], vim_id, e))
+            self.logger.error(
+                "ro_task={} vim={} get-vm={}: {}".format(
+                    ro_task_id, ro_task["target_id"], vim_id, e
+                )
+            )
             vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
             task_status = "FAILED"
 
         ro_vim_item_update = {}
+
         # Interfaces cannot be present if e.g. VM is not present, that is status=DELETED
         vim_interfaces = []
         if vim_info.get("interfaces"):
             for vim_iface_id in ro_task["vim_info"]["interfaces_vim_ids"]:
-                iface = next((iface for iface in vim_info["interfaces"] if vim_iface_id == iface["vim_interface_id"]),
-                             None)
+                iface = next(
+                    (
+                        iface
+                        for iface in vim_info["interfaces"]
+                        if vim_iface_id == iface["vim_interface_id"]
+                    ),
+                    None,
+                )
                 # if iface:
                 #     iface.pop("vim_info", None)
                 vim_interfaces.append(iface)
 
-        task_create = next(t for t in ro_task["tasks"] if t and t["action"] == "CREATE" and t["status"] != "FINISHED")
+        task_create = next(
+            t
+            for t in ro_task["tasks"]
+            if t and t["action"] == "CREATE" and t["status"] != "FINISHED"
+        )
         if vim_interfaces and task_create.get("mgmt_vnf_interface") is not None:
-            vim_interfaces[task_create["mgmt_vnf_interface"]]["mgmt_vnf_interface"] = True
-        mgmt_vdu_iface = task_create.get("mgmt_vdu_interface", task_create.get("mgmt_vnf_interface", 0))
+            vim_interfaces[task_create["mgmt_vnf_interface"]][
+                "mgmt_vnf_interface"
+            ] = True
+
+        mgmt_vdu_iface = task_create.get(
+            "mgmt_vdu_interface", task_create.get("mgmt_vnf_interface", 0)
+        )
         if vim_interfaces:
             vim_interfaces[mgmt_vdu_iface]["mgmt_vdu_interface"] = True
 
         if ro_task["vim_info"]["interfaces"] != vim_interfaces:
             ro_vim_item_update["interfaces"] = vim_interfaces
+
         if ro_task["vim_info"]["vim_status"] != vim_info["status"]:
             ro_vim_item_update["vim_status"] = vim_info["status"]
+
         if ro_task["vim_info"]["vim_name"] != vim_info.get("name"):
             ro_vim_item_update["vim_name"] = vim_info.get("name")
+
         if vim_info["status"] in ("ERROR", "VIM_ERROR"):
             if ro_task["vim_info"]["vim_details"] != vim_info.get("error_msg"):
                 ro_vim_item_update["vim_details"] = vim_info.get("error_msg")
@@ -371,10 +512,20 @@ class VimInteractionVdu(VimInteractionBase):
         else:
             if ro_task["vim_info"]["vim_details"] != vim_info["vim_info"]:
                 ro_vim_item_update["vim_details"] = vim_info["vim_info"]
+
         if ro_vim_item_update:
-            self.logger.debug("ro_task={} {} get-vm={}: status={} {}".format(
-                ro_task_id, ro_task["target_id"], vim_id, ro_vim_item_update.get("vim_status"),
-                ro_vim_item_update.get("vim_details") if ro_vim_item_update.get("vim_status") != "ACTIVE" else ''))
+            self.logger.debug(
+                "ro_task={} {} get-vm={}: status={} {}".format(
+                    ro_task_id,
+                    ro_task["target_id"],
+                    vim_id,
+                    ro_vim_item_update.get("vim_status"),
+                    ro_vim_item_update.get("vim_details")
+                    if ro_vim_item_update.get("vim_status") != "ACTIVE"
+                    else "",
+                )
+            )
+
         return task_status, ro_vim_item_update
 
     def exec(self, ro_task, task_index, task_depends):
@@ -383,89 +534,142 @@ class VimInteractionVdu(VimInteractionBase):
         target_vim = self.my_vims[ro_task["target_id"]]
         db_task_update = {"retries": 0}
         retries = task.get("retries", 0)
+
         try:
             params = task["params"]
             params_copy = deepcopy(params)
-            params_copy["ro_key"] = self.db.decrypt(params_copy.pop("private_key"),
-                                                    params_copy.pop("schema_version"), params_copy.pop("salt"))
+            params_copy["ro_key"] = self.db.decrypt(
+                params_copy.pop("private_key"),
+                params_copy.pop("schema_version"),
+                params_copy.pop("salt"),
+            )
             params_copy["ip_addr"] = params_copy.pop("ip_address")
             target_vim.inject_user_key(**params_copy)
             self.logger.debug(
-                "task={} {} action-vm=inject_key".format(task_id, ro_task["target_id"]))
-            return "DONE", None, db_task_update,    # params_copy["key"]
+                "task={} {} action-vm=inject_key".format(task_id, ro_task["target_id"])
+            )
+
+            return (
+                "DONE",
+                None,
+                db_task_update,
+            )  # params_copy["key"]
         except (vimconn.VimConnException, NsWorkerException) as e:
             retries += 1
+
             if retries < self.max_retries_inject_ssh_key:
-                return "BUILD", None, {"retries": retries, "next_retry": self.time_retries_inject_ssh_key}
-            self.logger.error("task={} {} inject-ssh-key: {}".format(task_id, ro_task["target_id"], e))
+                return (
+                    "BUILD",
+                    None,
+                    {
+                        "retries": retries,
+                        "next_retry": self.time_retries_inject_ssh_key,
+                    },
+                )
+
+            self.logger.error(
+                "task={} {} inject-ssh-key: {}".format(task_id, ro_task["target_id"], e)
+            )
             ro_vim_item_update = {"vim_details": str(e)}
+
             return "FAILED", ro_vim_item_update, db_task_update
 
 
 class VimInteractionImage(VimInteractionBase):
-
     def new(self, ro_task, task_index, task_depends):
         task = ro_task["tasks"][task_index]
         task_id = task["task_id"]
         created = False
         created_items = {}
         target_vim = self.my_vims[ro_task["target_id"]]
+
         try:
             # FIND
             if task.get("find_params"):
                 vim_images = target_vim.get_image_list(**task["find_params"])
+
                 if not vim_images:
-                    raise NsWorkerExceptionNotFound("Image not found with this criteria: '{}'".format(
-                        task["find_params"]))
+                    raise NsWorkerExceptionNotFound(
+                        "Image not found with this criteria: '{}'".format(
+                            task["find_params"]
+                        )
+                    )
                 elif len(vim_images) > 1:
                     raise NsWorkerException(
-                        "More than one network found with this criteria: '{}'".format(task["find_params"]))
+                        "More than one network found with this criteria: '{}'".format(
+                            task["find_params"]
+                        )
+                    )
                 else:
                     vim_image_id = vim_images[0]["id"]
 
-            ro_vim_item_update = {"vim_id": vim_image_id,
-                                  "vim_status": "DONE",
-                                  "created": created,
-                                  "created_items": created_items,
-                                  "vim_details": None}
+            ro_vim_item_update = {
+                "vim_id": vim_image_id,
+                "vim_status": "DONE",
+                "created": created,
+                "created_items": created_items,
+                "vim_details": None,
+            }
             self.logger.debug(
-                "task={} {} new-image={} created={}".format(task_id, ro_task["target_id"], vim_image_id, created))
+                "task={} {} new-image={} created={}".format(
+                    task_id, ro_task["target_id"], vim_image_id, created
+                )
+            )
+
             return "DONE", ro_vim_item_update
         except (NsWorkerException, vimconn.VimConnException) as e:
-            self.logger.error("task={} {} new-image: {}".format(task_id, ro_task["target_id"], e))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "created": created,
-                                  "vim_details": str(e)}
+            self.logger.error(
+                "task={} {} new-image: {}".format(task_id, ro_task["target_id"], e)
+            )
+            ro_vim_item_update = {
+                "vim_status": "VIM_ERROR",
+                "created": created,
+                "vim_details": str(e),
+            }
+
             return "FAILED", ro_vim_item_update
 
 
 class VimInteractionFlavor(VimInteractionBase):
-
     def delete(self, ro_task, task_index):
         task = ro_task["tasks"][task_index]
         task_id = task["task_id"]
         flavor_vim_id = ro_task["vim_info"]["vim_id"]
-        ro_vim_item_update_ok = {"vim_status": "DELETED",
-                                 "created": False,
-                                 "vim_details": "DELETED",
-                                 "vim_id": None}
+        ro_vim_item_update_ok = {
+            "vim_status": "DELETED",
+            "created": False,
+            "vim_details": "DELETED",
+            "vim_id": None,
+        }
+
         try:
             if flavor_vim_id:
                 target_vim = self.my_vims[ro_task["target_id"]]
                 target_vim.delete_flavor(flavor_vim_id)
-
         except vimconn.VimConnNotFoundException:
             ro_vim_item_update_ok["vim_details"] = "already deleted"
-
         except vimconn.VimConnException as e:
-            self.logger.error("ro_task={} vim={} del-flavor={}: {}".format(
-                ro_task["_id"], ro_task["target_id"], flavor_vim_id, e))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "vim_details": "Error while deleting: {}".format(e)}
+            self.logger.error(
+                "ro_task={} vim={} del-flavor={}: {}".format(
+                    ro_task["_id"], ro_task["target_id"], flavor_vim_id, e
+                )
+            )
+            ro_vim_item_update = {
+                "vim_status": "VIM_ERROR",
+                "vim_details": "Error while deleting: {}".format(e),
+            }
+
             return "FAILED", ro_vim_item_update
 
-        self.logger.debug("task={} {} del-flavor={} {}".format(
-            task_id, ro_task["target_id"], flavor_vim_id, ro_vim_item_update_ok.get("vim_details", "")))
+        self.logger.debug(
+            "task={} {} del-flavor={} {}".format(
+                task_id,
+                ro_task["target_id"],
+                flavor_vim_id,
+                ro_vim_item_update_ok.get("vim_details", ""),
+            )
+        )
+
         return "DONE", ro_vim_item_update_ok
 
     def new(self, ro_task, task_index, task_depends):
@@ -474,9 +678,11 @@ class VimInteractionFlavor(VimInteractionBase):
         created = False
         created_items = {}
         target_vim = self.my_vims[ro_task["target_id"]]
+
         try:
             # FIND
             vim_flavor_id = None
+
             if task.get("find_params"):
                 try:
                     flavor_data = task["find_params"]["flavor_data"]
@@ -490,24 +696,34 @@ class VimInteractionFlavor(VimInteractionBase):
                 vim_flavor_id = target_vim.new_flavor(flavor_data)
                 created = True
 
-            ro_vim_item_update = {"vim_id": vim_flavor_id,
-                                  "vim_status": "DONE",
-                                  "created": created,
-                                  "created_items": created_items,
-                                  "vim_details": None}
+            ro_vim_item_update = {
+                "vim_id": vim_flavor_id,
+                "vim_status": "DONE",
+                "created": created,
+                "created_items": created_items,
+                "vim_details": None,
+            }
             self.logger.debug(
-                "task={} {} new-flavor={} created={}".format(task_id, ro_task["target_id"], vim_flavor_id, created))
+                "task={} {} new-flavor={} created={}".format(
+                    task_id, ro_task["target_id"], vim_flavor_id, created
+                )
+            )
+
             return "DONE", ro_vim_item_update
         except (vimconn.VimConnException, NsWorkerException) as e:
-            self.logger.error("task={} vim={} new-flavor: {}".format(task_id, ro_task["target_id"], e))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "created": created,
-                                  "vim_details": str(e)}
+            self.logger.error(
+                "task={} vim={} new-flavor: {}".format(task_id, ro_task["target_id"], e)
+            )
+            ro_vim_item_update = {
+                "vim_status": "VIM_ERROR",
+                "created": created,
+                "vim_details": str(e),
+            }
+
             return "FAILED", ro_vim_item_update
 
 
 class VimInteractionSdnNet(VimInteractionBase):
-
     @staticmethod
     def _match_pci(port_pci, mapping):
         """
@@ -527,21 +743,34 @@ class VimInteractionSdnNet(VimInteractionBase):
         pci_index = 0
         while True:
             bracket_start = mapping.find("[", mapping_index)
+
             if bracket_start == -1:
                 break
+
             bracket_end = mapping.find("]", bracket_start)
             if bracket_end == -1:
                 break
+
             length = bracket_start - mapping_index
-            if length and port_pci[pci_index:pci_index + length] != mapping[mapping_index:bracket_start]:
+            if (
+                length
+                and port_pci[pci_index : pci_index + length]
+                != mapping[mapping_index:bracket_start]
+            ):
                 return False
-            if port_pci[pci_index + length] not in mapping[bracket_start+1:bracket_end]:
+
+            if (
+                port_pci[pci_index + length]
+                not in mapping[bracket_start + 1 : bracket_end]
+            ):
                 return False
+
             pci_index += length + 1
             mapping_index = bracket_end + 1
 
         if port_pci[pci_index:] != mapping[mapping_index:]:
             return False
+
         return True
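_match_pci above checks a concrete PCI address against a mapping pattern in which a bracketed group lists the characters accepted at that position, so "0000:af:1[01].[1357]" covers 0000:af:10.1 or 0000:af:11.7 but not 0000:af:12.1. A usage sketch; it is a @staticmethod, so it can be called on the class, and the import assumes the osm_ng_ro package is installed.

    # Usage sketch for the bracket-pattern matcher shown above.
    from osm_ng_ro.ns_thread import VimInteractionSdnNet

    pattern = "0000:af:1[01].[1357]"
    assert VimInteractionSdnNet._match_pci("0000:af:10.1", pattern)
    assert VimInteractionSdnNet._match_pci("0000:af:11.7", pattern)
    assert not VimInteractionSdnNet._match_pci("0000:af:12.1", pattern)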
 
     def _get_interfaces(self, vlds_to_connect, vim_account_id):
@@ -551,35 +780,49 @@ class VimInteractionSdnNet(VimInteractionBase):
         :return:
         """
         interfaces = []
+
         for vld in vlds_to_connect:
             table, _, db_id = vld.partition(":")
             db_id, _, vld = db_id.partition(":")
             _, _, vld_id = vld.partition(".")
+
             if table == "vnfrs":
                 q_filter = {"vim-account-id": vim_account_id, "_id": db_id}
                 iface_key = "vnf-vld-id"
             else:  # table == "nsrs"
                 q_filter = {"vim-account-id": vim_account_id, "nsr-id-ref": db_id}
                 iface_key = "ns-vld-id"
+
             db_vnfrs = self.db.get_list("vnfrs", q_filter=q_filter)
+
             for db_vnfr in db_vnfrs:
                 for vdu_index, vdur in enumerate(db_vnfr.get("vdur", ())):
                     for iface_index, interface in enumerate(vdur["interfaces"]):
-                        if interface.get(iface_key) == vld_id and \
-                                interface.get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
+                        if interface.get(iface_key) == vld_id and interface.get(
+                            "type"
+                        ) in ("SR-IOV", "PCI-PASSTHROUGH"):
                             # only SR-IOV o PT
                             interface_ = interface.copy()
-                            interface_["id"] = "vnfrs:{}:vdu.{}.interfaces.{}".format(db_vnfr["_id"], vdu_index,
-                                                                                      iface_index)
+                            interface_["id"] = "vnfrs:{}:vdu.{}.interfaces.{}".format(
+                                db_vnfr["_id"], vdu_index, iface_index
+                            )
+
                             if vdur.get("status") == "ERROR":
                                 interface_["status"] = "ERROR"
+
                             interfaces.append(interface_)
+
         return interfaces
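_get_interfaces above decodes VLD references of the form "vnfrs:<vnfr-id>:vld.<vld-id>" (or "nsrs:<nsr-id>:vld.<vld-id>") with successive str.partition calls instead of a regex. The same decoding as a standalone sketch; the function name is illustrative.

    # Illustrative decoder for the vld references handled above.
    def split_vld_ref(vld_ref):
        table, _, rest = vld_ref.partition(":")    # "vnfrs" or "nsrs"
        db_id, _, vld = rest.partition(":")        # record id, then "vld.<vld-id>"
        _, _, vld_id = vld.partition(".")
        return table, db_id, vld_id

    # e.g. split_vld_ref("vnfrs:1234:vld.mgmtnet") -> ("vnfrs", "1234", "mgmtnet")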
 
     def refresh(self, ro_task):
         # look for task create
-        task_create_index, _ = next(i_t for i_t in enumerate(ro_task["tasks"])
-                                    if i_t[1] and i_t[1]["action"] == "CREATE" and i_t[1]["status"] != "FINISHED")
+        task_create_index, _ = next(
+            i_t
+            for i_t in enumerate(ro_task["tasks"])
+            if i_t[1]
+            and i_t[1]["action"] == "CREATE"
+            and i_t[1]["status"] != "FINISHED"
+        )
 
         return self.new(ro_task, task_create_index, None)
 
@@ -600,17 +843,21 @@ class VimInteractionSdnNet(VimInteractionBase):
         created = ro_task["vim_info"].get("created", False)
 
         try:
-
             # CREATE
             params = task["params"]
             vlds_to_connect = params["vlds"]
             associated_vim = params["target_vim"]
-            additional_ports = params.get("sdn-ports") or ()  # external additional ports
+            # external additional ports
+            additional_ports = params.get("sdn-ports") or ()
             _, _, vim_account_id = associated_vim.partition(":")
+
             if associated_vim:
                 # get associated VIM
                 if associated_vim not in self.db_vims:
-                    self.db_vims[associated_vim] = self.db.get_one("vim_accounts", {"_id": vim_account_id})
+                    self.db_vims[associated_vim] = self.db.get_one(
+                        "vim_accounts", {"_id": vim_account_id}
+                    )
+
                 db_vim = self.db_vims[associated_vim]
 
             # look for ports to connect
@@ -621,8 +868,10 @@ class VimInteractionSdnNet(VimInteractionBase):
             pending_ports = error_ports = 0
             vlan_used = None
             sdn_need_update = False
+
             for port in ports:
                 vlan_used = port.get("vlan") or vlan_used
+
                 # TODO. Do not connect if already done
                 if not port.get("compute_node") or not port.get("pci"):
                     if port.get("status") == "ERROR":
@@ -630,33 +879,56 @@ class VimInteractionSdnNet(VimInteractionBase):
                     else:
                         pending_ports += 1
                     continue
+
                 pmap = None
-                compute_node_mappings = next((c for c in db_vim["config"].get("sdn-port-mapping", ())
-                                             if c and c["compute_node"] == port["compute_node"]), None)
+                compute_node_mappings = next(
+                    (
+                        c
+                        for c in db_vim["config"].get("sdn-port-mapping", ())
+                        if c and c["compute_node"] == port["compute_node"]
+                    ),
+                    None,
+                )
+
                 if compute_node_mappings:
                     # process port_mapping pci of type 0000:af:1[01].[1357]
-                    pmap = next((p for p in compute_node_mappings["ports"]
-                                 if self._match_pci(port["pci"], p.get("pci"))), None)
+                    pmap = next(
+                        (
+                            p
+                            for p in compute_node_mappings["ports"]
+                            if self._match_pci(port["pci"], p.get("pci"))
+                        ),
+                        None,
+                    )
+
                 if not pmap:
                     if not db_vim["config"].get("mapping_not_needed"):
-                        error_list.append("Port mapping not found for compute_node={} pci={}".format(
-                            port["compute_node"], port["pci"]))
+                        error_list.append(
+                            "Port mapping not found for compute_node={} pci={}".format(
+                                port["compute_node"], port["pci"]
+                            )
+                        )
                         continue
+
                     pmap = {}
 
                 service_endpoint_id = "{}:{}".format(port["compute_node"], port["pci"])
                 new_port = {
-                    "service_endpoint_id": pmap.get("service_endpoint_id") or service_endpoint_id,
-                    "service_endpoint_encapsulation_type": "dot1q" if port["type"] == "SR-IOV" else None,
+                    "service_endpoint_id": pmap.get("service_endpoint_id")
+                    or service_endpoint_id,
+                    "service_endpoint_encapsulation_type": "dot1q"
+                    if port["type"] == "SR-IOV"
+                    else None,
                     "service_endpoint_encapsulation_info": {
                         "vlan": port.get("vlan"),
                         "mac": port.get("mac_address"),
-                        "device_id": pmap.get("device_id") or port["compute_node"],  # device_id
-                        "device_interface_id": pmap.get("device_interface_id") or port["pci"],
+                        "device_id": pmap.get("device_id") or port["compute_node"],
+                        "device_interface_id": pmap.get("device_interface_id")
+                        or port["pci"],
                         "switch_dpid": pmap.get("switch_id") or pmap.get("switch_dpid"),
                         "switch_port": pmap.get("switch_port"),
                         "service_mapping_info": pmap.get("service_mapping_info"),
-                    }
+                    },
                 }
 
                 # TODO
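
The lookup reformatted above pairs next() with a generator expression and a None default: first find the
sdn-port-mapping entry for the port's compute node, then the port entry whose PCI matches. A minimal standalone
sketch of that idiom with hypothetical mapping data (a plain equality test stands in for the _match_pci()
wildcard matching used above):

    # sdn-port-mapping lookup sketch (hypothetical data, illustrative only)
    sdn_port_mapping = [
        {
            "compute_node": "compute-1",
            "ports": [{"pci": "0000:af:10.1", "switch_port": "Eth1/1"}],
        },
    ]
    port = {"compute_node": "compute-1", "pci": "0000:af:10.1"}

    compute_node_mappings = next(
        (c for c in sdn_port_mapping if c and c["compute_node"] == port["compute_node"]),
        None,
    )
    pmap = None

    if compute_node_mappings:
        # equality stands in for the wildcard matching done by _match_pci()
        pmap = next(
            (p for p in compute_node_mappings["ports"] if p.get("pci") == port["pci"]),
            None,
        )

    print(pmap)  # -> {'pci': '0000:af:10.1', 'switch_port': 'Eth1/1'}
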
@@ -666,109 +938,179 @@ class VimInteractionSdnNet(VimInteractionBase):
                 sdn_ports.append(new_port)
 
             if error_ports:
-                error_list.append("{} interfaces have not been created as VDU is on ERROR status".format(error_ports))
+                error_list.append(
+                    "{} interfaces have not been created as VDU is on ERROR status".format(
+                        error_ports
+                    )
+                )
 
             # connect external ports
             for index, additional_port in enumerate(additional_ports):
-                additional_port_id = additional_port.get("service_endpoint_id") or "external-{}".format(index)
-                sdn_ports.append({
-                    "service_endpoint_id": additional_port_id,
-                    "service_endpoint_encapsulation_type": additional_port.get("service_endpoint_encapsulation_type",
-                                                                               "dot1q"),
-                    "service_endpoint_encapsulation_info": {
-                        "vlan": additional_port.get("vlan") or vlan_used,
-                        "mac": additional_port.get("mac_address"),
-                        "device_id": additional_port.get("device_id"),
-                        "device_interface_id": additional_port.get("device_interface_id"),
-                        "switch_dpid": additional_port.get("switch_dpid") or additional_port.get("switch_id"),
-                        "switch_port": additional_port.get("switch_port"),
-                        "service_mapping_info": additional_port.get("service_mapping_info"),
-                    }})
+                additional_port_id = additional_port.get(
+                    "service_endpoint_id"
+                ) or "external-{}".format(index)
+                sdn_ports.append(
+                    {
+                        "service_endpoint_id": additional_port_id,
+                        "service_endpoint_encapsulation_type": additional_port.get(
+                            "service_endpoint_encapsulation_type", "dot1q"
+                        ),
+                        "service_endpoint_encapsulation_info": {
+                            "vlan": additional_port.get("vlan") or vlan_used,
+                            "mac": additional_port.get("mac_address"),
+                            "device_id": additional_port.get("device_id"),
+                            "device_interface_id": additional_port.get(
+                                "device_interface_id"
+                            ),
+                            "switch_dpid": additional_port.get("switch_dpid")
+                            or additional_port.get("switch_id"),
+                            "switch_port": additional_port.get("switch_port"),
+                            "service_mapping_info": additional_port.get(
+                                "service_mapping_info"
+                            ),
+                        },
+                    }
+                )
                 new_connected_ports.append(additional_port_id)
             sdn_info = ""
+
             # if there are more ports to connect or they have been modified, call create/update
             if error_list:
                 sdn_status = "ERROR"
                 sdn_info = "; ".join(error_list)
             elif set(connected_ports) != set(new_connected_ports) or sdn_need_update:
                 last_update = time.time()
+
                 if not sdn_net_id:
                     if len(sdn_ports) < 2:
                         sdn_status = "ACTIVE"
+
                         if not pending_ports:
-                            self.logger.debug("task={} {} new-sdn-net done, less than 2 ports".
-                                              format(task_id, ro_task["target_id"]))
+                            self.logger.debug(
+                                "task={} {} new-sdn-net done, less than 2 ports".format(
+                                    task_id, ro_task["target_id"]
+                                )
+                            )
                     else:
                         net_type = params.get("type") or "ELAN"
-                        sdn_net_id, created_items = target_vim.create_connectivity_service(
-                            net_type, sdn_ports)
+                        (
+                            sdn_net_id,
+                            created_items,
+                        ) = target_vim.create_connectivity_service(net_type, sdn_ports)
                         created = True
-                        self.logger.debug("task={} {} new-sdn-net={} created={}".
-                                          format(task_id, ro_task["target_id"], sdn_net_id, created))
+                        self.logger.debug(
+                            "task={} {} new-sdn-net={} created={}".format(
+                                task_id, ro_task["target_id"], sdn_net_id, created
+                            )
+                        )
                 else:
                     created_items = target_vim.edit_connectivity_service(
-                        sdn_net_id, conn_info=created_items, connection_points=sdn_ports)
+                        sdn_net_id, conn_info=created_items, connection_points=sdn_ports
+                    )
                     created = True
-                    self.logger.debug("task={} {} update-sdn-net={} created={}".
-                                      format(task_id, ro_task["target_id"], sdn_net_id, created))
+                    self.logger.debug(
+                        "task={} {} update-sdn-net={} created={}".format(
+                            task_id, ro_task["target_id"], sdn_net_id, created
+                        )
+                    )
+
                 connected_ports = new_connected_ports
             elif sdn_net_id:
-                wim_status_dict = target_vim.get_connectivity_service_status(sdn_net_id, conn_info=created_items)
+                wim_status_dict = target_vim.get_connectivity_service_status(
+                    sdn_net_id, conn_info=created_items
+                )
                 sdn_status = wim_status_dict["sdn_status"]
+
                 if wim_status_dict.get("sdn_info"):
                     sdn_info = str(wim_status_dict.get("sdn_info")) or ""
+
                 if wim_status_dict.get("error_msg"):
                     sdn_info = wim_status_dict.get("error_msg") or ""
 
             if pending_ports:
                 if sdn_status != "ERROR":
                     sdn_info = "Waiting for getting interfaces location from VIM. Obtained '{}' of {}".format(
-                        len(ports)-pending_ports, len(ports))
+                        len(ports) - pending_ports, len(ports)
+                    )
+
                 if sdn_status == "ACTIVE":
                     sdn_status = "BUILD"
 
-            ro_vim_item_update = {"vim_id": sdn_net_id,
-                                  "vim_status": sdn_status,
-                                  "created": created,
-                                  "created_items": created_items,
-                                  "connected_ports": connected_ports,
-                                  "vim_details": sdn_info,
-                                  "last_update": last_update}
+            ro_vim_item_update = {
+                "vim_id": sdn_net_id,
+                "vim_status": sdn_status,
+                "created": created,
+                "created_items": created_items,
+                "connected_ports": connected_ports,
+                "vim_details": sdn_info,
+                "last_update": last_update,
+            }
+
             return sdn_status, ro_vim_item_update
         except Exception as e:
-            self.logger.error("task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e),
-                              exc_info=not isinstance(e, (sdnconn.SdnConnectorError, vimconn.VimConnException)))
-            ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                  "created": created,
-                                  "vim_details": str(e)}
+            self.logger.error(
+                "task={} vim={} new-net: {}".format(task_id, ro_task["target_id"], e),
+                exc_info=not isinstance(
+                    e, (sdnconn.SdnConnectorError, vimconn.VimConnException)
+                ),
+            )
+            ro_vim_item_update = {
+                "vim_status": "VIM_ERROR",
+                "created": created,
+                "vim_details": str(e),
+            }
+
             return "FAILED", ro_vim_item_update
 
     def delete(self, ro_task, task_index):
         task = ro_task["tasks"][task_index]
         task_id = task["task_id"]
         sdn_vim_id = ro_task["vim_info"].get("vim_id")
-        ro_vim_item_update_ok = {"vim_status": "DELETED",
-                                 "created": False,
-                                 "vim_details": "DELETED",
-                                 "vim_id": None}
+        ro_vim_item_update_ok = {
+            "vim_status": "DELETED",
+            "created": False,
+            "vim_details": "DELETED",
+            "vim_id": None,
+        }
+
         try:
             if sdn_vim_id:
                 target_vim = self.my_vims[ro_task["target_id"]]
-                target_vim.delete_connectivity_service(sdn_vim_id, ro_task["vim_info"].get("created_items"))
+                target_vim.delete_connectivity_service(
+                    sdn_vim_id, ro_task["vim_info"].get("created_items")
+                )
 
         except Exception as e:
-            if isinstance(e, sdnconn.SdnConnectorError) and e.http_code == HTTPStatus.NOT_FOUND.value:
+            if (
+                isinstance(e, sdnconn.SdnConnectorError)
+                and e.http_code == HTTPStatus.NOT_FOUND.value
+            ):
                 ro_vim_item_update_ok["vim_details"] = "already deleted"
             else:
-                self.logger.error("ro_task={} vim={} del-sdn-net={}: {}".format(ro_task["_id"], ro_task["target_id"],
-                                                                                sdn_vim_id, e),
-                                  exc_info=not isinstance(e, (sdnconn.SdnConnectorError, vimconn.VimConnException)))
-                ro_vim_item_update = {"vim_status": "VIM_ERROR",
-                                      "vim_details": "Error while deleting: {}".format(e)}
+                self.logger.error(
+                    "ro_task={} vim={} del-sdn-net={}: {}".format(
+                        ro_task["_id"], ro_task["target_id"], sdn_vim_id, e
+                    ),
+                    exc_info=not isinstance(
+                        e, (sdnconn.SdnConnectorError, vimconn.VimConnException)
+                    ),
+                )
+                ro_vim_item_update = {
+                    "vim_status": "VIM_ERROR",
+                    "vim_details": "Error while deleting: {}".format(e),
+                }
+
                 return "FAILED", ro_vim_item_update
 
-        self.logger.debug("task={} {} del-sdn-net={} {}".format(task_id, ro_task["target_id"], sdn_vim_id,
-                                                                ro_vim_item_update_ok.get("vim_details", "")))
+        self.logger.debug(
+            "task={} {} del-sdn-net={} {}".format(
+                task_id,
+                ro_task["target_id"],
+                sdn_vim_id,
+                ro_vim_item_update_ok.get("vim_details", ""),
+            )
+        )
+
         return "DONE", ro_vim_item_update_ok
 
 
@@ -793,24 +1135,35 @@ class NsWorker(threading.Thread):
         self.config = config
         self.plugins = plugins
         self.plugin_name = "unknown"
-        self.logger = logging.getLogger('ro.worker{}'.format(worker_index))
+        self.logger = logging.getLogger("ro.worker{}".format(worker_index))
         self.worker_index = worker_index
         self.task_queue = queue.Queue(self.QUEUE_SIZE)
-        self.my_vims = {}   # targetvim: vimplugin class
-        self.db_vims = {}   # targetvim: vim information from database
-        self.vim_targets = []   # targetvim list
+        # targetvim: vimplugin class
+        self.my_vims = {}
+        # targetvim: vim information from database
+        self.db_vims = {}
+        # targetvim list
+        self.vim_targets = []
         self.my_id = config["process_id"] + ":" + str(worker_index)
         self.db = db
         self.item2class = {
             "net": VimInteractionNet(self.db, self.my_vims, self.db_vims, self.logger),
             "vdu": VimInteractionVdu(self.db, self.my_vims, self.db_vims, self.logger),
-            "image": VimInteractionImage(self.db, self.my_vims, self.db_vims, self.logger),
-            "flavor": VimInteractionFlavor(self.db, self.my_vims, self.db_vims, self.logger),
-            "sdn_net": VimInteractionSdnNet(self.db, self.my_vims, self.db_vims, self.logger),
+            "image": VimInteractionImage(
+                self.db, self.my_vims, self.db_vims, self.logger
+            ),
+            "flavor": VimInteractionFlavor(
+                self.db, self.my_vims, self.db_vims, self.logger
+            ),
+            "sdn_net": VimInteractionSdnNet(
+                self.db, self.my_vims, self.db_vims, self.logger
+            ),
         }
         self.time_last_task_processed = None
-        self.tasks_to_delete = []  # lists of tasks to delete because nsrs or vnfrs has been deleted from db
-        self.idle = True  # it is idle when there are not vim_targets associated
+        # lists of tasks to delete because the nsrs or vnfrs have been deleted from the db
+        self.tasks_to_delete = []
+        # the worker is idle when there are no vim_targets associated
+        self.idle = True
         self.task_locked_time = config["global"]["task_locked_time"]
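
The item2class dict built above is a plain dispatch table: each task carries an "item" field and the worker
routes it to the matching VimInteraction* handler. A toy sketch of the same idea with placeholder handlers
(the handler names and return values below are illustrative, not part of the RO code):

    # dispatch-table sketch with placeholder handlers (illustrative only)
    class NetHandler:
        def new(self, ro_task, task_index, task_depends):
            return "BUILD", {"vim_id": "net-0001"}

    class VduHandler:
        def new(self, ro_task, task_index, task_depends):
            return "BUILD", {"vim_id": "vdu-0001"}

    item2class = {"net": NetHandler(), "vdu": VduHandler()}

    task = {"item": "net"}
    status, vim_update = item2class[task["item"]].new({}, 0, {})
    print(status, vim_update)  # -> BUILD {'vim_id': 'net-0001'}
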
 
     def insert_task(self, task):
@@ -841,37 +1194,51 @@ class NsWorker(threading.Thread):
         """
         if not db_vim.get("config"):
             return
+
         file_name = ""
+
         try:
             if db_vim["config"].get("ca_cert_content"):
                 file_name = "{}:{}".format(target_id, self.worker_index)
+
                 try:
                     mkdir(file_name)
                 except FileExistsError:
                     pass
+
                 file_name = file_name + "/ca_cert"
+
                 with open(file_name, "w") as f:
                     f.write(db_vim["config"]["ca_cert_content"])
                     del db_vim["config"]["ca_cert_content"]
                     db_vim["config"]["ca_cert"] = file_name
         except Exception as e:
-            raise NsWorkerException("Error writing to file '{}': {}".format(file_name, e))
+            raise NsWorkerException(
+                "Error writing to file '{}': {}".format(file_name, e)
+            )
 
     def _load_plugin(self, name, type="vim"):
         # type can be vim or sdn
         if "rovim_dummy" not in self.plugins:
             self.plugins["rovim_dummy"] = VimDummyConnector
+
         if "rosdn_dummy" not in self.plugins:
             self.plugins["rosdn_dummy"] = SdnDummyConnector
+
         if name in self.plugins:
             return self.plugins[name]
+
         try:
-            for v in iter_entry_points('osm_ro{}.plugins'.format(type), name):
+            for v in iter_entry_points("osm_ro{}.plugins".format(type), name):
                 self.plugins[name] = v.load()
         except Exception as e:
             raise NsWorkerException("Cannot load plugin osm_{}: {}".format(name, e))
+
         if name and name not in self.plugins:
-            raise NsWorkerException("Plugin 'osm_{n}' has not been installed".format(n=name))
+            raise NsWorkerException(
+                "Plugin 'osm_{n}' has not been installed".format(n=name)
+            )
+
         return self.plugins[name]
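
_load_plugin() above resolves connector classes through setuptools entry points and caches them per process.
A condensed sketch of the same discovery pattern (the entry-point group name follows the format string above;
no particular plugin is assumed to be installed):

    # entry-point discovery sketch, mirroring _load_plugin() above
    from pkg_resources import iter_entry_points

    def load_connector_class(plugins, name, type="vim"):
        if name in plugins:
            # already resolved, reuse the cached class
            return plugins[name]

        # scan the osm_rovim.plugins / osm_rosdn.plugins entry-point group
        for entry_point in iter_entry_points("osm_ro{}.plugins".format(type), name):
            plugins[name] = entry_point.load()

        if name not in plugins:
            raise RuntimeError("Plugin 'osm_{}' has not been installed".format(name))

        return plugins[name]
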
 
     def _unload_vim(self, target_id):
@@ -883,8 +1250,10 @@ class NsWorker(threading.Thread):
         try:
             self.db_vims.pop(target_id, None)
             self.my_vims.pop(target_id, None)
+
             if target_id in self.vim_targets:
                 self.vim_targets.remove(target_id)
+
             self.logger.info("Unloaded {}".format(target_id))
             rmtree("{}:{}".format(target_id, self.worker_index))
         except FileNotFoundError:
@@ -905,42 +1274,66 @@ class NsWorker(threading.Thread):
         op_text = ""
         step = ""
         loaded = target_id in self.vim_targets
-        target_database = "vim_accounts" if target == "vim" else "wim_accounts" if target == "wim" else "sdns"
+        target_database = (
+            "vim_accounts"
+            if target == "vim"
+            else "wim_accounts"
+            if target == "wim"
+            else "sdns"
+        )
+
         try:
             step = "Getting {} from db".format(target_id)
             db_vim = self.db.get_one(target_database, {"_id": _id})
-            for op_index, operation in enumerate(db_vim["_admin"].get("operations", ())):
+
+            for op_index, operation in enumerate(
+                db_vim["_admin"].get("operations", ())
+            ):
                 if operation["operationState"] != "PROCESSING":
                     continue
+
                 locked_at = operation.get("locked_at")
+
                 if locked_at is not None and locked_at >= now - self.task_locked_time:
                     # some other thread is doing this operation
                     return
+
                 # lock
                 op_text = "_admin.operations.{}.".format(op_index)
-                if not self.db.set_one(target_database,
-                                       q_filter={"_id": _id,
-                                                 op_text + "operationState": "PROCESSING",
-                                                 op_text + "locked_at": locked_at
-                                                 },
-                                       update_dict={op_text + "locked_at": now,
-                                                    "admin.current_operation": op_index},
-                                       fail_on_empty=False):
+
+                if not self.db.set_one(
+                    target_database,
+                    q_filter={
+                        "_id": _id,
+                        op_text + "operationState": "PROCESSING",
+                        op_text + "locked_at": locked_at,
+                    },
+                    update_dict={
+                        op_text + "locked_at": now,
+                        "admin.current_operation": op_index,
+                    },
+                    fail_on_empty=False,
+                ):
                     return
+
                 unset_dict[op_text + "locked_at"] = None
                 unset_dict["current_operation"] = None
                 step = "Loading " + target_id
                 error_text = self._load_vim(target_id)
+
                 if not error_text:
                     step = "Checking connectivity"
-                    if target == 'vim':
+
+                    if target == "vim":
                         self.my_vims[target_id].check_vim_connectivity()
                     else:
                         self.my_vims[target_id].check_credentials()
+
                 update_dict["_admin.operationalState"] = "ENABLED"
                 update_dict["_admin.detailed-status"] = ""
                 unset_dict[op_text + "detailed-status"] = None
                 update_dict[op_text + "operationState"] = "COMPLETED"
+
                 return
 
         except Exception as e:
@@ -955,10 +1348,18 @@ class NsWorker(threading.Thread):
                     unset_dict.pop(op_text + "detailed-status", None)
                     update_dict["_admin.operationalState"] = "ERROR"
                     update_dict["_admin.detailed-status"] = error_text
+
                 if op_text:
                     update_dict[op_text + "statusEnteredTime"] = now
-                self.db.set_one(target_database, q_filter={"_id": _id}, update_dict=update_dict, unset=unset_dict,
-                                fail_on_empty=False)
+
+                self.db.set_one(
+                    target_database,
+                    q_filter={"_id": _id},
+                    update_dict=update_dict,
+                    unset=unset_dict,
+                    fail_on_empty=False,
+                )
+
             if not loaded:
                 self._unload_vim(target_id)
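
The operation handling above claims a PROCESSING operation with a conditional update: set_one() only matches
while operationState and locked_at still hold the values that were just read, so whichever worker re-locks the
document first wins. A hedged sketch of that claim step against a db object with the same set_one() signature:

    # conditional-update lock sketch, following the set_one() call above
    import time

    def try_lock_operation(db, target_database, _id, op_index, observed_locked_at):
        now = time.time()
        op_text = "_admin.operations.{}.".format(op_index)

        # matches (and locks) only if nobody re-locked the operation meanwhile
        return bool(
            db.set_one(
                target_database,
                q_filter={
                    "_id": _id,
                    op_text + "operationState": "PROCESSING",
                    op_text + "locked_at": observed_locked_at,
                },
                update_dict={op_text + "locked_at": now},
                fail_on_empty=False,
            )
        )
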
 
@@ -980,9 +1381,16 @@ class NsWorker(threading.Thread):
         :return: None if ok, descriptive text if error
         """
         target, _, _id = target_id.partition(":")
-        target_database = "vim_accounts" if target == "vim" else "wim_accounts" if target == "wim" else "sdns"
+        target_database = (
+            "vim_accounts"
+            if target == "vim"
+            else "wim_accounts"
+            if target == "wim"
+            else "sdns"
+        )
         plugin_name = ""
         vim = None
+
         try:
             step = "Getting {}={} from db".format(target, _id)
             # TODO process for wim, sdnc, ...
@@ -994,20 +1402,31 @@ class NsWorker(threading.Thread):
 
             step = "Decrypting password"
             schema_version = vim.get("schema_version")
-            self.db.encrypt_decrypt_fields(vim, "decrypt", fields=('password', 'secret'),
-                                           schema_version=schema_version, salt=_id)
+            self.db.encrypt_decrypt_fields(
+                vim,
+                "decrypt",
+                fields=("password", "secret"),
+                schema_version=schema_version,
+                salt=_id,
+            )
             self._process_vim_config(target_id, vim)
+
             if target == "vim":
                 plugin_name = "rovim_" + vim["vim_type"]
                 step = "Loading plugin '{}'".format(plugin_name)
                 vim_module_conn = self._load_plugin(plugin_name)
                 step = "Loading {}'".format(target_id)
                 self.my_vims[target_id] = vim_module_conn(
-                    uuid=vim['_id'], name=vim['name'],
-                    tenant_id=vim.get('vim_tenant_id'), tenant_name=vim.get('vim_tenant_name'),
-                    url=vim['vim_url'], url_admin=None,
-                    user=vim['vim_user'], passwd=vim['vim_password'],
-                    config=vim.get('config') or {}, persistent_info={}
+                    uuid=vim["_id"],
+                    name=vim["name"],
+                    tenant_id=vim.get("vim_tenant_id"),
+                    tenant_name=vim.get("vim_tenant_name"),
+                    url=vim["vim_url"],
+                    url_admin=None,
+                    user=vim["vim_user"],
+                    passwd=vim["vim_password"],
+                    config=vim.get("config") or {},
+                    persistent_info={},
                 )
             else:  # sdn
                 plugin_name = "rosdn_" + vim["type"]
@@ -1018,20 +1437,32 @@ class NsWorker(threading.Thread):
                 wim_config = wim.pop("config", {}) or {}
                 wim["uuid"] = wim["_id"]
                 wim["wim_url"] = wim["url"]
+
                 if wim.get("dpid"):
                     wim_config["dpid"] = wim.pop("dpid")
+
                 if wim.get("switch_id"):
                     wim_config["switch_id"] = wim.pop("switch_id")
-                self.my_vims[target_id] = vim_module_conn(wim, wim, wim_config)  # wim, wim_account, config
+
+                # wim, wim_account, config
+                self.my_vims[target_id] = vim_module_conn(wim, wim, wim_config)
             self.db_vims[target_id] = vim
             self.error_status = None
-            self.logger.info("Connector loaded for {}, plugin={}".format(target_id, plugin_name))
+
+            self.logger.info(
+                "Connector loaded for {}, plugin={}".format(target_id, plugin_name)
+            )
         except Exception as e:
-            self.logger.error("Cannot load {} plugin={}: {} {}".format(
-                target_id, plugin_name, step, e))
+            self.logger.error(
+                "Cannot load {} plugin={}: {} {}".format(
+                    target_id, plugin_name, step, e
+                )
+            )
+
             self.db_vims[target_id] = vim or {}
             self.db_vims[target_id] = FailingConnector(str(e))
             error_status = "{} Error: {}".format(step, e)
+
             return error_status
         finally:
             if target_id not in self.vim_targets:
@@ -1043,26 +1474,36 @@ class NsWorker(threading.Thread):
         :return: None
         """
         now = time.time()
+
         if not self.time_last_task_processed:
             self.time_last_task_processed = now
+
         try:
             while True:
                 locked = self.db.set_one(
                     "ro_tasks",
-                    q_filter={"target_id": self.vim_targets,
-                              "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
-                              "locked_at.lt": now - self.task_locked_time,
-                              "to_check_at.lt": self.time_last_task_processed},
+                    q_filter={
+                        "target_id": self.vim_targets,
+                        "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+                        "locked_at.lt": now - self.task_locked_time,
+                        "to_check_at.lt": self.time_last_task_processed,
+                    },
                     update_dict={"locked_by": self.my_id, "locked_at": now},
-                    fail_on_empty=False)
+                    fail_on_empty=False,
+                )
+
                 if locked:
                     # read and return
                     ro_task = self.db.get_one(
                         "ro_tasks",
-                        q_filter={"target_id": self.vim_targets,
-                                  "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
-                                  "locked_at": now})
+                        q_filter={
+                            "target_id": self.vim_targets,
+                            "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+                            "locked_at": now,
+                        },
+                    )
                     return ro_task
+
                 if self.time_last_task_processed == now:
                     self.time_last_task_processed = None
                     return None
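
_get_db_task() above is an optimistic lock in two steps: a conditional set_one() stamps locked_by/locked_at on a
matching ro_task, and only if something matched is the task read back using the freshly written locked_at. A
condensed sketch of the claim-then-read sequence (db is assumed to expose the osm_common set_one()/get_one()
API used above):

    # claim-then-read sketch of the optimistic ro_task lock above
    import time

    def claim_one_ro_task(db, vim_targets, my_id, task_locked_time, last_processed):
        now = time.time()

        # step 1: stamp the lock only on unlocked (or expired) tasks
        locked = db.set_one(
            "ro_tasks",
            q_filter={
                "target_id": vim_targets,
                "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
                "locked_at.lt": now - task_locked_time,
                "to_check_at.lt": last_processed,
            },
            update_dict={"locked_by": my_id, "locked_at": now},
            fail_on_empty=False,
        )

        if not locked:
            return None

        # step 2: read back the task just locked; locked_at == now identifies it
        return db.get_one(
            "ro_tasks",
            q_filter={
                "target_id": vim_targets,
                "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
                "locked_at": now,
            },
        )
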
@@ -1073,7 +1514,10 @@ class NsWorker(threading.Thread):
         except DbException as e:
             self.logger.error("Database exception at _get_db_task: {}".format(e))
         except Exception as e:
-            self.logger.critical("Unexpected exception at _get_db_task: {}".format(e), exc_info=True)
+            self.logger.critical(
+                "Unexpected exception at _get_db_task: {}".format(e), exc_info=True
+            )
+
         return None
 
     def _delete_task(self, ro_task, task_index, task_depends, db_update):
@@ -1083,26 +1527,45 @@ class NsWorker(threading.Thread):
         """
         my_task = ro_task["tasks"][task_index]
         task_id = my_task["task_id"]
-        needed_delete = ro_task["vim_info"]["created"] or ro_task["vim_info"].get("created_items", False)
+        needed_delete = ro_task["vim_info"]["created"] or ro_task["vim_info"].get(
+            "created_items", False
+        )
+
         if my_task["status"] == "FAILED":
             return None, None  # TODO: does this need to be retried?
+
         try:
             for index, task in enumerate(ro_task["tasks"]):
                 if index == task_index or not task:
                     continue  # own task
-                if my_task["target_record"] == task["target_record"] and task["action"] == "CREATE":
+
+                if (
+                    my_task["target_record"] == task["target_record"]
+                    and task["action"] == "CREATE"
+                ):
                     # set to finished
-                    db_update["tasks.{}.status".format(index)] = task["status"] = "FINISHED"
-                elif task["action"] == "CREATE" and task["status"] not in ("FINISHED", "SUPERSEDED"):
+                    db_update["tasks.{}.status".format(index)] = task[
+                        "status"
+                    ] = "FINISHED"
+                elif task["action"] == "CREATE" and task["status"] not in (
+                    "FINISHED",
+                    "SUPERSEDED",
+                ):
                     needed_delete = False
+
             if needed_delete:
                 return self.item2class[my_task["item"]].delete(ro_task, task_index)
             else:
                 return "SUPERSEDED", None
         except Exception as e:
             if not isinstance(e, NsWorkerException):
-                self.logger.critical("Unexpected exception at _delete_task task={}: {}".format(task_id, e),
-                                     exc_info=True)
+                self.logger.critical(
+                    "Unexpected exception at _delete_task task={}: {}".format(
+                        task_id, e
+                    ),
+                    exc_info=True,
+                )
+
             return "FAILED", {"vim_status": "VIM_ERROR", "vim_details": str(e)}
 
     def _create_task(self, ro_task, task_index, task_depends, db_update):
@@ -1113,6 +1576,7 @@ class NsWorker(threading.Thread):
         my_task = ro_task["tasks"][task_index]
         task_id = my_task["task_id"]
         task_status = None
+
         if my_task["status"] == "FAILED":
             return None, None  # TODO: does this need to be retried?
         elif my_task["status"] == "SCHEDULED":
@@ -1120,19 +1584,29 @@ class NsWorker(threading.Thread):
             for index, task in enumerate(ro_task["tasks"]):
                 if index == task_index or not task:
                     continue  # own task
-                if task["action"] == "CREATE" and task["status"] not in ("SCHEDULED", "FINISHED", "SUPERSEDED"):
+
+                if task["action"] == "CREATE" and task["status"] not in (
+                    "SCHEDULED",
+                    "FINISHED",
+                    "SUPERSEDED",
+                ):
                     return task["status"], "COPY_VIM_INFO"
 
             try:
                 task_status, ro_vim_item_update = self.item2class[my_task["item"]].new(
-                    ro_task, task_index, task_depends)
+                    ro_task, task_index, task_depends
+                )
                 # TODO update other CREATE tasks
             except Exception as e:
                 if not isinstance(e, NsWorkerException):
-                    self.logger.error("Error executing task={}: {}".format(task_id, e), exc_info=True)
+                    self.logger.error(
+                        "Error executing task={}: {}".format(task_id, e), exc_info=True
+                    )
+
                 task_status = "FAILED"
                 ro_vim_item_update = {"vim_status": "VIM_ERROR", "vim_details": str(e)}
                 # TODO: update ro_vim_item_update
+
             return task_status, ro_vim_item_update
         else:
             return None, None
@@ -1148,16 +1622,20 @@ class NsWorker(threading.Thread):
         :param target_id:
         :return: database ro_task plus index of task
         """
-        if task_id.startswith("vim:") or task_id.startswith("sdn:") or task_id.startswith("wim:"):
+        if (
+            task_id.startswith("vim:")
+            or task_id.startswith("sdn:")
+            or task_id.startswith("wim:")
+        ):
             target_id, _, task_id = task_id.partition(" ")
 
         if task_id.startswith("nsrs:") or task_id.startswith("vnfrs:"):
             ro_task_dependency = self.db.get_one(
                 "ro_tasks",
-                q_filter={"target_id": target_id,
-                          "tasks.target_record_id": task_id
-                          },
-                fail_on_empty=False)
+                q_filter={"target_id": target_id, "tasks.target_record_id": task_id},
+                fail_on_empty=False,
+            )
+
             if ro_task_dependency:
                 for task_index, task in enumerate(ro_task_dependency["tasks"]):
                     if task["target_record_id"] == task_id:
@@ -1168,12 +1646,16 @@ class NsWorker(threading.Thread):
                 for task_index, task in enumerate(ro_task["tasks"]):
                     if task and task["task_id"] == task_id:
                         return ro_task, task_index
+
             ro_task_dependency = self.db.get_one(
                 "ro_tasks",
-                q_filter={"tasks.ANYINDEX.task_id": task_id,
-                          "tasks.ANYINDEX.target_record.ne": None
-                          },
-                fail_on_empty=False)
+                q_filter={
+                    "tasks.ANYINDEX.task_id": task_id,
+                    "tasks.ANYINDEX.target_record.ne": None,
+                },
+                fail_on_empty=False,
+            )
+
             if ro_task_dependency:
                 for task_index, task in ro_task_dependency["tasks"]:
                     if task["task_id"] == task_id:
@@ -1183,7 +1665,8 @@ class NsWorker(threading.Thread):
     def _process_pending_tasks(self, ro_task):
         ro_task_id = ro_task["_id"]
         now = time.time()
-        next_check_at = now + (24*60*60)   # one day
+        # one day
+        next_check_at = now + (24 * 60 * 60)
         db_ro_task_update = {}
 
         def _update_refresh(new_status):
@@ -1194,6 +1677,7 @@ class NsWorker(threading.Thread):
             nonlocal ro_task
 
             next_refresh = time.time()
+
             if task["item"] in ("image", "flavor"):
                 next_refresh += self.REFRESH_IMAGE
             elif new_status == "BUILD":
@@ -1202,6 +1686,7 @@ class NsWorker(threading.Thread):
                 next_refresh += self.REFRESH_ACTIVE
             else:
                 next_refresh += self.REFRESH_ERROR
+
             next_check_at = min(next_check_at, next_refresh)
             db_ro_task_update["vim_info.refresh_at"] = next_refresh
             ro_task["vim_info"]["refresh_at"] = next_refresh
@@ -1210,72 +1695,138 @@ class NsWorker(threading.Thread):
             # 0: get task_status_create
             lock_object = None
             task_status_create = None
-            task_create = next((t for t in ro_task["tasks"] if t and t["action"] == "CREATE" and
-                                t["status"] in ("BUILD", "DONE")), None)
+            task_create = next(
+                (
+                    t
+                    for t in ro_task["tasks"]
+                    if t
+                    and t["action"] == "CREATE"
+                    and t["status"] in ("BUILD", "DONE")
+                ),
+                None,
+            )
+
             if task_create:
                 task_status_create = task_create["status"]
+
             # 1: look for tasks in status SCHEDULED, or with action CREATE and status DONE or BUILD
             for task_action in ("DELETE", "CREATE", "EXEC"):
                 db_vim_update = None
                 new_status = None
+
                 for task_index, task in enumerate(ro_task["tasks"]):
                     if not task:
                         continue  # task deleted
+
                     task_depends = {}
                     target_update = None
-                    if (task_action in ("DELETE", "EXEC") and task["status"] not in ("SCHEDULED", "BUILD")) or \
-                            task["action"] != task_action or \
-                            (task_action == "CREATE" and task["status"] in ("FINISHED", "SUPERSEDED")):
+
+                    if (
+                        (
+                            task_action in ("DELETE", "EXEC")
+                            and task["status"] not in ("SCHEDULED", "BUILD")
+                        )
+                        or task["action"] != task_action
+                        or (
+                            task_action == "CREATE"
+                            and task["status"] in ("FINISHED", "SUPERSEDED")
+                        )
+                    ):
                         continue
+
                     task_path = "tasks.{}.status".format(task_index)
                     try:
                         db_vim_info_update = None
+
                         if task["status"] == "SCHEDULED":
                             # check if tasks that this depends on have been completed
                             dependency_not_completed = False
-                            for dependency_task_id in (task.get("depends_on") or ()):
-                                dependency_ro_task, dependency_task_index = \
-                                    self._get_dependency(dependency_task_id, target_id=ro_task["target_id"])
-                                dependency_task = dependency_ro_task["tasks"][dependency_task_index]
+
+                            for dependency_task_id in task.get("depends_on") or ():
+                                (
+                                    dependency_ro_task,
+                                    dependency_task_index,
+                                ) = self._get_dependency(
+                                    dependency_task_id, target_id=ro_task["target_id"]
+                                )
+                                dependency_task = dependency_ro_task["tasks"][
+                                    dependency_task_index
+                                ]
+
                                 if dependency_task["status"] == "SCHEDULED":
                                     dependency_not_completed = True
-                                    next_check_at = min(next_check_at, dependency_ro_task["to_check_at"])
+                                    next_check_at = min(
+                                        next_check_at, dependency_ro_task["to_check_at"]
+                                    )
                                     break
                                 elif dependency_task["status"] == "FAILED":
                                     error_text = "Cannot {} {} because depends on failed {} {} id={}): {}".format(
-                                        task["action"], task["item"], dependency_task["action"],
-                                        dependency_task["item"], dependency_task_id,
-                                        dependency_ro_task["vim_info"].get("vim_details"))
-                                    self.logger.error("task={} {}".format(task["task_id"], error_text))
+                                        task["action"],
+                                        task["item"],
+                                        dependency_task["action"],
+                                        dependency_task["item"],
+                                        dependency_task_id,
+                                        dependency_ro_task["vim_info"].get(
+                                            "vim_details"
+                                        ),
+                                    )
+                                    self.logger.error(
+                                        "task={} {}".format(task["task_id"], error_text)
+                                    )
                                     raise NsWorkerException(error_text)
 
-                                task_depends[dependency_task_id] = dependency_ro_task["vim_info"]["vim_id"]
-                                task_depends["TASK-{}".format(dependency_task_id)] = \
-                                    dependency_ro_task["vim_info"]["vim_id"]
+                                task_depends[dependency_task_id] = dependency_ro_task[
+                                    "vim_info"
+                                ]["vim_id"]
+                                task_depends[
+                                    "TASK-{}".format(dependency_task_id)
+                                ] = dependency_ro_task["vim_info"]["vim_id"]
+
                             if dependency_not_completed:
                                 # TODO set at vim_info.vim_details that it is waiting
                                 continue
+
                         # before calling the VIM plugin (it can take more than task_locked_time), register this
                         # lock with LockRenew so it keeps being renewed; it updates the database locked_at periodically
                         if not lock_object:
-                            lock_object = LockRenew.add_lock_object("ro_tasks", ro_task, self)
+                            lock_object = LockRenew.add_lock_object(
+                                "ro_tasks", ro_task, self
+                            )
+
                         if task["action"] == "DELETE":
-                            new_status, db_vim_info_update = self._delete_task(ro_task, task_index,
-                                                                               task_depends, db_ro_task_update)
-                            new_status = "FINISHED" if new_status == "DONE" else new_status
+                            (new_status, db_vim_info_update,) = self._delete_task(
+                                ro_task, task_index, task_depends, db_ro_task_update
+                            )
+                            new_status = (
+                                "FINISHED" if new_status == "DONE" else new_status
+                            )
                             # ^with FINISHED instead of DONE it will not be refreshing
+
                             if new_status in ("FINISHED", "SUPERSEDED"):
                                 target_update = "DELETE"
                         elif task["action"] == "EXEC":
-                            new_status, db_vim_info_update, db_task_update = self.item2class[task["item"]].exec(
-                                ro_task, task_index, task_depends)
-                            new_status = "FINISHED" if new_status == "DONE" else new_status
+                            (
+                                new_status,
+                                db_vim_info_update,
+                                db_task_update,
+                            ) = self.item2class[task["item"]].exec(
+                                ro_task, task_index, task_depends
+                            )
+                            new_status = (
+                                "FINISHED" if new_status == "DONE" else new_status
+                            )
                             # ^with FINISHED instead of DONE it will not be refreshing
+
                             if db_task_update:
                                 # load into database the modified db_task_update "retries" and "next_retry"
                                 if db_task_update.get("retries"):
-                                    db_ro_task_update["tasks.{}.retries".format(task_index)] = db_task_update["retries"]
-                                next_check_at = time.time() + db_task_update.get("next_retry", 60)
+                                    db_ro_task_update[
+                                        "tasks.{}.retries".format(task_index)
+                                    ] = db_task_update["retries"]
+
+                                next_check_at = time.time() + db_task_update.get(
+                                    "next_retry", 60
+                                )
                             target_update = None
                         elif task["action"] == "CREATE":
                             if task["status"] == "SCHEDULED":
@@ -1283,33 +1834,55 @@ class NsWorker(threading.Thread):
                                     new_status = task_status_create
                                     target_update = "COPY_VIM_INFO"
                                 else:
-                                    new_status, db_vim_info_update = \
-                                        self.item2class[task["item"]].new(ro_task, task_index, task_depends)
+                                    new_status, db_vim_info_update = self.item2class[
+                                        task["item"]
+                                    ].new(ro_task, task_index, task_depends)
                                     # self._create_task(ro_task, task_index, task_depends, db_ro_task_update)
                                     _update_refresh(new_status)
                             else:
-                                if ro_task["vim_info"]["refresh_at"] and now > ro_task["vim_info"]["refresh_at"]:
-                                    new_status, db_vim_info_update = self.item2class[task["item"]].refresh(ro_task)
+                                if (
+                                    ro_task["vim_info"]["refresh_at"]
+                                    and now > ro_task["vim_info"]["refresh_at"]
+                                ):
+                                    new_status, db_vim_info_update = self.item2class[
+                                        task["item"]
+                                    ].refresh(ro_task)
                                     _update_refresh(new_status)
+
                     except Exception as e:
                         new_status = "FAILED"
-                        db_vim_info_update = {"vim_status": "VIM_ERROR", "vim_details": str(e)}
-                        if not isinstance(e, (NsWorkerException, vimconn.VimConnException)):
-                            self.logger.error("Unexpected exception at _delete_task task={}: {}".
-                                              format(task["task_id"], e), exc_info=True)
+                        db_vim_info_update = {
+                            "vim_status": "VIM_ERROR",
+                            "vim_details": str(e),
+                        }
+
+                        if not isinstance(
+                            e, (NsWorkerException, vimconn.VimConnException)
+                        ):
+                            self.logger.error(
+                                "Unexpected exception at _delete_task task={}: {}".format(
+                                    task["task_id"], e
+                                ),
+                                exc_info=True,
+                            )
 
                     try:
                         if db_vim_info_update:
                             db_vim_update = db_vim_info_update.copy()
-                            db_ro_task_update.update({"vim_info." + k: v for k, v in db_vim_info_update.items()})
+                            db_ro_task_update.update(
+                                {
+                                    "vim_info." + k: v
+                                    for k, v in db_vim_info_update.items()
+                                }
+                            )
                             ro_task["vim_info"].update(db_vim_info_update)
 
                         if new_status:
                             if task_action == "CREATE":
                                 task_status_create = new_status
                             db_ro_task_update[task_path] = new_status
-                        if target_update or db_vim_update:
 
+                        if target_update or db_vim_update:
                             if target_update == "DELETE":
                                 self._update_target(task, None)
                             elif target_update == "COPY_VIM_INFO":
@@ -1318,21 +1891,39 @@ class NsWorker(threading.Thread):
                                 self._update_target(task, db_vim_update)
 
                     except Exception as e:
-                        if isinstance(e, DbException) and e.http_code == HTTPStatus.NOT_FOUND:
+                        if (
+                            isinstance(e, DbException)
+                            and e.http_code == HTTPStatus.NOT_FOUND
+                        ):
                             # if the vnfrs or nsrs has been removed from database, this task must be removed
-                            self.logger.debug("marking to delete task={}".format(task["task_id"]))
+                            self.logger.debug(
+                                "marking to delete task={}".format(task["task_id"])
+                            )
                             self.tasks_to_delete.append(task)
                         else:
-                            self.logger.error("Unexpected exception at _update_target task={}: {}".
-                                              format(task["task_id"], e), exc_info=True)
+                            self.logger.error(
+                                "Unexpected exception at _update_target task={}: {}".format(
+                                    task["task_id"], e
+                                ),
+                                exc_info=True,
+                            )
 
             locked_at = ro_task["locked_at"]
+
             if lock_object:
-                locked_at = [lock_object["locked_at"], lock_object["locked_at"] + self.task_locked_time]
+                locked_at = [
+                    lock_object["locked_at"],
+                    lock_object["locked_at"] + self.task_locked_time,
+                ]
                 # locked_at contains two times to avoid a race condition. In case the lock has been renewed, it will
                 # contain exactly locked_at + self.task_locked_time
                 LockRenew.remove_lock_object(lock_object)
-            q_filter = {"_id": ro_task["_id"], "to_check_at": ro_task["to_check_at"], "locked_at": locked_at}
+
+            q_filter = {
+                "_id": ro_task["_id"],
+                "to_check_at": ro_task["to_check_at"],
+                "locked_at": locked_at,
+            }
             # modify own task. Try filtering by to_check_at to avoid a race condition: if to_check_at has been
             # modified outside this task (by ro_nbi), do not update it
             db_ro_task_update["locked_by"] = None
@@ -1340,61 +1931,104 @@ class NsWorker(threading.Thread):
             db_ro_task_update["locked_at"] = int(now - self.task_locked_time)
             db_ro_task_update["modified_at"] = now
             db_ro_task_update["to_check_at"] = next_check_at
-            if not self.db.set_one("ro_tasks",
-                                   update_dict=db_ro_task_update,
-                                   q_filter=q_filter,
-                                   fail_on_empty=False):
+
+            if not self.db.set_one(
+                "ro_tasks",
+                update_dict=db_ro_task_update,
+                q_filter=q_filter,
+                fail_on_empty=False,
+            ):
                 del db_ro_task_update["to_check_at"]
                 del q_filter["to_check_at"]
-                self.db.set_one("ro_tasks",
-                                q_filter=q_filter,
-                                update_dict=db_ro_task_update,
-                                fail_on_empty=True)
+                self.db.set_one(
+                    "ro_tasks",
+                    q_filter=q_filter,
+                    update_dict=db_ro_task_update,
+                    fail_on_empty=True,
+                )
         except DbException as e:
-            self.logger.error("ro_task={} Error updating database {}".format(ro_task_id, e))
+            self.logger.error(
+                "ro_task={} Error updating database {}".format(ro_task_id, e)
+            )
         except Exception as e:
-            self.logger.error("Error executing ro_task={}: {}".format(ro_task_id, e), exc_info=True)
+            self.logger.error(
+                "Error executing ro_task={}: {}".format(ro_task_id, e), exc_info=True
+            )
 
     def _update_target(self, task, ro_vim_item_update):
         table, _, temp = task["target_record"].partition(":")
         _id, _, path_vim_status = temp.partition(":")
-        path_item = path_vim_status[:path_vim_status.rfind(".")]
-        path_item = path_item[:path_item.rfind(".")]
+        path_item = path_vim_status[: path_vim_status.rfind(".")]
+        path_item = path_item[: path_item.rfind(".")]
         # path_vim_status: dot separated list targeting vim information, e.g. "vdur.10.vim_info.vim:id"
         # path_item: dot separated list targeting record information, e.g. "vdur.10"
+
         if ro_vim_item_update:
-            update_dict = {path_vim_status + "." + k: v for k, v in ro_vim_item_update.items() if k in
-                           ('vim_id', 'vim_details', 'vim_name', 'vim_status', 'interfaces')}
+            update_dict = {
+                path_vim_status + "." + k: v
+                for k, v in ro_vim_item_update.items()
+                if k
+                in ("vim_id", "vim_details", "vim_name", "vim_status", "interfaces")
+            }
+
             if path_vim_status.startswith("vdur."):
                 # for backward compatibility, add vdur.name apart from vdur.vim_name
                 if ro_vim_item_update.get("vim_name"):
                     update_dict[path_item + ".name"] = ro_vim_item_update["vim_name"]
+
                 # for backward compatibility, add vdur.vim-id apart from vdur.vim_id
                 if ro_vim_item_update.get("vim_id"):
                     update_dict[path_item + ".vim-id"] = ro_vim_item_update["vim_id"]
+
                 # update general status
                 if ro_vim_item_update.get("vim_status"):
-                    update_dict[path_item + ".status"] = ro_vim_item_update["vim_status"]
+                    update_dict[path_item + ".status"] = ro_vim_item_update[
+                        "vim_status"
+                    ]
+
             if ro_vim_item_update.get("interfaces"):
                 path_interfaces = path_item + ".interfaces"
+
                 for i, iface in enumerate(ro_vim_item_update.get("interfaces")):
                     if iface:
-                        update_dict.update({path_interfaces + ".{}.".format(i) + k: v for k, v in iface.items() if
-                                            k in ('vlan', 'compute_node', 'pci')})
+                        update_dict.update(
+                            {
+                                path_interfaces + ".{}.".format(i) + k: v
+                                for k, v in iface.items()
+                                if k in ("vlan", "compute_node", "pci")
+                            }
+                        )
+
                         # also store ip_address and mac_address under the hyphenated ip-address and mac-address keys
-                        if iface.get('ip_address'):
-                            update_dict[path_interfaces + ".{}.".format(i) + "ip-address"] = iface['ip_address']
-                        if iface.get('mac_address'):
-                            update_dict[path_interfaces + ".{}.".format(i) + "mac-address"] = iface['mac_address']
+                        if iface.get("ip_address"):
+                            update_dict[
+                                path_interfaces + ".{}.".format(i) + "ip-address"
+                            ] = iface["ip_address"]
+
+                        if iface.get("mac_address"):
+                            update_dict[
+                                path_interfaces + ".{}.".format(i) + "mac-address"
+                            ] = iface["mac_address"]
+
                         if iface.get("mgmt_vnf_interface") and iface.get("ip_address"):
-                            update_dict["ip-address"] = iface.get("ip_address").split(";")[0]
+                            update_dict["ip-address"] = iface.get("ip_address").split(
+                                ";"
+                            )[0]
+
                         if iface.get("mgmt_vdu_interface") and iface.get("ip_address"):
-                            update_dict[path_item + ".ip-address"] = iface.get("ip_address").split(";")[0]
+                            update_dict[path_item + ".ip-address"] = iface.get(
+                                "ip_address"
+                            ).split(";")[0]
 
             self.db.set_one(table, q_filter={"_id": _id}, update_dict=update_dict)
         else:
             update_dict = {path_item + ".status": "DELETED"}
-            self.db.set_one(table, q_filter={"_id": _id}, update_dict=update_dict, unset={path_vim_status: None})
+            self.db.set_one(
+                table,
+                q_filter={"_id": _id},
+                update_dict=update_dict,
+                unset={path_vim_status: None},
+            )
 
     def _process_delete_db_tasks(self):
         """
@@ -1405,14 +2039,18 @@ class NsWorker(threading.Thread):
             task = self.tasks_to_delete[0]
             vnfrs_deleted = None
             nsr_id = task["nsr_id"]
+
             if task["target_record"].startswith("vnfrs:"):
                 # check if nsrs is present
                 if self.db.get_one("nsrs", {"_id": nsr_id}, fail_on_empty=False):
                     vnfrs_deleted = task["target_record"].split(":")[1]
+
             try:
                 self.delete_db_tasks(self.db, nsr_id, vnfrs_deleted)
             except Exception as e:
-                self.logger.error("Error deleting task={}: {}".format(task["task_id"], e))
+                self.logger.error(
+                    "Error deleting task={}: {}".format(task["task_id"], e)
+                )
             self.tasks_to_delete.pop(0)
 
     @staticmethod
@@ -1429,29 +2067,45 @@ class NsWorker(threading.Thread):
             ro_tasks = db.get_list("ro_tasks", {"tasks.nsr_id": nsr_id})
             now = time.time()
             conflict = False
+
             for ro_task in ro_tasks:
                 db_update = {}
                 to_delete_ro_task = True
+
                 for index, task in enumerate(ro_task["tasks"]):
                     if not task:
                         pass
-                    elif (not vnfrs_deleted and task["nsr_id"] == nsr_id) or \
-                            (vnfrs_deleted and task["target_record"].startswith("vnfrs:"+vnfrs_deleted)):
+                    elif (not vnfrs_deleted and task["nsr_id"] == nsr_id) or (
+                        vnfrs_deleted
+                        and task["target_record"].startswith("vnfrs:" + vnfrs_deleted)
+                    ):
                         db_update["tasks.{}".format(index)] = None
                     else:
-                        to_delete_ro_task = False  # used by other nsr, ro_task cannot be deleted
+                        # used by other nsr, ro_task cannot be deleted
+                        to_delete_ro_task = False
+
                 # delete or update only if nobody has changed the ro_task meanwhile; modified_at is used to detect changes
                 if to_delete_ro_task:
-                    if not db.del_one("ro_tasks",
-                                      q_filter={"_id": ro_task["_id"], "modified_at": ro_task["modified_at"]},
-                                      fail_on_empty=False):
+                    if not db.del_one(
+                        "ro_tasks",
+                        q_filter={
+                            "_id": ro_task["_id"],
+                            "modified_at": ro_task["modified_at"],
+                        },
+                        fail_on_empty=False,
+                    ):
                         conflict = True
                 elif db_update:
                     db_update["modified_at"] = now
-                    if not db.set_one("ro_tasks",
-                                      q_filter={"_id": ro_task["_id"], "modified_at": ro_task["modified_at"]},
-                                      update_dict=db_update,
-                                      fail_on_empty=False):
+                    if not db.set_one(
+                        "ro_tasks",
+                        q_filter={
+                            "_id": ro_task["_id"],
+                            "modified_at": ro_task["modified_at"],
+                        },
+                        update_dict=db_update,
+                        fail_on_empty=False,
+                    ):
                         conflict = True
             if not conflict:
                 return
@@ -1491,7 +2145,9 @@ class NsWorker(threading.Thread):
                 if isinstance(e, queue.Empty):
                     pass
                 else:
-                    self.logger.critical("Error processing task: {}".format(e), exc_info=True)
+                    self.logger.critical(
+                        "Error processing task: {}".format(e), exc_info=True
+                    )
 
             # step 2: process pending_tasks, delete not needed tasks
             try:
@@ -1505,6 +2161,8 @@ class NsWorker(threading.Thread):
                 if not busy:
                     time.sleep(5)
             except Exception as e:
-                self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
+                self.logger.critical(
+                    "Unexpected exception at run: " + str(e), exc_info=True
+                )
 
         self.logger.info("Finishing")
NG-RO/osm_ng_ro/ro_main.py
index 485f15d..c9cad85 100644 (file)
@@ -41,13 +41,13 @@ from osm_ng_ro import version as ro_version, version_date as ro_version_date
 
 __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
 
-__version__ = "0.1."    # file version, not NBI version
+__version__ = "0.1."  # file version, not NBI version
 version_date = "May 2020"
 
-database_version = '1.2'
-auth_database_version = '1.0'
-ro_server = None           # instance of Server class
-vim_admin_thread = None   # instance of VimAdminThread class
+database_version = "1.2"
+auth_database_version = "1.0"
+ro_server = None  # instance of Server class
+vim_admin_thread = None  # instance of VimAdminThread class
 
 # vim_threads = None  # instance of VimThread class
 
@@ -76,10 +76,7 @@ valid_url_methods = {
             "tokens": {
                 "METHODS": ("POST",),
                 "ROLE_PERMISSION": "tokens:",
-                "<ID>": {
-                    "METHODS": ("DELETE",),
-                    "ROLE_PERMISSION": "tokens:id:"
-                }
+                "<ID>": {"METHODS": ("DELETE",), "ROLE_PERMISSION": "tokens:id:"},
             },
         }
     },
@@ -97,9 +94,9 @@ valid_url_methods = {
                         "cancel": {
                             "METHODS": ("POST",),
                             "ROLE_PERMISSION": "deploy:id:id:cancel",
-                        }
-                    }
-                }
+                        },
+                    },
+                },
             },
         }
     },
@@ -107,7 +104,6 @@ valid_url_methods = {
 
 
 class RoException(Exception):
-
     def __init__(self, message, http_code=HTTPStatus.METHOD_NOT_ALLOWED):
         Exception.__init__(self, message)
         self.http_code = http_code
@@ -118,18 +114,15 @@ class AuthException(RoException):
 
 
 class Authenticator:
-    
     def __init__(self, valid_url_methods, valid_query_string):
         self.valid_url_methods = valid_url_methods
         self.valid_query_string = valid_query_string
 
     def authorize(self, *args, **kwargs):
         return {"token": "ok", "id": "ok"}
-    
+
     def new_token(self, token_info, indata, remote):
-        return {"token": "ok",
-                "id": "ok",
-                "remote": remote}
+        return {"token": "ok", "id": "ok", "remote": remote}
 
     def del_token(self, token_id):
         pass
@@ -161,6 +154,7 @@ class Server(object):
     def _format_in(self, kwargs):
         try:
             indata = None
+
             if cherrypy.request.body.length:
                 error_text = "Invalid input format "
 
@@ -171,32 +165,50 @@ class Server(object):
                         cherrypy.request.headers.pop("Content-File-MD5", None)
                     elif "application/yaml" in cherrypy.request.headers["Content-Type"]:
                         error_text = "Invalid yaml format "
-                        indata = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader)
+                        indata = yaml.load(
+                            cherrypy.request.body, Loader=yaml.SafeLoader
+                        )
                         cherrypy.request.headers.pop("Content-File-MD5", None)
-                    elif "application/binary" in cherrypy.request.headers["Content-Type"] or \
-                         "application/gzip" in cherrypy.request.headers["Content-Type"] or \
-                         "application/zip" in cherrypy.request.headers["Content-Type"] or \
-                         "text/plain" in cherrypy.request.headers["Content-Type"]:
+                    elif (
+                        "application/binary" in cherrypy.request.headers["Content-Type"]
+                        or "application/gzip"
+                        in cherrypy.request.headers["Content-Type"]
+                        or "application/zip" in cherrypy.request.headers["Content-Type"]
+                        or "text/plain" in cherrypy.request.headers["Content-Type"]
+                    ):
                         indata = cherrypy.request.body  # .read()
-                    elif "multipart/form-data" in cherrypy.request.headers["Content-Type"]:
+                    elif (
+                        "multipart/form-data"
+                        in cherrypy.request.headers["Content-Type"]
+                    ):
                         if "descriptor_file" in kwargs:
                             filecontent = kwargs.pop("descriptor_file")
+
                             if not filecontent.file:
-                                raise RoException("empty file or content", HTTPStatus.BAD_REQUEST)
+                                raise RoException(
+                                    "empty file or content", HTTPStatus.BAD_REQUEST
+                                )
+
                             indata = filecontent.file  # .read()
+
                             if filecontent.content_type.value:
-                                cherrypy.request.headers["Content-Type"] = filecontent.content_type.value
+                                cherrypy.request.headers[
+                                    "Content-Type"
+                                ] = filecontent.content_type.value
                     else:
                         # raise cherrypy.HTTPError(HTTPStatus.Not_Acceptable,
                         #                          "Only 'Content-Type' of type 'application/json' or
                         # 'application/yaml' for input format are available")
                         error_text = "Invalid yaml format "
-                        indata = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader)
+                        indata = yaml.load(
+                            cherrypy.request.body, Loader=yaml.SafeLoader
+                        )
                         cherrypy.request.headers.pop("Content-File-MD5", None)
                 else:
                     error_text = "Invalid yaml format "
                     indata = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader)
                     cherrypy.request.headers.pop("Content-File-MD5", None)
+
             if not indata:
                 indata = {}
 
@@ -213,7 +225,12 @@ class Server(object):
                             kwargs[k] = yaml.load(v, Loader=yaml.SafeLoader)
                         except Exception:
                             pass
-                    elif k.endswith(".gt") or k.endswith(".lt") or k.endswith(".gte") or k.endswith(".lte"):
+                    elif (
+                        k.endswith(".gt")
+                        or k.endswith(".lt")
+                        or k.endswith(".gte")
+                        or k.endswith(".lte")
+                    ):
                         try:
                             kwargs[k] = int(v)
                         except Exception:
@@ -251,55 +268,83 @@ class Server(object):
         :return: None
         """
         accept = cherrypy.request.headers.get("Accept")
+
         if data is None:
             if accept and "text/html" in accept:
-                return html.format(data, cherrypy.request, cherrypy.response, token_info)
+                return html.format(
+                    data, cherrypy.request, cherrypy.response, token_info
+                )
+
             # cherrypy.response.status = HTTPStatus.NO_CONTENT.value
             return
         elif hasattr(data, "read"):  # file object
             if _format:
                 cherrypy.response.headers["Content-Type"] = _format
             elif "b" in data.mode:  # binariy asssumig zip
-                cherrypy.response.headers["Content-Type"] = 'application/zip'
+                cherrypy.response.headers["Content-Type"] = "application/zip"
             else:
-                cherrypy.response.headers["Content-Type"] = 'text/plain'
+                cherrypy.response.headers["Content-Type"] = "text/plain"
+
             # TODO check that cherrypy closes the file; if not, implement the pending per-thread cleanup
             return data
+
         if accept:
             if "application/json" in accept:
-                cherrypy.response.headers["Content-Type"] = 'application/json; charset=utf-8'
+                cherrypy.response.headers[
+                    "Content-Type"
+                ] = "application/json; charset=utf-8"
                 a = json.dumps(data, indent=4) + "\n"
+
                 return a.encode("utf8")
             elif "text/html" in accept:
-                return html.format(data, cherrypy.request, cherrypy.response, token_info)
-
-            elif "application/yaml" in accept or "*/*" in accept or "text/plain" in accept:
+                return html.format(
+                    data, cherrypy.request, cherrypy.response, token_info
+                )
+            elif (
+                "application/yaml" in accept
+                or "*/*" in accept
+                or "text/plain" in accept
+            ):
                 pass
             # if there is no valid Accept header, raise an error; but if the response is already an error, format it in yaml
             elif cherrypy.response.status >= 400:
-                raise cherrypy.HTTPError(HTTPStatus.NOT_ACCEPTABLE.value,
-                                         "Only 'Accept' of type 'application/json' or 'application/yaml' "
-                                         "for output format are available")
-        cherrypy.response.headers["Content-Type"] = 'application/yaml'
-        return yaml.safe_dump(data, explicit_start=True, indent=4, default_flow_style=False, tags=False,
-                              encoding='utf-8', allow_unicode=True)  # , canonical=True, default_style='"'
+                raise cherrypy.HTTPError(
+                    HTTPStatus.NOT_ACCEPTABLE.value,
+                    "Only 'Accept' of type 'application/json' or 'application/yaml' "
+                    "for output format are available",
+                )
+
+        cherrypy.response.headers["Content-Type"] = "application/yaml"
+
+        return yaml.safe_dump(
+            data,
+            explicit_start=True,
+            indent=4,
+            default_flow_style=False,
+            tags=False,
+            encoding="utf-8",
+            allow_unicode=True,
+        )  # , canonical=True, default_style='"'
 
     @cherrypy.expose
     def index(self, *args, **kwargs):
         token_info = None
+
         try:
             if cherrypy.request.method == "GET":
                 token_info = self.authenticator.authorize()
-                outdata = token_info   # Home page
+                outdata = token_info  # Home page
             else:
-                raise cherrypy.HTTPError(HTTPStatus.METHOD_NOT_ALLOWED.value,
-                                         "Method {} not allowed for tokens".format(cherrypy.request.method))
+                raise cherrypy.HTTPError(
+                    HTTPStatus.METHOD_NOT_ALLOWED.value,
+                    "Method {} not allowed for tokens".format(cherrypy.request.method),
+                )
 
             return self._format_out(outdata, token_info)
-
         except (NsException, AuthException) as e:
             # cherrypy.log("index Exception {}".format(e))
             cherrypy.response.status = e.http_code.value
+
             return self._format_out("Welcome to OSM!", token_info)
 
     @cherrypy.expose
@@ -307,11 +352,19 @@ class Server(object):
         # TODO consider removing this and providing the version via the static version file
         try:
             if cherrypy.request.method != "GET":
-                raise RoException("Only method GET is allowed", HTTPStatus.METHOD_NOT_ALLOWED)
+                raise RoException(
+                    "Only method GET is allowed",
+                    HTTPStatus.METHOD_NOT_ALLOWED,
+                )
             elif args or kwargs:
-                raise RoException("Invalid URL or query string for version", HTTPStatus.METHOD_NOT_ALLOWED)
+                raise RoException(
+                    "Invalid URL or query string for version",
+                    HTTPStatus.METHOD_NOT_ALLOWED,
+                )
+
             # TODO include version of other modules, pick up from some kafka admin message
             osm_ng_ro_version = {"version": ro_version, "date": ro_version_date}
+
             return self._format_out(osm_ng_ro_version)
         except RoException as e:
             cherrypy.response.status = e.http_code.value
@@ -320,6 +373,7 @@ class Server(object):
                 "status": e.http_code.value,
                 "detail": str(e),
             }
+
             return self._format_out(problem_details, None)
 
     def new_token(self, engine_session, indata, *args, **kwargs):
@@ -329,58 +383,77 @@ class Server(object):
             token_info = self.authenticator.authorize()
         except Exception:
             token_info = None
+
         if kwargs:
             indata.update(kwargs)
+
         # This is needed to log the user when authentication fails
         cherrypy.request.login = "{}".format(indata.get("username", "-"))
-        token_info = self.authenticator.new_token(token_info, indata, cherrypy.request.remote)
-        cherrypy.session['Authorization'] = token_info["id"]
+        token_info = self.authenticator.new_token(
+            token_info, indata, cherrypy.request.remote
+        )
+        cherrypy.session["Authorization"] = token_info["id"]
         self._set_location_header("admin", "v1", "tokens", token_info["id"])
         # for logging
 
         # cherrypy.response.cookie["Authorization"] = outdata["id"]
         # cherrypy.response.cookie["Authorization"]['expires'] = 3600
+
         return token_info, token_info["id"], True
 
     def del_token(self, engine_session, indata, version, _id, *args, **kwargs):
         token_id = _id
+
         if not token_id and "id" in kwargs:
             token_id = kwargs["id"]
         elif not token_id:
             token_info = self.authenticator.authorize()
             # for logging
             token_id = token_info["id"]
+
         self.authenticator.del_token(token_id)
         token_info = None
-        cherrypy.session['Authorization'] = "logout"
+        cherrypy.session["Authorization"] = "logout"
         # cherrypy.response.cookie["Authorization"] = token_id
         # cherrypy.response.cookie["Authorization"]['expires'] = 0
+
         return None, None, True
-    
+
     @cherrypy.expose
     def test(self, *args, **kwargs):
-        if not cherrypy.config.get("server.enable_test") or (isinstance(cherrypy.config["server.enable_test"], str) and
-                                                             cherrypy.config["server.enable_test"].lower() == "false"):
+        if not cherrypy.config.get("server.enable_test") or (
+            isinstance(cherrypy.config["server.enable_test"], str)
+            and cherrypy.config["server.enable_test"].lower() == "false"
+        ):
             cherrypy.response.status = HTTPStatus.METHOD_NOT_ALLOWED.value
+
             return "test URL is disabled"
+
         thread_info = None
-        if args and args[0] == "help":
-            return "<html><pre>\ninit\nfile/<name>  download file\ndb-clear/table\nfs-clear[/folder]\nlogin\nlogin2\n"\
-                   "sleep/<time>\nmessage/topic\n</pre></html>"
 
+        if args and args[0] == "help":
+            return (
+                "<html><pre>\ninit\nfile/<name>  download file\ndb-clear/table\nfs-clear[/folder]\nlogin\nlogin2\n"
+                "sleep/<time>\nmessage/topic\n</pre></html>"
+            )
         elif args and args[0] == "init":
             try:
                 # self.ns.load_dbase(cherrypy.request.app.config)
                 self.ns.create_admin()
+
                 return "Done. User 'admin', password 'admin' created"
             except Exception:
                 cherrypy.response.status = HTTPStatus.FORBIDDEN.value
+
                 return self._format_out("Database already initialized")
         elif args and args[0] == "file":
-            return cherrypy.lib.static.serve_file(cherrypy.tree.apps['/ro'].config["storage"]["path"] + "/" + args[1],
-                                                  "text/plain", "attachment")
+            return cherrypy.lib.static.serve_file(
+                cherrypy.tree.apps["/ro"].config["storage"]["path"] + "/" + args[1],
+                "text/plain",
+                "attachment",
+            )
         elif args and args[0] == "file2":
-            f_path = cherrypy.tree.apps['/ro'].config["storage"]["path"] + "/" + args[1]
+            f_path = cherrypy.tree.apps["/ro"].config["storage"]["path"] + "/" + args[1]
             f = open(f_path, "r")
             cherrypy.response.headers["Content-type"] = "text/plain"
             return f
@@ -393,24 +466,32 @@ class Server(object):
                 folders = (args[1],)
             else:
                 folders = self.ns.fs.dir_ls(".")
+
             for folder in folders:
                 self.ns.fs.file_delete(folder)
+
             return ",".join(folders) + " folders deleted\n"
         elif args and args[0] == "login":
             if not cherrypy.request.headers.get("Authorization"):
-                cherrypy.response.headers["WWW-Authenticate"] = 'Basic realm="Access to OSM site", charset="UTF-8"'
+                cherrypy.response.headers[
+                    "WWW-Authenticate"
+                ] = 'Basic realm="Access to OSM site", charset="UTF-8"'
                 cherrypy.response.status = HTTPStatus.UNAUTHORIZED.value
         elif args and args[0] == "login2":
             if not cherrypy.request.headers.get("Authorization"):
-                cherrypy.response.headers["WWW-Authenticate"] = 'Bearer realm="Access to OSM site"'
+                cherrypy.response.headers[
+                    "WWW-Authenticate"
+                ] = 'Bearer realm="Access to OSM site"'
                 cherrypy.response.status = HTTPStatus.UNAUTHORIZED.value
         elif args and args[0] == "sleep":
             sleep_time = 5
+
             try:
                 sleep_time = int(args[1])
             except Exception:
                 cherrypy.response.status = HTTPStatus.FORBIDDEN.value
                 return self._format_out("Database already initialized")
+
             thread_info = cherrypy.thread_data
             print(thread_info)
             time.sleep(sleep_time)
@@ -418,53 +499,76 @@ class Server(object):
         elif len(args) >= 2 and args[0] == "message":
             main_topic = args[1]
             return_text = "<html><pre>{} ->\n".format(main_topic)
+
             try:
-                if cherrypy.request.method == 'POST':
+                if cherrypy.request.method == "POST":
                     to_send = yaml.load(cherrypy.request.body, Loader=yaml.SafeLoader)
                     for k, v in to_send.items():
                         self.ns.msg.write(main_topic, k, v)
                         return_text += "  {}: {}\n".format(k, v)
-                elif cherrypy.request.method == 'GET':
+                elif cherrypy.request.method == "GET":
                     for k, v in kwargs.items():
-                        self.ns.msg.write(main_topic, k, yaml.load(v, Loader=yaml.SafeLoader))
-                        return_text += "  {}: {}\n".format(k, yaml.load(v, Loader=yaml.SafeLoader))
+                        self.ns.msg.write(
+                            main_topic, k, yaml.load(v, Loader=yaml.SafeLoader)
+                        )
+                        return_text += "  {}: {}\n".format(
+                            k, yaml.load(v, Loader=yaml.SafeLoader)
+                        )
             except Exception as e:
                 return_text += "Error: " + str(e)
+
             return_text += "</pre></html>\n"
+
             return return_text
 
         return_text = (
-            "<html><pre>\nheaders:\n  args: {}\n".format(args) +
-            "  kwargs: {}\n".format(kwargs) +
-            "  headers: {}\n".format(cherrypy.request.headers) +
-            "  path_info: {}\n".format(cherrypy.request.path_info) +
-            "  query_string: {}\n".format(cherrypy.request.query_string) +
-            "  session: {}\n".format(cherrypy.session) +
-            "  cookie: {}\n".format(cherrypy.request.cookie) +
-            "  method: {}\n".format(cherrypy.request.method) +
-            "  session: {}\n".format(cherrypy.session.get('fieldname')) +
-            "  body:\n")
+            "<html><pre>\nheaders:\n  args: {}\n".format(args)
+            + "  kwargs: {}\n".format(kwargs)
+            + "  headers: {}\n".format(cherrypy.request.headers)
+            + "  path_info: {}\n".format(cherrypy.request.path_info)
+            + "  query_string: {}\n".format(cherrypy.request.query_string)
+            + "  session: {}\n".format(cherrypy.session)
+            + "  cookie: {}\n".format(cherrypy.request.cookie)
+            + "  method: {}\n".format(cherrypy.request.method)
+            + "  session: {}\n".format(cherrypy.session.get("fieldname"))
+            + "  body:\n"
+        )
         return_text += "    length: {}\n".format(cherrypy.request.body.length)
+
         if cherrypy.request.body.length:
             return_text += "    content: {}\n".format(
-                str(cherrypy.request.body.read(int(cherrypy.request.headers.get('Content-Length', 0)))))
+                str(
+                    cherrypy.request.body.read(
+                        int(cherrypy.request.headers.get("Content-Length", 0))
+                    )
+                )
+            )
+
         if thread_info:
             return_text += "thread: {}\n".format(thread_info)
+
         return_text += "</pre></html>"
+
         return return_text
 
     @staticmethod
     def _check_valid_url_method(method, *args):
         if len(args) < 3:
-            raise RoException("URL must contain at least 'main_topic/version/topic'", HTTPStatus.METHOD_NOT_ALLOWED)
+            raise RoException(
+                "URL must contain at least 'main_topic/version/topic'",
+                HTTPStatus.METHOD_NOT_ALLOWED,
+            )
 
         reference = valid_url_methods
         for arg in args:
             if arg is None:
                 break
+
             if not isinstance(reference, dict):
-                raise RoException("URL contains unexpected extra items '{}'".format(arg),
-                                  HTTPStatus.METHOD_NOT_ALLOWED)
+                raise RoException(
+                    "URL contains unexpected extra items '{}'".format(arg),
+                    HTTPStatus.METHOD_NOT_ALLOWED,
+                )
 
             if arg in reference:
                 reference = reference[arg]
@@ -474,11 +578,22 @@ class Server(object):
                 # reference = reference["*"]
                 break
             else:
-                raise RoException("Unexpected URL item {}".format(arg), HTTPStatus.METHOD_NOT_ALLOWED)
+                raise RoException(
+                    "Unexpected URL item {}".format(arg),
+                    HTTPStatus.METHOD_NOT_ALLOWED,
+                )
+
         if "TODO" in reference and method in reference["TODO"]:
-            raise RoException("Method {} not supported yet for this URL".format(method), HTTPStatus.NOT_IMPLEMENTED)
+            raise RoException(
+                "Method {} not supported yet for this URL".format(method),
+                HTTPStatus.NOT_IMPLEMENTED,
+            )
         elif "METHODS" not in reference or method not in reference["METHODS"]:
-            raise RoException("Method {} not supported for this URL".format(method), HTTPStatus.METHOD_NOT_ALLOWED)
+            raise RoException(
+                "Method {} not supported for this URL".format(method),
+                HTTPStatus.METHOD_NOT_ALLOWED,
+            )
+
         return reference["ROLE_PERMISSION"] + method.lower()
 
     @staticmethod
@@ -492,71 +607,137 @@ class Server(object):
         :return: None
         """
         # Use cherrypy.request.base for the absolute path and use the HOST request header in case we are behind a NAT
-        cherrypy.response.headers["Location"] = "/ro/{}/{}/{}/{}".format(main_topic, version, topic, id)
+        cherrypy.response.headers["Location"] = "/ro/{}/{}/{}/{}".format(
+            main_topic, version, topic, id
+        )
+
         return
 
     @cherrypy.expose
-    def default(self, main_topic=None, version=None, topic=None, _id=None, _id2=None, *args, **kwargs):
+    def default(
+        self,
+        main_topic=None,
+        version=None,
+        topic=None,
+        _id=None,
+        _id2=None,
+        *args,
+        **kwargs,
+    ):
         token_info = None
         outdata = None
         _format = None
         method = "DONE"
         rollback = []
         engine_session = None
+
         try:
             if not main_topic or not version or not topic:
-                raise RoException("URL must contain at least 'main_topic/version/topic'",
-                                  HTTPStatus.METHOD_NOT_ALLOWED)
-            if main_topic not in ("admin", "ns",):
-                raise RoException("URL main_topic '{}' not supported".format(main_topic),
-                                  HTTPStatus.METHOD_NOT_ALLOWED)
-            if version != 'v1':
-                raise RoException("URL version '{}' not supported".format(version), HTTPStatus.METHOD_NOT_ALLOWED)
-
-            if kwargs and "METHOD" in kwargs and kwargs["METHOD"] in ("PUT", "POST", "DELETE", "GET", "PATCH"):
+                raise RoException(
+                    "URL must contain at least 'main_topic/version/topic'",
+                    HTTPStatus.METHOD_NOT_ALLOWED,
+                )
+
+            if main_topic not in (
+                "admin",
+                "ns",
+            ):
+                raise RoException(
+                    "URL main_topic '{}' not supported".format(main_topic),
+                    HTTPStatus.METHOD_NOT_ALLOWED,
+                )
+
+            if version != "v1":
+                raise RoException(
+                    "URL version '{}' not supported".format(version),
+                    HTTPStatus.METHOD_NOT_ALLOWED,
+                )
+
+            if (
+                kwargs
+                and "METHOD" in kwargs
+                and kwargs["METHOD"] in ("PUT", "POST", "DELETE", "GET", "PATCH")
+            ):
                 method = kwargs.pop("METHOD")
             else:
                 method = cherrypy.request.method
 
-            role_permission = self._check_valid_url_method(method, main_topic, version, topic, _id, _id2, *args,
-                                                           **kwargs)
+            role_permission = self._check_valid_url_method(
+                method, main_topic, version, topic, _id, _id2, *args, **kwargs
+            )
             # skip token validation if requesting a token
             indata = self._format_in(kwargs)
+
             if main_topic != "admin" or topic != "tokens":
                 token_info = self.authenticator.authorize(role_permission, _id)
+
             outdata, created_id, done = self.map_operation[role_permission](
-                engine_session, indata, version, _id, _id2, *args, *kwargs)
+                engine_session, indata, version, _id, _id2, *args, **kwargs
+            )
+
             if created_id:
                 self._set_location_header(main_topic, version, topic, _id)
-            cherrypy.response.status = HTTPStatus.ACCEPTED.value if not done else HTTPStatus.OK.value if \
-                outdata is not None else HTTPStatus.NO_CONTENT.value
+
+            cherrypy.response.status = (
+                HTTPStatus.ACCEPTED.value
+                if not done
+                else HTTPStatus.OK.value
+                if outdata is not None
+                else HTTPStatus.NO_CONTENT.value
+            )
+
             return self._format_out(outdata, token_info, _format)
         except Exception as e:
-            if isinstance(e, (RoException, NsException, DbException, FsException, MsgException, AuthException,
-                              ValidationError)):
+            if isinstance(
+                e,
+                (
+                    RoException,
+                    NsException,
+                    DbException,
+                    FsException,
+                    MsgException,
+                    AuthException,
+                    ValidationError,
+                ),
+            ):
                 http_code_value = cherrypy.response.status = e.http_code.value
                 http_code_name = e.http_code.name
                 cherrypy.log("Exception {}".format(e))
             else:
-                http_code_value = cherrypy.response.status = HTTPStatus.BAD_REQUEST.value  # INTERNAL_SERVER_ERROR
+                http_code_value = (
+                    cherrypy.response.status
+                ) = HTTPStatus.BAD_REQUEST.value  # INTERNAL_SERVER_ERROR
                 cherrypy.log("CRITICAL: Exception {}".format(e), traceback=True)
                 http_code_name = HTTPStatus.BAD_REQUEST.name
+
             if hasattr(outdata, "close"):  # is an open file
                 outdata.close()
+
             error_text = str(e)
             rollback.reverse()
+
             for rollback_item in rollback:
                 try:
                     if rollback_item.get("operation") == "set":
-                        self.ns.db.set_one(rollback_item["topic"], {"_id": rollback_item["_id"]},
-                                           rollback_item["content"], fail_on_empty=False)
+                        self.ns.db.set_one(
+                            rollback_item["topic"],
+                            {"_id": rollback_item["_id"]},
+                            rollback_item["content"],
+                            fail_on_empty=False,
+                        )
                     else:
-                        self.ns.db.del_one(rollback_item["topic"], {"_id": rollback_item["_id"]},
-                                           fail_on_empty=False)
+                        self.ns.db.del_one(
+                            rollback_item["topic"],
+                            {"_id": rollback_item["_id"]},
+                            fail_on_empty=False,
+                        )
                 except Exception as e2:
-                    rollback_error_text = "Rollback Exception {}: {}".format(rollback_item, e2)
+                    rollback_error_text = "Rollback Exception {}: {}".format(
+                        rollback_item, e2
+                    )
                     cherrypy.log(rollback_error_text)
                     error_text += ". " + rollback_error_text
+
             # if isinstance(e, MsgException):
             #     error_text = "{} has been '{}' but other modules cannot be informed because an error on bus".format(
             #         engine_topic[:-1], method, error_text)
@@ -565,6 +746,7 @@ class Server(object):
                 "status": http_code_value,
                 "detail": error_text,
             }
+
             return self._format_out(problem_details, token_info)
             # raise cherrypy.HTTPError(e.http_code.value, str(e))
         finally:
@@ -572,7 +754,9 @@ class Server(object):
                 if method in ("PUT", "PATCH", "POST") and isinstance(outdata, dict):
                     for logging_id in ("id", "op_id", "nsilcmop_id", "nslcmop_id"):
                         if outdata.get(logging_id):
-                            cherrypy.request.login += ";{}={}".format(logging_id, outdata[logging_id][:36])
+                            cherrypy.request.login += ";{}={}".format(
+                                logging_id, outdata[logging_id][:36]
+                            )
 
 
 def _start_service():
@@ -587,24 +771,27 @@ def _start_service():
     cherrypy.log.error("Starting osm_ng_ro")
     # update general cherrypy configuration
     update_dict = {}
+    engine_config = cherrypy.tree.apps["/ro"].config
 
-    engine_config = cherrypy.tree.apps['/ro'].config
     for k, v in environ.items():
         if not k.startswith("OSMRO_"):
             continue
+
         k1, _, k2 = k[6:].lower().partition("_")
+
         if not k2:
             continue
+
         try:
             if k1 in ("server", "test", "auth", "log"):
                 # update [global] configuration
-                update_dict[k1 + '.' + k2] = yaml.safe_load(v)
+                update_dict[k1 + "." + k2] = yaml.safe_load(v)
             elif k1 == "static":
                 # update [/static] configuration
                 engine_config["/static"]["tools.staticdir." + k2] = yaml.safe_load(v)
             elif k1 == "tools":
                 # update [/] configuration
-                engine_config["/"]["tools." + k2.replace('_', '.')] = yaml.safe_load(v)
+                engine_config["/"]["tools." + k2.replace("_", ".")] = yaml.safe_load(v)
             elif k1 in ("message", "database", "storage", "authentication"):
                 engine_config[k1][k2] = yaml.safe_load(v)
 
@@ -616,26 +803,35 @@ def _start_service():
         engine_config["global"].update(update_dict)
 
     # logging cherrypy
-    log_format_simple = "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s"
-    log_formatter_simple = logging.Formatter(log_format_simple, datefmt='%Y-%m-%dT%H:%M:%S')
+    log_format_simple = (
+        "%(asctime)s %(levelname)s %(name)s %(filename)s:%(lineno)s %(message)s"
+    )
+    log_formatter_simple = logging.Formatter(
+        log_format_simple, datefmt="%Y-%m-%dT%H:%M:%S"
+    )
     logger_server = logging.getLogger("cherrypy.error")
     logger_access = logging.getLogger("cherrypy.access")
     logger_cherry = logging.getLogger("cherrypy")
     logger = logging.getLogger("ro")
 
     if "log.file" in engine_config["global"]:
-        file_handler = logging.handlers.RotatingFileHandler(engine_config["global"]["log.file"],
-                                                            maxBytes=100e6, backupCount=9, delay=0)
+        file_handler = logging.handlers.RotatingFileHandler(
+            engine_config["global"]["log.file"], maxBytes=100e6, backupCount=9, delay=0
+        )
         file_handler.setFormatter(log_formatter_simple)
         logger_cherry.addHandler(file_handler)
         logger.addHandler(file_handler)
+
     # log always to standard output
-    for format_, logger in {"ro.server %(filename)s:%(lineno)s": logger_server,
-                            "ro.access %(filename)s:%(lineno)s": logger_access,
-                            "%(name)s %(filename)s:%(lineno)s": logger
-                            }.items():
+    for format_, logger in {
+        "ro.server %(filename)s:%(lineno)s": logger_server,
+        "ro.access %(filename)s:%(lineno)s": logger_access,
+        "%(name)s %(filename)s:%(lineno)s": logger,
+    }.items():
         log_format_cherry = "%(asctime)s %(levelname)s {} %(message)s".format(format_)
-        log_formatter_cherry = logging.Formatter(log_format_cherry, datefmt='%Y-%m-%dT%H:%M:%S')
+        log_formatter_cherry = logging.Formatter(
+            log_format_cherry, datefmt="%Y-%m-%dT%H:%M:%S"
+        )
         str_handler = logging.StreamHandler()
         str_handler.setFormatter(log_formatter_cherry)
         logger.addHandler(str_handler)
@@ -643,24 +839,32 @@ def _start_service():
     if engine_config["global"].get("log.level"):
         logger_cherry.setLevel(engine_config["global"]["log.level"])
         logger.setLevel(engine_config["global"]["log.level"])
+
     # logging other modules
-    for k1, logname in {"message": "ro.msg", "database": "ro.db", "storage": "ro.fs"}.items():
+    for k1, logname in {
+        "message": "ro.msg",
+        "database": "ro.db",
+        "storage": "ro.fs",
+    }.items():
         engine_config[k1]["logger_name"] = logname
         logger_module = logging.getLogger(logname)
+
         if "logfile" in engine_config[k1]:
-            file_handler = logging.handlers.RotatingFileHandler(engine_config[k1]["logfile"],
-                                                                maxBytes=100e6, backupCount=9, delay=0)
+            file_handler = logging.handlers.RotatingFileHandler(
+                engine_config[k1]["logfile"], maxBytes=100e6, backupCount=9, delay=0
+            )
             file_handler.setFormatter(log_formatter_simple)
             logger_module.addHandler(file_handler)
+
         if "loglevel" in engine_config[k1]:
             logger_module.setLevel(engine_config[k1]["loglevel"])
     # TODO add more entries, e.g.: storage
 
     engine_config["assignment"] = {}
     # ^ each VIM, SDNc will be assigned one worker id. Ns class will add items and VimThread will auto-assign
-    cherrypy.tree.apps['/ro'].root.ns.start(engine_config)
-    cherrypy.tree.apps['/ro'].root.authenticator.start(engine_config)
-    cherrypy.tree.apps['/ro'].root.ns.init_db(target_version=database_version)
+    cherrypy.tree.apps["/ro"].root.ns.start(engine_config)
+    cherrypy.tree.apps["/ro"].root.authenticator.start(engine_config)
+    cherrypy.tree.apps["/ro"].root.ns.init_db(target_version=database_version)
 
     # # start subscriptions thread:
     vim_admin_thread = VimAdminThread(config=engine_config, engine=ro_server.ns)
@@ -678,37 +882,45 @@ def _stop_service():
     TODO: Ending database connections.
     """
     global vim_admin_thread
+
     # terminate vim_admin_thread
     if vim_admin_thread:
         vim_admin_thread.terminate()
+
     vim_admin_thread = None
-    cherrypy.tree.apps['/ro'].root.ns.stop()
+    cherrypy.tree.apps["/ro"].root.ns.stop()
     cherrypy.log.error("Stopping osm_ng_ro")
 
 
 def ro_main(config_file):
     global ro_server
+
     ro_server = Server()
-    cherrypy.engine.subscribe('start', _start_service)
-    cherrypy.engine.subscribe('stop', _stop_service)
-    cherrypy.quickstart(ro_server, '/ro', config_file)
+    cherrypy.engine.subscribe("start", _start_service)
+    cherrypy.engine.subscribe("stop", _stop_service)
+    cherrypy.quickstart(ro_server, "/ro", config_file)
 
 
 def usage():
-    print("""Usage: {} [options]
+    print(
+        """Usage: {} [options]
         -c|--config [configuration_file]: loads the configuration file (default: ./ro.cfg)
         -h|--help: shows this help
-        """.format(sys.argv[0]))
+        """.format(
+            sys.argv[0]
+        )
+    )
     # --log-socket-host HOST: send logs to this host")
     # --log-socket-port PORT: send logs using this port (default: 9022)")
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     try:
         # load parameters and configuration
         opts, args = getopt.getopt(sys.argv[1:], "hvc:", ["config=", "help"])
         # TODO add  "log-socket-host=", "log-socket-port=", "log-file="
         config_file = None
+
         for o, a in opts:
             if o in ("-h", "--help"):
                 usage()
@@ -717,17 +929,29 @@ if __name__ == '__main__':
                 config_file = a
             else:
                 assert False, "Unhandled option"
+
         if config_file:
             if not path.isfile(config_file):
-                print("configuration file '{}' that not exist".format(config_file), file=sys.stderr)
+                print(
+                    "configuration file '{}' that not exist".format(config_file),
+                    file=sys.stderr,
+                )
                 exit(1)
         else:
-            for config_file in (path.dirname(__file__) + "/ro.cfg", "./ro.cfg", "/etc/osm/ro.cfg"):
+            for config_file in (
+                path.dirname(__file__) + "/ro.cfg",
+                "./ro.cfg",
+                "/etc/osm/ro.cfg",
+            ):
                 if path.isfile(config_file):
                     break
             else:
-                print("No configuration file 'ro.cfg' found neither at local folder nor at /etc/osm/", file=sys.stderr)
+                print(
+                    "No configuration file 'ro.cfg' found neither at local folder nor at /etc/osm/",
+                    file=sys.stderr,
+                )
                 exit(1)
+
         ro_main(config_file)
     except KeyboardInterrupt:
         print("KeyboardInterrupt. Finishing", file=sys.stderr)
NG-RO/osm_ng_ro/validation.py
index 54d8eed..efd940c 100644 (file)
@@ -25,10 +25,18 @@ Validator of input data using JSON schemas
 """
 
 # Basis schemas
-name_schema = {"type": "string", "minLength": 1, "maxLength": 255, "pattern": "^[^,;()'\"]+$"}
+name_schema = {
+    "type": "string",
+    "minLength": 1,
+    "maxLength": 255,
+    "pattern": "^[^,;()'\"]+$",
+}
 string_schema = {"type": "string", "minLength": 1, "maxLength": 255}
 ssh_key_schema = {"type": "string", "minLength": 1}
-id_schema = {"type": "string", "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$"}
+id_schema = {
+    "type": "string",
+    "pattern": "^[a-fA-F0-9]{8}(-[a-fA-F0-9]{4}){3}-[a-fA-F0-9]{12}$",
+}
 bool_schema = {"type": "boolean"}
 null_schema = {"type": "null"}
 object_schema = {"type": "object"}
@@ -42,7 +50,7 @@ deploy_item_schema = {
         "vim_info": object_schema,
         "common_id": string_schema,
     },
-    "additionalProperties": True
+    "additionalProperties": True,
 }
 
 deploy_item_list = {
@@ -96,10 +104,10 @@ deploy_schema = {
             "type": "object",
             "properties": {
                 "vld": deploy_item_list,
-            }
+            },
         },
     },
-    "additionalProperties": False
+    "additionalProperties": False,
 }
 
 
@@ -119,12 +127,17 @@ def validate_input(indata, schema_to_use):
     try:
         if schema_to_use:
             js_v(indata, schema_to_use)
+
         return None
     except js_e.ValidationError as e:
         if e.path:
             error_pos = "at '" + ":".join(map(str, e.path)) + "'"
         else:
             error_pos = ""
+
         raise ValidationError("Format error {} '{}' ".format(error_pos, e.message))
     except js_e.SchemaError:
-        raise ValidationError("Bad json schema {}".format(schema_to_use), http_code=HTTPStatus.INTERNAL_SERVER_ERROR)
+        raise ValidationError(
+            "Bad json schema {}".format(schema_to_use),
+            http_code=HTTPStatus.INTERNAL_SERVER_ERROR,
+        )
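
To round off the validation.py section above, here is a minimal usage sketch of the same jsonschema-based check; it assumes js_v/js_e in the source are the usual aliases for jsonschema.validate and jsonschema.exceptions, and it returns the error message instead of raising the module's ValidationError so the snippet stays self-contained.

# Minimal sketch (assumptions noted above) of validating input against name_schema.
import jsonschema

name_schema = {
    "type": "string",
    "minLength": 1,
    "maxLength": 255,
    "pattern": "^[^,;()'\"]+$",
}


def validate_name(value):
    """Return None if value matches name_schema, else the jsonschema error message."""
    try:
        jsonschema.validate(value, name_schema)
        return None
    except jsonschema.exceptions.ValidationError as e:
        return e.message


print(validate_name("my-ns-01"))   # None -> accepted
print(validate_name("bad;name"))   # message about the pattern not matching
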
NG-RO/osm_ng_ro/vim_admin.py
index e843c80..17bfb20 100644 (file)
@@ -33,7 +33,6 @@ __author__ = "Alfonso Tierno <alfonso.tiernosepulveda@telefonica.com>"
 
 
 class VimAdminException(Exception):
-
     def __init__(self, message, http_code=HTTPStatus.BAD_REQUEST):
         self.http_code = http_code
         Exception.__init__(self, message)
@@ -78,9 +77,10 @@ class LockRenew:
             "initial_lock_time": database_object["locked_at"],
             "locked_at": database_object["locked_at"],
             "thread": thread_object,
-            "unlocked": False  # True when it is not needed any more
+            "unlocked": False,  # True when it is not needed any more
         }
         LockRenew.renew_list.append(lock_object)
+
         return lock_object
 
     @staticmethod
@@ -90,36 +90,66 @@ class LockRenew:
     async def renew_locks(self):
         while not self.to_terminate:
             if not self.renew_list:
-                await asyncio.sleep(self.task_locked_time - self.task_relock_time, loop=self.loop)
+                await asyncio.sleep(
+                    self.task_locked_time - self.task_relock_time, loop=self.loop
+                )
                 continue
+
             lock_object = self.renew_list[0]
-            if lock_object["unlocked"] or not lock_object["thread"] or not lock_object["thread"].is_alive():
+
+            if (
+                lock_object["unlocked"]
+                or not lock_object["thread"]
+                or not lock_object["thread"].is_alive()
+            ):
                 # task has finished or the locking thread is dead; no need to re-lock it.
                 self.renew_list.pop(0)
                 continue
 
             locked_at = lock_object["locked_at"]
             now = time()
-            time_to_relock = locked_at + self.task_locked_time - self.task_relock_time - now
+            time_to_relock = (
+                locked_at + self.task_locked_time - self.task_relock_time - now
+            )
+
             if time_to_relock < 1:
                 if lock_object["initial_lock_time"] + self.task_max_locked_time < now:
                     self.renew_list.pop(0)
                     # re-lock
                     new_locked_at = locked_at + self.task_locked_time
+
                     try:
-                        if self.db.set_one(lock_object["table"],
-                                           update_dict={"locked_at": new_locked_at, "modified_at": now},
-                                           q_filter={"_id": lock_object["_id"], "locked_at": locked_at},
-                                           fail_on_empty=False):
-                            self.logger.debug("Renew lock for {}.{}".format(lock_object["table"], lock_object["_id"]))
+                        if self.db.set_one(
+                            lock_object["table"],
+                            update_dict={
+                                "locked_at": new_locked_at,
+                                "modified_at": now,
+                            },
+                            q_filter={
+                                "_id": lock_object["_id"],
+                                "locked_at": locked_at,
+                            },
+                            fail_on_empty=False,
+                        ):
+                            self.logger.debug(
+                                "Renew lock for {}.{}".format(
+                                    lock_object["table"], lock_object["_id"]
+                                )
+                            )
                             lock_object["locked_at"] = new_locked_at
                             self.renew_list.append(lock_object)
                         else:
-                            self.logger.info("Cannot renew lock for {}.{}".format(lock_object["table"],
-                                                                                  lock_object["_id"]))
+                            self.logger.info(
+                                "Cannot renew lock for {}.{}".format(
+                                    lock_object["table"], lock_object["_id"]
+                                )
+                            )
                     except Exception as e:
-                        self.logger.error("Exception when trying to renew lock for {}.{}: {}".format(
-                            lock_object["table"], lock_object["_id"], e))
+                        self.logger.error(
+                            "Exception when trying to renew lock for {}.{}: {}".format(
+                                lock_object["table"], lock_object["_id"], e
+                            )
+                        )
             else:
                 # wait until it is time to re-lock it
                 await asyncio.sleep(time_to_relock, loop=self.loop)
@@ -127,12 +157,17 @@ class LockRenew:
     def stop(self):
         # unlock all locked items
         now = time()
+
         for lock_object in self.renew_list:
             locked_at = lock_object["locked_at"]
+
             if not lock_object["unlocked"] or locked_at + self.task_locked_time >= now:
-                self.db.set_one(lock_object["table"], update_dict={"locked_at": 0},
-                                q_filter={"_id": lock_object["_id"], "locked_at": locked_at},
-                                fail_on_empty=False)
+                self.db.set_one(
+                    lock_object["table"],
+                    update_dict={"locked_at": 0},
+                    q_filter={"_id": lock_object["_id"], "locked_at": locked_at},
+                    fail_on_empty=False,
+                )
 
 
 class VimAdminThread(threading.Thread):
@@ -156,20 +191,25 @@ class VimAdminThread(threading.Thread):
         self.last_rotask_time = 0
         self.next_check_unused_vim = time() + self.TIME_CHECK_UNUSED_VIM
         self.logger = logging.getLogger("ro.vimadmin")
-        self.aiomain_task_kafka = None  # asyncio task for receiving vim actions from kafka bus
-        self.aiomain_task_vim = None  # asyncio task for watching ro_tasks not processed by nobody
+        # asyncio task for receiving vim actions from kafka bus
+        self.aiomain_task_kafka = None
+        # asyncio task for watching ro_tasks not processed by anybody
+        self.aiomain_task_vim = None
         self.aiomain_task_renew_lock = None
         # ^ asyncio task to keep an ro_task locked when the VIM plugin takes too much time processing an order
         self.lock_renew = LockRenew(config, self.logger)
         self.task_locked_time = config["global"]["task_locked_time"]
 
     async def vim_watcher(self):
-        """ Reads database periodically looking for tasks not processed by nobody because of a reboot
+        """Reads database periodically looking for tasks not processed by nobody because of a reboot
         in order to load this vim"""
         # firstly read VIMS not processed
         for target_database in ("vim_accounts", "wim_accounts", "sdns"):
-            unattended_targets = self.db.get_list(target_database,
-                                                  q_filter={"_admin.operations.operationState": "PROCESSING"})
+            unattended_targets = self.db.get_list(
+                target_database,
+                q_filter={"_admin.operations.operationState": "PROCESSING"},
+            )
+
             for target in unattended_targets:
                 target_id = "{}:{}".format(target_database[:3], target["_id"])
                 self.logger.info("ordered to check {}".format(target_id))
@@ -178,37 +218,57 @@ class VimAdminThread(threading.Thread):
         while not self.to_terminate:
             now = time()
             processed_vims = []
+
             if not self.last_rotask_time:
                 self.last_rotask_time = 0
-            ro_tasks = self.db.get_list("ro_tasks",
-                                        q_filter={"target_id.ncont": self.engine.get_assigned_vims(),
-                                                  "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
-                                                  "locked_at.lt": now - self.task_locked_time,
-                                                  "to_check_at.gt": self.last_rotask_time,
-                                                  "to_check_at.lte": now - self.MAX_TIME_UNATTENDED})
+
+            ro_tasks = self.db.get_list(
+                "ro_tasks",
+                q_filter={
+                    "target_id.ncont": self.engine.get_assigned_vims(),
+                    "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+                    "locked_at.lt": now - self.task_locked_time,
+                    "to_check_at.gt": self.last_rotask_time,
+                    "to_check_at.lte": now - self.MAX_TIME_UNATTENDED,
+                },
+            )
             self.last_rotask_time = now - self.MAX_TIME_UNATTENDED
+
             for ro_task in ro_tasks:
                 # if already checked ignore
                 if ro_task["target_id"] in processed_vims:
                     continue
+
                 processed_vims.append(ro_task["target_id"])
+
                 # if already assigned ignore
                 if ro_task["target_id"] in self.engine.get_assigned_vims():
                     continue
+
                 # if there is some task locked on this VIM, there is an RO working on it, so ignore
-                if self.db.get_list("ro_tasks",
-                                    q_filter={"target_id": ro_task["target_id"],
-                                              "tasks.status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
-                                              "locked_at.gt": now - self.task_locked_time}):
+                if self.db.get_list(
+                    "ro_tasks",
+                    q_filter={
+                        "target_id": ro_task["target_id"],
+                        "tasks.status": ["SCHEDULED", "BUILD", "DONE", "FAILED"],
+                        "locked_at.gt": now - self.task_locked_time,
+                    },
+                ):
                     continue
+
                 # unattended, assign vim
                 self.engine.assign_vim(ro_task["target_id"])
-                self.logger.debug("ordered to load {}. Inactivity detected".format(ro_task["target_id"]))
+                self.logger.debug(
+                    "ordered to load {}. Inactivity detected".format(
+                        ro_task["target_id"]
+                    )
+                )
 
             # every 2 hours check if there are vims without any ro_task and unload it
             if now > self.next_check_unused_vim:
                 self.next_check_unused_vim = now + self.TIME_CHECK_UNUSED_VIM
                 self.engine.unload_unused_vims()
+
             await asyncio.sleep(self.MAX_TIME_UNATTENDED, loop=self.loop)
 
     async def aiomain(self):
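
The q_filter built above relies on operator suffixes in the key names (target_id.ncont, locked_at.lt, to_check_at.gt, to_check_at.lte) to select ro_tasks that nobody has touched for a while. As a rough, in-memory illustration of that filter style (this is not the osm_common db implementation; matches() and the sample document are made up):

# Rough in-memory reading of the "<field>.<op>" filter keys used above.
# Illustrative only; nested "a.b" paths and list-valued matches are skipped.
from time import time

OPS = {
    "lt": lambda value, ref: value < ref,
    "lte": lambda value, ref: value <= ref,
    "gt": lambda value, ref: value > ref,
    "ncont": lambda value, ref: value not in ref,  # "not contained in"
}


def matches(doc, q_filter):
    for key, ref in q_filter.items():
        field, _, op = key.rpartition(".")
        if op in OPS and field:
            if not OPS[op](doc.get(field), ref):
                return False
        elif doc.get(key) != ref:  # plain equality when no operator suffix
            return False
    return True


now = time()
unattended = {"target_id": "vim:1234", "locked_at": 0, "to_check_at": now - 120}
print(matches(unattended, {"locked_at.lt": now - 600, "to_check_at.lte": now - 60}))
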
@@ -217,32 +277,57 @@ class VimAdminThread(threading.Thread):
             try:
                 if not self.aiomain_task_kafka:
                     # await self.msg.aiowrite("admin", "echo", "dummy message", loop=self.loop)
-                    await self.msg.aiowrite("vim_account", "echo", "dummy message", loop=self.loop)
+                    await self.msg.aiowrite(
+                        "vim_account", "echo", "dummy message", loop=self.loop
+                    )
                     kafka_working = True
                     self.logger.debug("Starting vim_account subscription task")
                     self.aiomain_task_kafka = asyncio.ensure_future(
-                        self.msg.aioread(self.kafka_topics, loop=self.loop, group_id=False,
-                                         aiocallback=self._msg_callback),
-                        loop=self.loop)
+                        self.msg.aioread(
+                            self.kafka_topics,
+                            loop=self.loop,
+                            group_id=False,
+                            aiocallback=self._msg_callback,
+                        ),
+                        loop=self.loop,
+                    )
+
                 if not self.aiomain_task_vim:
                     self.aiomain_task_vim = asyncio.ensure_future(
-                        self.vim_watcher(),
-                        loop=self.loop)
+                        self.vim_watcher(), loop=self.loop
+                    )
+
                 if not self.aiomain_task_renew_lock:
-                    self.aiomain_task_renew_lock = asyncio.ensure_future(self.lock_renew.renew_locks(), loop=self.loop)
+                    self.aiomain_task_renew_lock = asyncio.ensure_future(
+                        self.lock_renew.renew_locks(), loop=self.loop
+                    )
 
                 done, _ = await asyncio.wait(
-                    [self.aiomain_task_kafka, self.aiomain_task_vim, self.aiomain_task_renew_lock],
-                    timeout=None, loop=self.loop, return_when=asyncio.FIRST_COMPLETED)
+                    [
+                        self.aiomain_task_kafka,
+                        self.aiomain_task_vim,
+                        self.aiomain_task_renew_lock,
+                    ],
+                    timeout=None,
+                    loop=self.loop,
+                    return_when=asyncio.FIRST_COMPLETED,
+                )
+
                 try:
                     if self.aiomain_task_kafka in done:
                         exc = self.aiomain_task_kafka.exception()
-                        self.logger.error("kafka subscription task exception: {}".format(exc))
+                        self.logger.error(
+                            "kafka subscription task exception: {}".format(exc)
+                        )
                         self.aiomain_task_kafka = None
+
                     if self.aiomain_task_vim in done:
                         exc = self.aiomain_task_vim.exception()
-                        self.logger.error("vim_account watcher task exception: {}".format(exc))
+                        self.logger.error(
+                            "vim_account watcher task exception: {}".format(exc)
+                        )
                         self.aiomain_task_vim = None
+
                     if self.aiomain_task_renew_lock in done:
                         exc = self.aiomain_task_renew_lock.exception()
                         self.logger.error("renew_locks task exception: {}".format(exc))
@@ -253,10 +338,14 @@ class VimAdminThread(threading.Thread):
             except Exception as e:
                 if self.to_terminate:
                     return
+
                 if kafka_working:
                     # logging only first time
-                    self.logger.critical("Error accessing kafka '{}'. Retrying ...".format(e))
+                    self.logger.critical(
+                        "Error accessing kafka '{}'. Retrying ...".format(e)
+                    )
                     kafka_working = False
+
             await asyncio.sleep(10, loop=self.loop)
 
     def run(self):
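
aiomain() above is a small supervisor: it starts the kafka reader, the VIM watcher and the lock-renew coroutines, waits for whichever finishes first (normally because it raised), logs its exception and lets the outer loop re-create it. A minimal standalone sketch of that pattern, with invented worker names and without the loop= arguments so it also runs on current Python:

# Minimal supervisor sketch: notice which long-lived task finished first.
import asyncio


async def worker(name, fail_after):
    await asyncio.sleep(fail_after)
    raise RuntimeError("{} stopped".format(name))


async def supervisor():
    tasks = {
        "kafka": asyncio.ensure_future(worker("kafka", 0.1)),
        "watcher": asyncio.ensure_future(worker("watcher", 5)),
    }
    done, _ = await asyncio.wait(
        tasks.values(), return_when=asyncio.FIRST_COMPLETED
    )

    for name, task in tasks.items():
        if task in done:
            print("{} finished with: {}".format(name, task.exception()))
            # a real supervisor would re-create this task and wait again
        else:
            task.cancel()


asyncio.run(supervisor())
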
@@ -274,13 +363,18 @@ class VimAdminThread(threading.Thread):
                     self.db = dbmemory.DbMemory()
                     self.db.db_connect(self.config["database"])
                 else:
-                    raise VimAdminException("Invalid configuration param '{}' at '[database]':'driver'".format(
-                        self.config["database"]["driver"]))
+                    raise VimAdminException(
+                        "Invalid configuration param '{}' at '[database]':'driver'".format(
+                            self.config["database"]["driver"]
+                        )
+                    )
+
             self.lock_renew.start(self.db, self.loop)
 
             if not self.msg:
                 config_msg = self.config["message"].copy()
                 config_msg["loop"] = self.loop
+
                 if config_msg["driver"] == "local":
                     self.msg = msglocal.MsgLocal()
                     self.msg.connect(config_msg)
@@ -288,20 +382,27 @@ class VimAdminThread(threading.Thread):
                     self.msg = msgkafka.MsgKafka()
                     self.msg.connect(config_msg)
                 else:
-                    raise VimAdminException("Invalid configuration param '{}' at '[message]':'driver'".format(
-                        config_msg["driver"]))
+                    raise VimAdminException(
+                        "Invalid configuration param '{}' at '[message]':'driver'".format(
+                            config_msg["driver"]
+                        )
+                    )
         except (DbException, MsgException) as e:
             raise VimAdminException(str(e), http_code=e.http_code)
 
         self.logger.info("Starting")
         while not self.to_terminate:
             try:
-                self.loop.run_until_complete(asyncio.ensure_future(self.aiomain(), loop=self.loop))
+                self.loop.run_until_complete(
+                    asyncio.ensure_future(self.aiomain(), loop=self.loop)
+                )
             # except asyncio.CancelledError:
             #     break  # if cancelled it should end, breaking loop
             except Exception as e:
                 if not self.to_terminate:
-                    self.logger.exception("Exception '{}' at messaging read loop".format(e), exc_info=True)
+                    self.logger.exception(
+                        "Exception '{}' at messaging read loop".format(e), exc_info=True
+                    )
 
         self.logger.info("Finishing")
         self._stop()
@@ -318,9 +419,11 @@ class VimAdminThread(threading.Thread):
         try:
             if command == "echo":
                 return
+
             if topic in self.kafka_topics:
-                target = topic[0:3]   # vim, wim or sdn
+                target = topic[0:3]  # vim, wim or sdn
                 target_id = target + ":" + params["_id"]
+
                 if command in ("edited", "edit"):
                     self.engine.reload_vim(target_id)
                     self.logger.debug("ordered to reload {}".format(target_id))
@@ -330,12 +433,19 @@ class VimAdminThread(threading.Thread):
                 elif command in ("create", "created"):
                     self.engine.check_vim(target_id)
                     self.logger.debug("ordered to check {}".format(target_id))
-
         except (DbException, MsgException) as e:
-            self.logger.error("Error while processing topic={} command={}: {}".format(topic, command, e))
+            self.logger.error(
+                "Error while processing topic={} command={}: {}".format(
+                    topic, command, e
+                )
+            )
         except Exception as e:
-            self.logger.exception("Exception while processing topic={} command={}: {}".format(topic, command, e),
-                                  exc_info=True)
+            self.logger.exception(
+                "Exception while processing topic={} command={}: {}".format(
+                    topic, command, e
+                ),
+                exc_info=True,
+            )
 
     def _stop(self):
         """
@@ -345,6 +455,7 @@ class VimAdminThread(threading.Thread):
         try:
             if self.db:
                 self.db.db_disconnect()
+
             if self.msg:
                 self.msg.disconnect()
         except (DbException, MsgException) as e:
@@ -358,10 +469,14 @@ class VimAdminThread(threading.Thread):
         """
         self.to_terminate = True
         self.lock_renew.to_terminate = True
+
         if self.aiomain_task_kafka:
             self.loop.call_soon_threadsafe(self.aiomain_task_kafka.cancel)
+
         if self.aiomain_task_vim:
             self.loop.call_soon_threadsafe(self.aiomain_task_vim.cancel)
+
         if self.aiomain_task_renew_lock:
             self.loop.call_soon_threadsafe(self.aiomain_task_renew_lock.cancel)
+
         self.lock_renew.stop()
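
terminate() above cancels the three asyncio tasks from outside the event-loop thread through loop.call_soon_threadsafe(task.cancel), which is the thread-safe way to poke a loop owned by another thread. A self-contained sketch of that pattern (the names are illustrative, not the VimAdminThread API):

# Cancelling an asyncio task from another thread via call_soon_threadsafe.
import asyncio
import threading
import time


async def long_job():
    try:
        await asyncio.sleep(3600)
    except asyncio.CancelledError:
        print("job cancelled cleanly")
        raise


loop = asyncio.new_event_loop()
job = loop.create_task(long_job())


def run_loop():
    try:
        loop.run_until_complete(job)
    except asyncio.CancelledError:
        pass
    finally:
        loop.close()


worker = threading.Thread(target=run_loop)
worker.start()
time.sleep(0.2)                         # let the loop start running
loop.call_soon_threadsafe(job.cancel)   # do not call job.cancel() directly from this thread
worker.join()
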
index 4e8daba..31c3b35 100644 (file)
@@ -22,26 +22,28 @@ _name = "osm_ng_ro"
 _readme = "osm-ng-ro is the New Generation Resource Orchestrator for OSM"
 setup(
     name=_name,
-    description='OSM Resource Orchestrator',
+    description="OSM Resource Orchestrator",
     long_description=_readme,
-    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
-    author='ETSI OSM',
-    author_email='alfonso.tiernosepulveda@telefonica.com',
-    maintainer='Alfonso Tierno',
-    maintainer_email='alfonso.tiernosepulveda@telefonica.com',
-    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
-    license='Apache 2.0',
-
+    version_command=(
+        "git describe --match v* --tags --long --dirty",
+        "pep440-git-full",
+    ),
+    author="ETSI OSM",
+    author_email="alfonso.tiernosepulveda@telefonica.com",
+    maintainer="Alfonso Tierno",
+    maintainer_email="alfonso.tiernosepulveda@telefonica.com",
+    url="https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary",
+    license="Apache 2.0",
     packages=find_packages(exclude=["temp", "local"]),
     include_package_data=True,
     install_requires=[
-        'CherryPy==18.1.2',
-        'osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git#egg=osm-common',
-        'jsonschema',
-        'PyYAML',
-        'requests',
-        'cryptography',  # >=2.5  installed right version with the debian post-install script
+        "CherryPy==18.1.2",
+        "osm-common @ git+https://osm.etsi.org/gerrit/osm/common.git#egg=osm-common",
+        "jsonschema",
+        "PyYAML",
+        "requests",
+        "cryptography",  # >=2.5  installed right version with the debian post-install script
         "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro-plugin&subdirectory=RO-plugin",
     ],
-    setup_requires=['setuptools-version-command'],
+    setup_requires=["setuptools-version-command"],
 )
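
The install_requires entries above use PEP 508 direct references ("name @ git+https://...") so pip fetches osm-common and osm-ro-plugin straight from Gerrit, including the subdirectory fragment for the plugin. A small parsing check, assuming the packaging library is available (it is not declared as a dependency here):

# Parsing one of the direct-reference requirement strings from install_requires.
from packaging.requirements import Requirement

req = Requirement(
    "osm-ro-plugin @ git+https://osm.etsi.org/gerrit/osm/RO.git"
    "#egg=osm-ro-plugin&subdirectory=RO-plugin"
)
print(req.name)  # osm-ro-plugin
print(req.url)   # the git+https URL with its #egg=...&subdirectory=... fragment
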
index 081bc1c..fe01272 100644 (file)
@@ -24,7 +24,7 @@ install_command = python3 -m pip install -r requirements.txt   -U {opts} {packag
 basepython = python3
 deps = flake8
 commands = flake8 osm_ng_ro  --max-line-length 120 \
-    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,E226,E402,W504
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp,osm_im --ignore W291,W293,W503,W605,E123,E125,E203,E226,E241
 
 [testenv:build]
 basepython = python3
index f340f41..f45ec75 100644 (file)
@@ -54,10 +54,10 @@ interface {interface}
             switchport_def = self._int_SRIOV.format(service=s_type, vlan_id=vlan_id)\r
         else:\r
             switchport_def = self._int_PASSTROUGH.format(vlan_id=vlan_id)\r
-        return self._basic_int.format(uuid=uuid,\r
-                                      interface=interface,\r
-                                      type=i_type,\r
-                                      switchport_def=switchport_def)\r
+\r
+        return self._basic_int.format(\r
+            uuid=uuid, interface=interface, type=i_type, switchport_def=switchport_def\r
+        )\r
 \r
     def getElan_sriov(self, uuid, interface, vlan_id, index):\r
         return self._get_interface(uuid, interface, vlan_id, "ELAN", index, "trunk")\r
@@ -66,10 +66,14 @@ interface {interface}
         return self._get_interface(uuid, interface, vlan_id, "ELINE", index, "trunk")\r
 \r
     def getElan_passthrough(self, uuid, interface, vlan_id, index):\r
-        return self._get_interface(uuid, interface, vlan_id, "ELAN", index, "dot1q-tunnel")\r
+        return self._get_interface(\r
+            uuid, interface, vlan_id, "ELAN", index, "dot1q-tunnel"\r
+        )\r
 \r
     def getEline_passthrough(self, uuid, interface, vlan_id, index):\r
-        return self._get_interface(uuid, interface, vlan_id, "ELINE", index, "dot1q-tunnel")\r
+        return self._get_interface(\r
+            uuid, interface, vlan_id, "ELINE", index, "dot1q-tunnel"\r
+        )\r
 \r
     _basic_vlan = """\r
 vlan {vlan}\r
@@ -92,12 +96,21 @@ vlan {vlan}
     def _get_vlan(self, uuid, vlan_id, vni_id, s_type):\r
         if self.topology == self._VLAN:\r
             return self._configLet_VLAN.format(service=s_type, vlan=vlan_id, uuid=uuid)\r
+\r
         if self.topology == self._VLAN_MLAG:\r
-            return self._configLet_VLAN_MLAG.format(service=s_type, vlan=vlan_id, uuid=uuid)\r
+            return self._configLet_VLAN_MLAG.format(\r
+                service=s_type, vlan=vlan_id, uuid=uuid\r
+            )\r
+\r
         if self.topology == self._VXLAN:\r
-            return self._configLet_VXLAN.format(service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id)\r
+            return self._configLet_VXLAN.format(\r
+                service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id\r
+            )\r
+\r
         if self.topology == self._VXLAN_MLAG:\r
-            return self._configLet_VXLAN_MLAG.format(service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id)\r
+            return self._configLet_VXLAN_MLAG.format(\r
+                service=s_type, vlan=vlan_id, uuid=uuid, vni=vni_id\r
+            )\r
 \r
     def getElan_vlan(self, uuid, vlan_id, vni_id):\r
         return self._get_vlan(uuid, vlan_id, vni_id, "ELAN")\r
@@ -117,11 +130,9 @@ router bgp {bgp}
 \r
     def _get_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp, s_type):\r
         if self.topology == self._VXLAN or self.topology == self._VXLAN_MLAG:\r
-            return self._configLet_BGP.format(uuid=uuid,\r
-                                              bgp=bgp,\r
-                                              vlan=vlan_id,\r
-                                              loopback=loopback0,\r
-                                              vni=vni_id)\r
+            return self._configLet_BGP.format(\r
+                uuid=uuid, bgp=bgp, vlan=vlan_id, loopback=loopback0, vni=vni_id\r
+            )\r
 \r
     def getElan_bgp(self, uuid, vlan_id, vni_id, loopback0, bgp):\r
         return self._get_bgp(uuid, vlan_id, vni_id, loopback0, bgp, "ELAN")\r
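
The helpers above only fill str.format templates (_basic_int, _configLet_VLAN, _configLet_VXLAN, _configLet_BGP) with the per-service data. A toy render of that idea; the template body below is a stand-in, not the real AristaSDNConfigLet attribute:

# Toy configlet render; the template text is invented for illustration.
basic_int = (
    "interface {interface}\n"
    "   !! service: {type} {uuid}\n"
    "   switchport\n"
    "   {switchport_def}\n"
)

print(
    basic_int.format(
        uuid="osm_svc_1",
        interface="Ethernet12",
        type="ELAN",
        switchport_def="switchport mode dot1q-tunnel",
    )
)
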
index a338afd..6af7c43 100644 (file)
@@ -48,6 +48,7 @@ class AristaCVPTask:
     def __apply_state(self, task, state):\r
         t_id = self.__get_id(task)\r
         self.cvpClientApi.add_note_to_task(t_id, "Executed by OSM")\r
+\r
         if state == "executed":\r
             return self.__execute_task(t_id)\r
         elif state == "cancelled":\r
@@ -64,33 +65,39 @@ class AristaCVPTask:
 \r
     def update_all_tasks(self, data):\r
         new_data = dict()\r
+\r
         for task_id in data.keys():\r
             res = self.cvpClientApi.get_task_by_id(task_id)\r
             new_data[task_id] = res\r
+\r
         return new_data\r
 \r
     def get_pending_tasks(self):\r
-        return self.cvpClientApi.get_tasks_by_status('Pending')\r
+        return self.cvpClientApi.get_tasks_by_status("Pending")\r
 \r
     def get_pending_tasks_old(self):\r
         taskList = []\r
-        tasksField = {'workOrderId': 'workOrderId',\r
-                      'workOrderState': 'workOrderState',\r
-                      'currentTaskName': 'currentTaskName',\r
-                      'description': 'description',\r
-                      'workOrderUserDefinedStatus':\r
-                      'workOrderUserDefinedStatus',\r
-                      'note': 'note',\r
-                      'taskStatus': 'taskStatus',\r
-                      'workOrderDetails': 'workOrderDetails'}\r
-        tasks = self.cvpClientApi.get_tasks_by_status('Pending')\r
+        tasksField = {\r
+            "workOrderId": "workOrderId",\r
+            "workOrderState": "workOrderState",\r
+            "currentTaskName": "currentTaskName",\r
+            "description": "description",\r
+            "workOrderUserDefinedStatus": "workOrderUserDefinedStatus",\r
+            "note": "note",\r
+            "taskStatus": "taskStatus",\r
+            "workOrderDetails": "workOrderDetails",\r
+        }\r
+        tasks = self.cvpClientApi.get_tasks_by_status("Pending")\r
+\r
         # Reduce task data to required fields\r
         for task in tasks:\r
             taskFacts = {}\r
             for field in task.keys():\r
                 if field in tasksField:\r
                     taskFacts[tasksField[field]] = task[field]\r
+\r
             taskList.append(taskFacts)\r
+\r
         return taskList\r
 \r
     def task_action(self, tasks, wait, state):\r
@@ -118,15 +125,18 @@ class AristaCVPTask:
         now = time.time()\r
         while (now - start) < wait:\r
             data = self.update_all_tasks(data)\r
+\r
             if all([self.__terminal(self.__get_state(t)) for t in data.values()]):\r
                 break\r
+\r
             time.sleep(1)\r
             now = time.time()\r
 \r
         if wait:\r
             for i, task in data.items():\r
                 if not self.__terminal(self.__get_state(task)):\r
-                    warnings.append("Task {} has not completed in {} seconds".\r
-                                    format(i, wait))\r
+                    warnings.append(\r
+                        "Task {} has not completed in {} seconds".format(i, wait)\r
+                    )\r
 \r
         return changed, data, warnings\r
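
task_action() above polls CloudVision until every task reaches a terminal state or the wait budget runs out, then flags whatever is still pending. A generic sketch of that wait loop; fetch_states and is_terminal are stand-ins for update_all_tasks() and the CVP state check:

# Generic "poll until all tasks are terminal or timeout" loop.
import time


def wait_for_tasks(fetch_states, is_terminal, wait, poll_period=1.0):
    warnings = []
    states = fetch_states()
    start = time.time()

    while time.time() - start < wait:
        if all(is_terminal(s) for s in states.values()):
            break

        time.sleep(poll_period)
        states = fetch_states()

    for task_id, state in states.items():
        if not is_terminal(state):
            warnings.append(
                "Task {} has not completed in {} seconds".format(task_id, wait)
            )

    return states, warnings


print(wait_for_tasks(lambda: {"42": "Completed"}, lambda s: s == "Completed", wait=5))
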
index e72a082..314c673 100644 (file)
 #
 # This work has been performed in the context of Arista Telefonica OSM PoC.
 ##
+
 from osm_ro_plugin.sdnconn import SdnConnectorBase, SdnConnectorError
 import re
 import socket
+
 # Required by compare function
 import difflib
+
 # Library that uses Levenshtein Distance to calculate the differences
 # between strings.
 # from fuzzywuzzy import fuzz
@@ -49,24 +52,22 @@ from osm_rosdn_arista_cloudvision.aristaTask import AristaCVPTask
 
 
 class SdnError(Enum):
-    UNREACHABLE = 'Unable to reach the WIM url, connect error.',
-    TIMEOUT = 'Unable to reach the WIM url, timeout.',
-    VLAN_INCONSISTENT = \
-        'VLAN value inconsistent between the connection points',
-    VLAN_NOT_PROVIDED = 'VLAN value not provided',
-    CONNECTION_POINTS_SIZE = \
-        'Unexpected number of connection points: 2 expected.',
-    ENCAPSULATION_TYPE = \
-        'Unexpected service_endpoint_encapsulation_type. ' \
-        'Only "dotq1" is accepted.',
-    BANDWIDTH = 'Unable to get the bandwidth.',
-    STATUS = 'Unable to get the status for the service.',
-    DELETE = 'Unable to delete service.',
-    CLEAR_ALL = 'Unable to clear all the services',
-    UNKNOWN_ACTION = 'Unknown action invoked.',
-    BACKUP = 'Unable to get the backup parameter.',
-    UNSUPPORTED_FEATURE = "Unsupported feature",
-    UNAUTHORIZED = "Failed while authenticating",
+    UNREACHABLE = "Unable to reach the WIM url, connect error."
+    TIMEOUT = "Unable to reach the WIM url, timeout."
+    VLAN_INCONSISTENT = "VLAN value inconsistent between the connection points"
+    VLAN_NOT_PROVIDED = "VLAN value not provided"
+    CONNECTION_POINTS_SIZE = "Unexpected number of connection points: 2 expected."
+    ENCAPSULATION_TYPE = (
+        'Unexpected service_endpoint_encapsulation_type. Only "dotq1" is accepted.'
+    )
+    BANDWIDTH = "Unable to get the bandwidth."
+    STATUS = "Unable to get the status for the service."
+    DELETE = "Unable to delete service."
+    CLEAR_ALL = "Unable to clear all the services"
+    UNKNOWN_ACTION = "Unknown action invoked."
+    BACKUP = "Unable to get the backup parameter."
+    UNSUPPORTED_FEATURE = "Unsupported feature"
+    UNAUTHORIZED = "Failed while authenticating"
     INTERNAL_ERROR = "Internal error"
 
 
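
With the trailing commas removed, each SdnError member now carries a plain string instead of a one-element tuple. One detail worth noting: with class SdnError(Enum) the members themselves are still not str, so expressions such as SdnError.TIMEOUT + " " + str(ct) (used further down in this connector) only work with a str mixin or an explicit .value. A tiny illustration with made-up enum names:

# Plain Enum vs str-mixin Enum when the value goes into a message string.
from enum import Enum


class PlainError(Enum):
    TIMEOUT = "Unable to reach the WIM url, timeout."


class StrError(str, Enum):
    TIMEOUT = "Unable to reach the WIM url, timeout."


print(StrError.TIMEOUT + " (details)")          # works: members are str instances
print(PlainError.TIMEOUT.value + " (details)")  # works: use .value explicitly
# PlainError.TIMEOUT + " (details)" would raise TypeError
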
@@ -97,14 +98,15 @@ class AristaSdnConnector(SdnConnectorBase):
     -- All created services identification is stored in a generic ConfigLet 'OSM_metadata'
        to keep track of the managed resources by OSM in the Arista deployment.
     """
+
     __supported_service_types = ["ELINE (L2)", "ELINE", "ELAN"]
     __service_types_ELAN = "ELAN"
     __service_types_ELINE = "ELINE"
     __ELINE_num_connection_points = 2
     __supported_service_types = ["ELINE", "ELAN"]
     __supported_encapsulation_types = ["dot1q"]
-    __WIM_LOGGER = 'ro.sdn.arista'
-    __SERVICE_ENDPOINT_MAPPING = 'service_endpoint_mapping'
+    __WIM_LOGGER = "ro.sdn.arista"
+    __SERVICE_ENDPOINT_MAPPING = "service_endpoint_mapping"
     __ENCAPSULATION_TYPE_PARAM = "service_endpoint_encapsulation_type"
     __ENCAPSULATION_INFO_PARAM = "service_endpoint_encapsulation_info"
     __BACKUP_PARAM = "backup"
@@ -119,16 +121,16 @@ class AristaSdnConnector(SdnConnectorBase):
     __SW_PORT_PARAM = "switch_port"
     __VLAN_PARAM = "vlan"
     __VNI_PARAM = "vni"
-    __SEPARATOR = '_'
-    __MANAGED_BY_OSM = '## Managed by OSM '
+    __SEPARATOR = "_"
+    __MANAGED_BY_OSM = "## Managed by OSM "
     __OSM_PREFIX = "osm_"
     __OSM_METADATA = "OSM_metadata"
-    __METADATA_PREFIX = '!## Service'
+    __METADATA_PREFIX = "!## Service"
     __EXC_TASK_EXEC_WAIT = 10
     __ROLLB_TASK_EXEC_WAIT = 10
     __API_REQUEST_TOUT = 60
-    __SWITCH_TAG_NAME = 'topology_type'
-    __SWITCH_TAG_VALUE = 'leaf'
+    __SWITCH_TAG_NAME = "topology_type"
+    __SWITCH_TAG_VALUE = "leaf"
     __LOOPBACK_INTF = "Loopback0"
     _VLAN = "VLAN"
     _VXLAN = "VXLAN"
@@ -159,55 +161,74 @@ class AristaSdnConnector(SdnConnectorBase):
         :param logger (logging.Logger): optional logger object. If none is passed 'ro.sdn.sdnconn' is used.
         """
         self.__regex = re.compile(
-            r'^(?:http|ftp)s?://'  # http:// or https://
-            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
-            r'localhost|'  # localhost...
-            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
-            r'(?::\d+)?', re.IGNORECASE)  # optional port
+            r"^(?:http|ftp)s?://"  # http:// or https://
+            r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|"  # domain...
+            r"localhost|"  # localhost...
+            r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})"  # ...or ip
+            r"(?::\d+)?",
+            re.IGNORECASE,
+        )  # optional port
         self.raiseException = True
         self.logger = logger or logging.getLogger(self.__WIM_LOGGER)
         super().__init__(wim, wim_account, config, self.logger)
         self.__wim = wim
         self.__wim_account = wim_account
         self.__config = config
+
         if self.is_valid_destination(self.__wim.get("wim_url")):
             self.__wim_url = self.__wim.get("wim_url")
         else:
-            raise SdnConnectorError(message='Invalid wim_url value',
-                                    http_code=500)
+            raise SdnConnectorError(message="Invalid wim_url value", http_code=500)
+
         self.__user = wim_account.get("user")
         self.__passwd = wim_account.get("password")
         self.client = None
         self.cvp_inventory = None
         self.cvp_tags = None
-        self.logger.debug("Arista SDN plugin {}, cvprac version {}, user:{} and config:{}".
-                          format(wim, cvprac_version, self.__user,
-                                 self.delete_keys_from_dict(config, ('passwd',))))
+        self.logger.debug(
+            "Arista SDN plugin {}, cvprac version {}, user:{} and config:{}".format(
+                wim,
+                cvprac_version,
+                self.__user,
+                self.delete_keys_from_dict(config, ("passwd",)),
+            )
+        )
         self.allDeviceFacts = []
         self.taskC = None
+
         try:
             self.__load_topology()
             self.__load_switches()
         except (ConnectTimeout, Timeout) as ct:
-            raise SdnConnectorError(message=SdnError.TIMEOUT + " " + str(ct), http_code=408)
+            raise SdnConnectorError(
+                message=SdnError.TIMEOUT + " " + str(ct), http_code=408
+            )
         except ConnectionError as ce:
-            raise SdnConnectorError(message=SdnError.UNREACHABLE + " " + str(ce), http_code=404)
+            raise SdnConnectorError(
+                message=SdnError.UNREACHABLE + " " + str(ce), http_code=404
+            )
         except SdnConnectorError as sc:
             raise sc
         except CvpLoginError as le:
             raise SdnConnectorError(message=le.msg, http_code=500) from le
         except Exception as e:
-            raise SdnConnectorError(message="Unable to load switches from CVP" + " " + str(e),
-                                    http_code=500) from e
-        self.logger.debug("Using topology {} in Arista Leaf switches: {}".format(
-            self.topology,
-            self.delete_keys_from_dict(self.switches, ('passwd',))))
+            raise SdnConnectorError(
+                message="Unable to load switches from CVP " + str(e), http_code=500
+            ) from e
+
+        self.logger.debug(
+            "Using topology {} in Arista Leaf switches: {}".format(
+                self.topology, self.delete_keys_from_dict(self.switches, ("passwd",))
+            )
+        )
         self.clC = AristaSDNConfigLet(self.topology)
 
     def __load_topology(self):
         self.topology = self._VXLAN_MLAG
-        if self.__config and self.__config.get('topology'):
-            topology = self.__config.get('topology')
+
+        if self.__config and self.__config.get("topology"):
+            topology = self.__config.get("topology")
+
             if topology == "VLAN":
                 self.topology = self._VLAN
             elif topology == "VXLAN":
@@ -218,7 +239,7 @@ class AristaSdnConnector(SdnConnectorBase):
                 self.topology = self._VXLAN_MLAG
 
     def __load_switches(self):
-        """ Retrieves the switches to configure in the following order
+        """Retrieves the switches to configure in the following order
         1.  from incoming configuration:
         1.1 using port mapping
               using user and password from WIM
@@ -236,47 +257,58 @@ class AristaSdnConnector(SdnConnectorBase):
             for port in self.__config.get(self.__SERVICE_ENDPOINT_MAPPING):
                 switch_dpid = port.get(self.__SW_ID_PARAM)
                 if switch_dpid and switch_dpid not in self.switches:
-                    self.switches[switch_dpid] = {'passwd': self.__passwd,
-                                                  'ip': None,
-                                                  'usr': self.__user,
-                                                  'lo0': None,
-                                                  'AS': None,
-                                                  'serialNumber': None,
-                                                  'mlagPeerDevice': None}
-
-        if self.__config and self.__config.get('switches'):
+                    self.switches[switch_dpid] = {
+                        "passwd": self.__passwd,
+                        "ip": None,
+                        "usr": self.__user,
+                        "lo0": None,
+                        "AS": None,
+                        "serialNumber": None,
+                        "mlagPeerDevice": None,
+                    }
+
+        if self.__config and self.__config.get("switches"):
             # Not directly from json, complete one by one
-            config_switches = self.__config.get('switches')
+            config_switches = self.__config.get("switches")
             for cs, cs_content in config_switches.items():
                 if cs not in self.switches:
-                    self.switches[cs] = {'passwd': self.__passwd,
-                                         'ip': None,
-                                         'usr': self.__user,
-                                         'lo0': None,
-                                         'AS': None,
-                                         'serialNumber': None,
-                                         'mlagPeerDevice': None}
+                    self.switches[cs] = {
+                        "passwd": self.__passwd,
+                        "ip": None,
+                        "usr": self.__user,
+                        "lo0": None,
+                        "AS": None,
+                        "serialNumber": None,
+                        "mlagPeerDevice": None,
+                    }
+
                 if cs_content:
                     self.switches[cs].update(cs_content)
 
         # Load the rest of the data
         if self.client is None:
             self.client = self.__connect()
+
         self.__load_inventory()
+
         if not self.switches:
             self.__get_tags(self.__SWITCH_TAG_NAME, self.__SWITCH_TAG_VALUE)
+
             for device in self.allDeviceFacts:
                 # get the switches whose topology_tag is 'leaf'
-                if device['serialNumber'] in self.cvp_tags:
-                    if not self.switches.get(device['hostname']):
-                        switch_data = {'passwd': self.__passwd,
-                                       'ip': device['ipAddress'],
-                                       'usr': self.__user,
-                                       'lo0': None,
-                                       'AS': None,
-                                       'serialNumber': None,
-                &