Refactoring: Made the entire codebase PEP 8 compliant.

Only PEP 8 rule E501 (line too long) is not yet addressed
by this change.

The patch also adds automated code style checks to the
CI test stage using flake8.
This causes the tests to fail whenever a code style
violation is detected.

Change-Id: I90956dd424a46691546ef720351757d3c43451a7
Signed-off-by: peusterm <manuel.peuster@uni-paderborn.de>
diff --git a/src/emuvim/api/sonata/dummygatekeeper.py b/src/emuvim/api/sonata/dummygatekeeper.py
index f20483b..59b1900 100755
--- a/src/emuvim/api/sonata/dummygatekeeper.py
+++ b/src/emuvim/api/sonata/dummygatekeeper.py
@@ -1,37 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-This module implements a simple REST API that behaves like SONATA's gatekeeper.
-
-It is only used to support the development of SONATA's SDK tools and to demonstrate
-the year 1 version of the emulator until the integration with WP4's orchestrator is done.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 import os
 import uuid
@@ -39,7 +30,7 @@
 import zipfile
 import yaml
 import threading
-from docker import DockerClient, APIClient
+from docker import DockerClient
 from flask import Flask, request
 import flask_restful as fr
 from collections import defaultdict
@@ -49,6 +40,7 @@
 import ipaddress
 import copy
 import time
+from functools import reduce
 
 logging.basicConfig()
 LOG = logging.getLogger("sonata-dummy-gatekeeper")
@@ -62,28 +54,34 @@
 # Enable Dockerfile build functionality
 BUILD_DOCKERFILE = False
 
-# flag to indicate that we run without the emulator (only the bare API for integration testing)
+# flag to indicate that we run without the emulator (only the bare API for
+# integration testing)
 GK_STANDALONE_MODE = False
 
 # should a new version of an image be pulled even if its available
 FORCE_PULL = False
 
 # Automatically deploy SAPs (endpoints) of the service as new containers
-# Attention: This is not a configuration switch but a global variable! Don't change its default value.
+# Attention: This is not a configuration switch but a global variable!
+# Don't change its default value.
 DEPLOY_SAP = False
 
-# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
+# flag to indicate if we use bidirectional forwarding rules in the
+# automatic chaining process
 BIDIRECTIONAL_CHAIN = False
 
-# override the management interfaces in the descriptors with default docker0 interfaces in the containers
+# override the management interfaces in the descriptors with default
+# docker0 interfaces in the containers
 USE_DOCKER_MGMT = False
 
-# automatically deploy uploaded packages (no need to execute son-access deploy --latest separately)
+# automatically deploy uploaded packages (no need to execute son-access
+# deploy --latest separately)
 AUTO_DEPLOY = False
 
 # and also automatically terminate any other running services
 AUTO_DELETE = False
 
+
 def generate_subnets(prefix, base, subnet_size=50, mask=24):
     # Generate a list of ipaddress in subnets
     r = list()
@@ -91,6 +89,8 @@
         subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
         r.append(ipaddress.ip_network(unicode(subnet)))
     return r
+
+
 # private subnet definitions for the generated interfaces
 # 10.10.xxx.0/24
 SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
@@ -100,18 +100,20 @@
 ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
 
 # path to the VNFD for the SAP VNF that is deployed as internal SAP point
-SAP_VNFD=None
+SAP_VNFD = None
 
 # Time in seconds to wait for vnf stop scripts to execute fully
 VNF_STOP_WAIT_TIME = 5
 
+
 class Gatekeeper(object):
 
     def __init__(self):
         self.services = dict()
         self.dcs = dict()
         self.net = None
-        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
+        # used to generate short names for VNFs (Mininet limitation)
+        self.vnf_counter = 0
         LOG.info("Create SONATA dummy gatekeeper.")
 
     def register_service_package(self, service_uuid, service):
@@ -143,7 +145,8 @@
         self.uuid = service_uuid
         self.package_file_hash = package_file_hash
         self.package_file_path = package_file_path
-        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
+        self.package_content_path = os.path.join(
+            CATALOG_FOLDER, "services/%s" % self.uuid)
         self.manifest = None
         self.nsd = None
         self.vnfds = dict()
@@ -194,9 +197,10 @@
         self.instances[instance_uuid] = dict()
         self.instances[instance_uuid]["vnf_instances"] = list()
 
-        # 2. compute placement of this service instance (adds DC names to VNFDs)
+        # 2. compute placement of this service instance (adds DC names to
+        # VNFDs)
         if not GK_STANDALONE_MODE:
-            #self._calculate_placement(FirstDcPlacement)
+            # self._calculate_placement(FirstDcPlacement)
             self._calculate_placement(RoundRobinDcPlacementWithSAPs)
         # 3. start all vnfds that we have in the service (except SAPs)
         for vnf_id in self.vnfds:
@@ -216,9 +220,11 @@
         if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
             vlinks = self.nsd["virtual_links"]
             # constituent virtual links are not checked
-            #fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
-            eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
-            elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
+            # fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
+            eline_fwd_links = [l for l in vlinks if (
+                l["connectivity_type"] == "E-Line")]
+            elan_fwd_links = [l for l in vlinks if (
+                l["connectivity_type"] == "E-LAN")]
 
             GK.net.deployed_elines.extend(eline_fwd_links)
             GK.net.deployed_elans.extend(elan_fwd_links)
@@ -229,8 +235,10 @@
             # 5b. deploy E-LAN links
             self._connect_elans(elan_fwd_links, instance_uuid)
 
-        # 6. run the emulator specific entrypoint scripts in the VNFIs of this service instance
-        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])
+        # 6. run the emulator specific entrypoint scripts in the VNFIs of this
+        # service instance
+        self._trigger_emulator_start_scripts_in_vnfis(
+            self.instances[instance_uuid]["vnf_instances"])
 
         LOG.info("Service started. Instance id: %r" % instance_uuid)
         return instance_uuid
@@ -248,7 +256,8 @@
         # instance_uuid = str(self.uuid.uuid4())
         vnf_instances = self.instances[instance_uuid]["vnf_instances"]
 
-        # trigger stop skripts in vnf instances and wait a few seconds for completion
+        # trigger stop skripts in vnf instances and wait a few seconds for
+        # completion
         self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
         time.sleep(VNF_STOP_WAIT_TIME)
 
@@ -259,7 +268,8 @@
             ext_sap = self.saps[sap_name]
             target_dc = ext_sap.get("dc")
             target_dc.removeExternalSAP(sap_name)
-            LOG.info("Stopping the SAP instance: %r in DC %r" % (sap_name, target_dc))
+            LOG.info("Stopping the SAP instance: %r in DC %r" %
+                     (sap_name, target_dc))
 
         if not GK_STANDALONE_MODE:
             # remove placement?
@@ -290,7 +300,8 @@
             assert(docker_name is not None)
             assert(target_dc is not None)
             if not self._check_docker_image_exists(docker_name):
-                raise Exception("Docker image %r not found. Abort." % docker_name)
+                raise Exception(
+                    "Docker image %r not found. Abort." % docker_name)
 
             # 3. get the resource limits
             res_req = u.get("resource_requirements")
@@ -298,77 +309,88 @@
             if cpu_list is None:
                 cpu_list = res_req.get("cpu").get("vcpus")
             if cpu_list is None:
-                cpu_list="1"
+                cpu_list = "1"
             cpu_bw = res_req.get("cpu").get("cpu_bw")
             if not cpu_bw:
-                cpu_bw=1
+                cpu_bw = 1
             mem_num = str(res_req.get("memory").get("size"))
-            if len(mem_num)==0:
-                mem_num="2"
+            if len(mem_num) == 0:
+                mem_num = "2"
             mem_unit = str(res_req.get("memory").get("size_unit"))
-            if str(mem_unit)==0:
-                mem_unit="GB"
+            if str(mem_unit) == 0:
+                mem_unit = "GB"
             mem_limit = float(mem_num)
-            if mem_unit=="GB":
-                mem_limit=mem_limit*1024*1024*1024
-            elif mem_unit=="MB":
-                mem_limit=mem_limit*1024*1024
-            elif mem_unit=="KB":
-                mem_limit=mem_limit*1024
+            if mem_unit == "GB":
+                mem_limit = mem_limit * 1024 * 1024 * 1024
+            elif mem_unit == "MB":
+                mem_limit = mem_limit * 1024 * 1024
+            elif mem_unit == "KB":
+                mem_limit = mem_limit * 1024
             mem_lim = int(mem_limit)
-            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
+            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(
+                float(cpu_bw))
 
-            # check if we need to deploy the management ports (defined as type:management both on in the vnfd and nsd)
+            # check if we need to deploy the management ports (defined as
+            # type:management both on in the vnfd and nsd)
             intfs = vnfd.get("connection_points", [])
             mgmt_intf_names = []
             if USE_DOCKER_MGMT:
-                mgmt_intfs = [vnf_id + ':' + intf['id'] for intf in intfs if intf.get('type') == 'management']
-                # check if any of these management interfaces are used in a management-type network in the nsd
+                mgmt_intfs = [vnf_id + ':' + intf['id']
+                              for intf in intfs if intf.get('type') == 'management']
+                # check if any of these management interfaces are used in a
+                # management-type network in the nsd
                 for nsd_intf_name in mgmt_intfs:
-                    vlinks = [ l["connection_points_reference"] for l in self.nsd.get("virtual_links", [])]
+                    vlinks = [l["connection_points_reference"]
+                              for l in self.nsd.get("virtual_links", [])]
                     for link in vlinks:
-                        if nsd_intf_name in link and self.check_mgmt_interface(link):
-                            # this is indeed a management interface and can be skipped
-                            vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(nsd_intf_name)
-                            found_interfaces = [intf for intf in intfs if intf.get('id') == vnf_interface]
+                        if nsd_intf_name in link and self.check_mgmt_interface(
+                                link):
+                            # this is indeed a management interface and can be
+                            # skipped
+                            vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
+                                nsd_intf_name)
+                            found_interfaces = [
+                                intf for intf in intfs if intf.get('id') == vnf_interface]
                             intfs.remove(found_interfaces[0])
                             mgmt_intf_names.append(vnf_interface)
 
             # 4. generate the volume paths for the docker container
-            volumes=list()
+            volumes = list()
             # a volume to extract log files
-            docker_log_path = "/tmp/results/%s/%s"%(self.uuid,vnf_id)
-            LOG.debug("LOG path for vnf %s is %s."%(vnf_id,docker_log_path))
+            docker_log_path = "/tmp/results/%s/%s" % (self.uuid, vnf_id)
+            LOG.debug("LOG path for vnf %s is %s." % (vnf_id, docker_log_path))
             if not os.path.exists(docker_log_path):
-                LOG.debug("Creating folder %s"%docker_log_path)
+                LOG.debug("Creating folder %s" % docker_log_path)
                 os.makedirs(docker_log_path)
 
-            volumes.append(docker_log_path+":/mnt/share/")
-
+            volumes.append(docker_log_path + ":/mnt/share/")
 
             # 5. do the dc.startCompute(name="foobar") call to run the container
             # TODO consider flavors, and other annotations
             # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
             # use the vnf_id in the nsd as docker name
             # so deployed containers can be easily mapped back to the nsd
-            LOG.info("Starting %r as %r in DC %r" % (vnf_name, vnf_id, vnfd.get("dc")))
+            LOG.info("Starting %r as %r in DC %r" %
+                     (vnf_name, vnf_id, vnfd.get("dc")))
             LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
             vnfi = target_dc.startCompute(
-                    vnf_id,
-                    network=intfs,
-                    image=docker_name,
-                    flavor_name="small",
-                    cpu_quota=cpu_quota,
-                    cpu_period=cpu_period,
-                    cpuset=cpu_list,
-                    mem_limit=mem_lim,
-                    volumes=volumes,
-                    type=kwargs.get('type','docker'))
+                vnf_id,
+                network=intfs,
+                image=docker_name,
+                flavor_name="small",
+                cpu_quota=cpu_quota,
+                cpu_period=cpu_period,
+                cpuset=cpu_list,
+                mem_limit=mem_lim,
+                volumes=volumes,
+                type=kwargs.get('type', 'docker'))
 
-            # rename the docker0 interfaces (eth0) to the management port name defined in the VNFD
+            # rename the docker0 interfaces (eth0) to the management port name
+            # defined in the VNFD
             if USE_DOCKER_MGMT:
                 for intf_name in mgmt_intf_names:
-                    self._vnf_reconfigure_network(vnfi, 'eth0', new_name=intf_name)
+                    self._vnf_reconfigure_network(
+                        vnfi, 'eth0', new_name=intf_name)
 
             return vnfi
 
@@ -383,7 +405,8 @@
         dc = vnfi.datacenter
 
         # stop the vnfi
-        LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
+        LOG.info("Stopping the vnf instance contained in %r in DC %r" %
+                 (status["name"], dc))
         dc.stopCompute(status["name"])
 
     def _get_vnf_instance(self, instance_uuid, vnf_id):
@@ -416,17 +439,18 @@
             intf = vnfi.intf(intf=if_name)
             if intf is not None:
                 intf.setIP(net_str)
-                LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
+                LOG.debug("Reconfigured network of %s:%s to %r" %
+                          (vnfi.name, if_name, net_str))
             else:
-                LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
+                LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
+                    vnfi.name, if_name))
 
         if new_name is not None:
             vnfi.cmd('ip link set', if_name, 'down')
             vnfi.cmd('ip link set', if_name, 'name', new_name)
             vnfi.cmd('ip link set', new_name, 'up')
-            LOG.debug("Reconfigured interface name of %s:%s to %s" % (vnfi.name, if_name, new_name))
-
-
+            LOG.debug("Reconfigured interface name of %s:%s to %s" %
+                      (vnfi.name, if_name, new_name))
 
     def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
         for vnfi in vnfi_list:
@@ -434,10 +458,12 @@
             env = config.get("Env", list())
             for env_var in env:
                 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
-                LOG.debug("%r = %r" % (var , cmd))
-                if var=="SON_EMU_CMD":
-                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
-                    # execute command in new thread to ensure that GK is not blocked by VNF
+                LOG.debug("%r = %r" % (var, cmd))
+                if var == "SON_EMU_CMD":
+                    LOG.info("Executing entry point script in %r: %r" %
+                             (vnfi.name, cmd))
+                    # execute command in new thread to ensure that GK is not
+                    # blocked by VNF
                     t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                     t.daemon = True
                     t.start()
@@ -448,15 +474,15 @@
             env = config.get("Env", list())
             for env_var in env:
                 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
-                if var=="SON_EMU_CMD_STOP":
-                    LOG.info("Executing stop script in %r: %r" % (vnfi.name, cmd))
-                    # execute command in new thread to ensure that GK is not blocked by VNF
+                if var == "SON_EMU_CMD_STOP":
+                    LOG.info("Executing stop script in %r: %r" %
+                             (vnfi.name, cmd))
+                    # execute command in new thread to ensure that GK is not
+                    # blocked by VNF
                     t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                     t.daemon = True
                     t.start()
 
-
-
     def _unpack_service_package(self):
         """
         unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
@@ -465,7 +491,6 @@
         with zipfile.ZipFile(self.package_file_path, "r") as z:
             z.extractall(self.package_content_path)
 
-
     def _load_package_descriptor(self):
         """
         Load the main package descriptor YAML and keep it as dict.
@@ -488,7 +513,7 @@
             GK.net.deployed_nsds.append(self.nsd)
             # create dict to find the vnf_name for any vnf id
             self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
-                                                reduce(lambda x, y: dict(x, **y),
+                                               reduce(lambda x, y: dict(x, **y),
                                                        map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                            self.nsd["network_functions"])))
 
@@ -504,14 +529,15 @@
         vnfd_set = dict()
         if "package_content" in self.manifest:
             for pc in self.manifest.get("package_content"):
-                if pc.get("content-type") == "application/sonata.function_descriptor":
+                if pc.get(
+                        "content-type") == "application/sonata.function_descriptor":
                     vnfd_path = os.path.join(
                         self.package_content_path,
                         make_relative_path(pc.get("name")))
                     vnfd = load_yaml(vnfd_path)
                     vnfd_set[vnfd.get("name")] = vnfd
             # then link each vnf_id in the nsd to its vnfd
-            for  vnf_id in self.vnf_id2vnf_name:
+            for vnf_id in self.vnf_id2vnf_name:
                 vnf_name = self.vnf_id2vnf_name[vnf_id]
                 self.vnfds[vnf_id] = vnfd_set[vnf_name]
                 LOG.debug("Loaded VNFD: {0} id: {1}".format(vnf_name, vnf_id))
@@ -520,7 +546,8 @@
         # create list of all SAPs
         # check if we need to deploy management ports
         if USE_DOCKER_MGMT:
-            SAPs = [p for p in self.nsd["connection_points"] if 'management' not in p.get('type')]
+            SAPs = [p for p in self.nsd["connection_points"]
+                    if 'management' not in p.get('type')]
         else:
             SAPs = [p for p in self.nsd["connection_points"]]
 
@@ -530,21 +557,26 @@
             # make sure SAP has type set (default internal)
             sap["type"] = sap.get("type", 'internal')
 
-            # Each Service Access Point (connection_point) in the nsd is an IP address on the host
+            # Each Service Access Point (connection_point) in the nsd is an IP
+            # address on the host
             if sap["type"] == "external":
                 # add to vnfds to calculate placement later on
                 sap_net = SAP_SUBNETS.pop(0)
-                self.saps[sap_docker_name] = {"name": sap_docker_name , "type": "external", "net": sap_net}
+                self.saps[sap_docker_name] = {
+                    "name": sap_docker_name, "type": "external", "net": sap_net}
                 # add SAP vnf to list in the NSD so it is deployed later on
-                # each SAP gets a unique VNFD and vnf_id in the NSD and custom type (only defined in the dummygatekeeper)
+                # each SAP gets a unique VNFD and vnf_id in the NSD and custom
+                # type (only defined in the dummygatekeeper)
                 self.nsd["network_functions"].append(
                     {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_ext"})
 
-            # Each Service Access Point (connection_point) in the nsd is getting its own container (default)
+            # Each Service Access Point (connection_point) in the nsd is
+            # getting its own container (default)
             elif sap["type"] == "internal" or sap["type"] == "management":
                 # add SAP to self.vnfds
                 if SAP_VNFD is None:
-                    sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
+                    sapfile = pkg_resources.resource_filename(
+                        __name__, "sap_vnfd.yml")
                 else:
                     sapfile = SAP_VNFD
                 sap_vnfd = load_yaml(sapfile)
@@ -558,17 +590,20 @@
                 self.nsd["network_functions"].append(
                     {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_int"})
 
-            LOG.debug("Loaded SAP: name: {0}, type: {1}".format(sap_docker_name, sap['type']))
+            LOG.debug("Loaded SAP: name: {0}, type: {1}".format(
+                sap_docker_name, sap['type']))
 
         # create sap lists
-        self.saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
-        self.saps_int = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "internal"]
+        self.saps_ext = [self.saps[sap]['name']
+                         for sap in self.saps if self.saps[sap]["type"] == "external"]
+        self.saps_int = [self.saps[sap]['name']
+                         for sap in self.saps if self.saps[sap]["type"] == "internal"]
 
     def _start_sap(self, sap, instance_uuid):
         if not DEPLOY_SAP:
             return
 
-        LOG.info('start SAP: {0} ,type: {1}'.format(sap['name'],sap['type']))
+        LOG.info('start SAP: {0} ,type: {1}'.format(sap['name'], sap['type']))
         if sap["type"] == "internal":
             vnfi = None
             if not GK_STANDALONE_MODE:
@@ -593,39 +628,49 @@
         for link in eline_fwd_links:
             # check if we need to deploy this link when its a management link:
             if USE_DOCKER_MGMT:
-                if self.check_mgmt_interface(link["connection_points_reference"]):
+                if self.check_mgmt_interface(
+                        link["connection_points_reference"]):
                     continue
 
-            src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
-            dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
+            src_id, src_if_name, src_sap_id = parse_interface(
+                link["connection_points_reference"][0])
+            dst_id, dst_if_name, dst_sap_id = parse_interface(
+                link["connection_points_reference"][1])
 
             setChaining = False
             # check if there is a SAP in the link and chain everything together
             if src_sap_id in self.saps and dst_sap_id in self.saps:
-                LOG.info('2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
+                LOG.info(
+                    '2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
                 continue
 
             elif src_sap_id in self.saps_ext:
                 src_id = src_sap_id
-                # set intf name to None so the chaining function will choose the first one
+                # set intf name to None so the chaining function will choose
+                # the first one
                 src_if_name = None
                 dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
                 if dst_vnfi is not None:
                     # choose first ip address in sap subnet
                     sap_net = self.saps[src_sap_id]['net']
-                    sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
-                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, sap_ip)
+                    sap_ip = "{0}/{1}".format(str(sap_net[2]),
+                                              sap_net.prefixlen)
+                    self._vnf_reconfigure_network(
+                        dst_vnfi, dst_if_name, sap_ip)
                     setChaining = True
 
             elif dst_sap_id in self.saps_ext:
                 dst_id = dst_sap_id
-                # set intf name to None so the chaining function will choose the first one
+                # set intf name to None so the chaining function will choose
+                # the first one
                 dst_if_name = None
                 src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
                 if src_vnfi is not None:
                     sap_net = self.saps[dst_sap_id]['net']
-                    sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
-                    self._vnf_reconfigure_network(src_vnfi, src_if_name, sap_ip)
+                    sap_ip = "{0}/{1}".format(str(sap_net[2]),
+                                              sap_net.prefixlen)
+                    self._vnf_reconfigure_network(
+                        src_vnfi, src_if_name, sap_ip)
                     setChaining = True
 
             # Link between 2 VNFs
@@ -635,20 +680,23 @@
                     src_id = src_sap_id
                 if dst_sap_id in self.saps_int:
                     dst_id = dst_sap_id
-                # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
+                # re-configure the VNFs IP assignment and ensure that a new
+                # subnet is used for each E-Link
                 src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
                 dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
                 if src_vnfi is not None and dst_vnfi is not None:
                     eline_net = ELINE_SUBNETS.pop(0)
-                    ip1 = "{0}/{1}".format(str(eline_net[1]), eline_net.prefixlen)
-                    ip2 = "{0}/{1}".format(str(eline_net[2]), eline_net.prefixlen)
+                    ip1 = "{0}/{1}".format(str(eline_net[1]),
+                                           eline_net.prefixlen)
+                    ip2 = "{0}/{1}".format(str(eline_net[2]),
+                                           eline_net.prefixlen)
                     self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
                     self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
                     setChaining = True
 
             # Set the chaining
             if setChaining:
-                ret = GK.net.setChain(
+                GK.net.setChain(
                     src_id, dst_id,
                     vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                     bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
@@ -656,7 +704,6 @@
                     "Setting up E-Line link. (%s:%s) -> (%s:%s)" % (
                         src_id, src_if_name, dst_id, dst_if_name))
 
-
     def _connect_elans(self, elan_fwd_links, instance_uuid):
         """
         Connect all E-LAN links in the NSD
@@ -667,17 +714,18 @@
         for link in elan_fwd_links:
             # check if we need to deploy this link when its a management link:
             if USE_DOCKER_MGMT:
-                if self.check_mgmt_interface(link["connection_points_reference"]):
+                if self.check_mgmt_interface(
+                        link["connection_points_reference"]):
                     continue
 
             elan_vnf_list = []
-            # check if an external SAP is in the E-LAN (then a subnet is already defined)
+            # check if an external SAP is in the E-LAN (then a subnet is
+            # already defined)
             intfs_elan = [intf for intf in link["connection_points_reference"]]
             lan_sap = self.check_ext_saps(intfs_elan)
             if lan_sap:
                 lan_net = self.saps[lan_sap]['net']
                 lan_hosts = list(lan_net.hosts())
-                sap_ip = str(lan_hosts.pop(0))
             else:
                 lan_net = ELAN_SUBNETS.pop(0)
                 lan_hosts = list(lan_net.hosts())
@@ -686,12 +734,15 @@
             for intf in link["connection_points_reference"]:
 
                 # skip external SAPs, they already have an ip
-                vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf)
+                vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
+                    intf)
                 if vnf_sap_docker_name in self.saps_ext:
-                    elan_vnf_list.append({'name': vnf_sap_docker_name, 'interface': vnf_interface})
+                    elan_vnf_list.append(
+                        {'name': vnf_sap_docker_name, 'interface': vnf_interface})
                     continue
 
-                ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)), lan_net.prefixlen)
+                ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
+                                              lan_net.prefixlen)
                 vnf_id, intf_name, vnf_sap_id = parse_interface(intf)
 
                 # make sure we use the correct sap vnf name
@@ -711,12 +762,12 @@
                 if vnfi is not None:
                     self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                     # add this vnf and interface to the E-LAN for tagging
-                    elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})
+                    elan_vnf_list.append(
+                        {'name': src_docker_name, 'interface': intf_name})
 
             # install the VLAN tags for this E-LAN
             GK.net.setLAN(elan_vnf_list)
 
-
     def _load_docker_files(self):
         """
         Get all paths to Dockerfiles from VNFDs and store them in dict.
@@ -737,7 +788,8 @@
         Get all URLs to pre-build docker images in some repo.
         :return:
         """
-        # also merge sap dicts, because internal saps also need a docker container
+        # also merge sap dicts, because internal saps also need a docker
+        # container
         all_vnfs = self.vnfds.copy()
         all_vnfs.update(self.saps)
 
@@ -748,7 +800,8 @@
                     if url is not None:
                         url = url.replace("http://", "")
                         self.remote_docker_image_urls[k] = url
-                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
+                        LOG.debug("Found Docker image URL (%r): %r" %
+                                  (k, self.remote_docker_image_urls[k]))
 
     def _build_images_from_dockerfiles(self):
         """
@@ -757,9 +810,11 @@
         if GK_STANDALONE_MODE:
             return  # do not build anything in standalone mode
         dc = DockerClient()
-        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
+        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
+            self.local_docker_files))
         for k, v in self.local_docker_files.iteritems():
-            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
+            for line in dc.build(path=v.replace(
+                    "Dockerfile", ""), tag=k, rm=False, nocache=False):
                 LOG.debug("DOCKER BUILD: %s" % line)
             LOG.info("Docker image created: %s" % k)
 
@@ -769,7 +824,8 @@
         """
         dc = DockerClient()
         for url in self.remote_docker_image_urls.itervalues():
-            if not FORCE_PULL:  # only pull if not present (speedup for development)
+            # only pull if not present (speedup for development)
+            if not FORCE_PULL:
                 if len(dc.images.list(name=url)) > 0:
                     LOG.debug("Image %r present. Skipping pull." % url)
                     continue
@@ -777,16 +833,13 @@
             # this seems to fail with latest docker api version 2.0.2
             # dc.images.pull(url,
             #        insecure_registry=True)
-            #using docker cli instead
+            # using docker cli instead
             cmd = ["docker",
                    "pull",
                    url,
                    ]
             Popen(cmd).wait()
 
-
-
-
     def _check_docker_image_exists(self, image_name):
         """
         Query the docker service and check if the given image exists
@@ -814,7 +867,6 @@
             sap_dict = self.saps[sap]
             LOG.info("Placed SAP %r on DC %r" % (sap, str(sap_dict.get("dc"))))
 
-
     def _calculate_cpu_cfs_values(self, cpu_time_percentage):
         """
         Calculate cpu period and quota for CFS
@@ -828,30 +880,38 @@
         # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
         # Attention minimum cpu_quota is 1ms (micro)
         cpu_period = 1000000  # lets consider a fixed period of 1000000 microseconds for now
-        LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
-        cpu_quota = cpu_period * cpu_time_percentage  # calculate the fraction of cpu time for this container
-        # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
+        LOG.debug("cpu_period is %r, cpu_percentage is %r" %
+                  (cpu_period, cpu_time_percentage))
+        # calculate the fraction of cpu time for this container
+        cpu_quota = cpu_period * cpu_time_percentage
+        # ATTENTION >= 1000 to avoid a invalid argument system error ... no
+        # idea why
         if cpu_quota < 1000:
             LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
             cpu_quota = 1000
             LOG.warning("Increased CPU quota to avoid system error.")
-        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
+        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
+                  (cpu_period, cpu_quota))
         return int(cpu_period), int(cpu_quota)
 
     def check_ext_saps(self, intf_list):
         # check if the list of interfacs contains an external SAP
-        saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
+        saps_ext = [self.saps[sap]['name']
+                    for sap in self.saps if self.saps[sap]["type"] == "external"]
         for intf_name in intf_list:
-            vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf_name)
+            vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
+                intf_name)
             if vnf_sap_docker_name in saps_ext:
                 return vnf_sap_docker_name
 
     def check_mgmt_interface(self, intf_list):
-        SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"] if 'management' in p.get('type')]
+        SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"]
+                     if 'management' in p.get('type')]
         for intf_name in intf_list:
             if intf_name in SAPs_mgmt:
                 return True
 
+
 """
 Some (simple) placement algorithms
 """
@@ -861,6 +921,7 @@
     """
     Placement: Always use one and the same data center from the GK.dcs dict.
     """
+
     def place(self, nsd, vnfds, saps, dcs):
         for id, vnfd in vnfds.iteritems():
             vnfd["dc"] = list(dcs.itervalues())[0]
@@ -870,6 +931,7 @@
     """
     Placement: Distribute VNFs across all available DCs in a round robin fashion.
     """
+
     def place(self, nsd, vnfds, saps, dcs):
         c = 0
         dcs_list = list(dcs.itervalues())
@@ -877,11 +939,13 @@
             vnfd["dc"] = dcs_list[c % len(dcs_list)]
             c += 1  # inc. c to use next DC
 
+
 class RoundRobinDcPlacementWithSAPs(object):
     """
     Placement: Distribute VNFs across all available DCs in a round robin fashion,
     every SAP is instantiated on the same DC as the connected VNF.
     """
+
     def place(self, nsd, vnfds, saps, dcs):
 
         # place vnfs
@@ -893,13 +957,18 @@
 
         # place SAPs
         vlinks = nsd.get("virtual_links", [])
-        eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
-        elan_fwd_links = [l for l in vlinks if  (l["connectivity_type"] == "E-LAN")]
+        eline_fwd_links = [l for l in vlinks if (
+            l["connectivity_type"] == "E-Line")]
+        elan_fwd_links = [l for l in vlinks if (
+            l["connectivity_type"] == "E-LAN")]
 
-        # SAPs on E-Line links are placed on the same DC as the VNF on the E-Line
+        # SAPs on E-Line links are placed on the same DC as the VNF on the
+        # E-Line
         for link in eline_fwd_links:
-            src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
-            dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
+            src_id, src_if_name, src_sap_id = parse_interface(
+                link["connection_points_reference"][0])
+            dst_id, dst_if_name, dst_sap_id = parse_interface(
+                link["connection_points_reference"][1])
 
             # check if there is a SAP in the link
             if src_sap_id in saps:
@@ -920,11 +989,10 @@
                 # find SAP interfaces
                 intf_id, intf_name, intf_sap_id = parse_interface(intf)
                 if intf_sap_id in saps:
-                    dc = dcs_list[randint(0, dc_len-1)]
+                    dc = dcs_list[randint(0, dc_len - 1)]
                     saps[intf_sap_id]['dc'] = dc
 
 
-
 """
 Resource definitions and API endpoints
 """
@@ -950,7 +1018,8 @@
             elif len(request.data) > 0:
                 son_file = request.data
             else:
-                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
+                return {"service_uuid": None, "size": 0, "sha1": None,
+                        "error": "upload failed. file not found."}, 500
             # generate a uuid to reference this package
             service_uuid = str(uuid.uuid4())
             file_hash = hashlib.sha1(str(son_file)).hexdigest()
@@ -969,11 +1038,14 @@
             if AUTO_DELETE:
                 service_list = copy.copy(GK.services)
                 for service_uuid in service_list:
-                    instances_list = copy.copy(GK.services[service_uuid].instances)
+                    instances_list = copy.copy(
+                        GK.services[service_uuid].instances)
                     for instance_uuid in instances_list:
                         # valid service and instance UUID, stop service
-                        GK.services.get(service_uuid).stop_service(instance_uuid)
-                        LOG.info("service instance with uuid %r stopped." % instance_uuid)
+                        GK.services.get(service_uuid).stop_service(
+                            instance_uuid)
+                        LOG.info("service instance with uuid %r stopped." %
+                                 instance_uuid)
 
             # create a service object and register it
             s = Service(service_uuid, file_hash, upload_path)
@@ -983,13 +1055,15 @@
             if AUTO_DEPLOY:
                 # ok, we have a service uuid, lets start the service
                 reset_subnets()
-                service_instance_uuid = GK.services.get(service_uuid).start_service()
+                GK.services.get(service_uuid).start_service()
 
             # generate the JSON result
-            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
-        except Exception as ex:
+            return {"service_uuid": service_uuid, "size": size,
+                    "sha1": file_hash, "error": None}, 201
+        except BaseException:
             LOG.exception("Service package upload failed:")
-            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
+            return {"service_uuid": None, "size": 0,
+                    "sha1": None, "error": "upload failed"}, 500
 
     def get(self):
         """
@@ -1014,12 +1088,15 @@
         service_uuid = json_data.get("service_uuid")
 
         # lets be a bit fuzzy here to make testing easier
-        if (service_uuid is None or service_uuid=="latest") and len(GK.services) > 0:
-            # if we don't get a service uuid, we simple start the first service in the list
+        if (service_uuid is None or service_uuid ==
+                "latest") and len(GK.services) > 0:
+            # if we don't get a service uuid, we simple start the first service
+            # in the list
             service_uuid = list(GK.services.iterkeys())[0]
         if service_uuid in GK.services:
             # ok, we have a service uuid, lets start the service
-            service_instance_uuid = GK.services.get(service_uuid).start_service()
+            service_instance_uuid = GK.services.get(
+                service_uuid).start_service()
             return {"service_instance_uuid": service_instance_uuid}, 201
         return "Service not found", 404
 
@@ -1043,17 +1120,21 @@
 
         # try to be fuzzy
         if service_uuid is None and len(GK.services) > 0:
-            #if we don't get a service uuid, we simply stop the last service in the list
+            # if we don't get a service uuid, we simply stop the last service
+            # in the list
             service_uuid = list(GK.services.iterkeys())[0]
-        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
-            instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]
+        if instance_uuid is None and len(
+                GK.services[service_uuid].instances) > 0:
+            instance_uuid = list(
+                GK.services[service_uuid].instances.iterkeys())[0]
 
         if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
             # valid service and instance UUID, stop service
             GK.services.get(service_uuid).stop_service(instance_uuid)
-            return "service instance with uuid %r stopped." % instance_uuid,200
+            return "service instance with uuid %r stopped." % instance_uuid, 200
         return "Service not found", 404
 
+
 class Exit(fr.Resource):
 
     def put(self):
@@ -1068,7 +1149,6 @@
     GK = Gatekeeper()
 
 
-
 # create a single, global GK object
 GK = None
 initialize_GK()
@@ -1078,11 +1158,11 @@
 api = fr.Api(app)
 # define endpoints
 api.add_resource(Packages, '/packages', '/api/v2/packages')
-api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
+api.add_resource(Instantiations, '/instantiations',
+                 '/api/v2/instantiations', '/api/v2/requests')
 api.add_resource(Exit, '/emulator/exit')
 
 
-
 def start_rest_api(host, port, datacenters=dict()):
     GK.dcs = datacenters
     GK.net = get_dc_network()
@@ -1104,7 +1184,7 @@
         try:
             r = yaml.load(f)
         except yaml.YAMLError as exc:
-            LOG.exception("YAML parse error")
+            LOG.exception("YAML parse error: %r" % str(exc))
             r = dict()
     return r
 
@@ -1144,6 +1224,7 @@
 
     return vnf_id, vnf_interface, vnf_sap_docker_name
 
+
 def reset_subnets():
     # private subnet definitions for the generated interfaces
     # 10.10.xxx.0/24
@@ -1156,6 +1237,7 @@
     global ELINE_SUBNETS
     ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
 
+
 if __name__ == '__main__':
     """
     Lets allow to run the API in standalone mode.
@@ -1163,4 +1245,3 @@
     GK_STANDALONE_MODE = True
     logging.getLogger("werkzeug").setLevel(logging.INFO)
     start_rest_api("0.0.0.0", 8000)
-