Refactoring: Made complete codebase PEP8 compatible.
author    peusterm <manuel.peuster@uni-paderborn.de>
Tue, 15 May 2018 15:10:27 +0000 (17:10 +0200)
committer peusterm <manuel.peuster@uni-paderborn.de>
Tue, 15 May 2018 15:45:37 +0000 (17:45 +0200)
Only PEP8 rule E501 (line too long) is not yet addressed
by this change.

The patch also adds automated code style checks to the
CI test stage using flake8. The CI run now fails if any
code style violation is detected.
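
For reference, the new CI check boils down to a single flake8
invocation (a sketch of the call as added to
devops-stages/stage-test.sh; flake8 is also added to the
setup.py requirements list):

    # style check over the whole source tree; the .eggs and
    # devops folders and the long-line rule E501 are skipped
    flake8 --exclude=.eggs,devops --ignore=E501 .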

Change-Id: I90956dd424a46691546ef720351757d3c43451a7
Signed-off-by: peusterm <manuel.peuster@uni-paderborn.de>
79 files changed:
devops-stages/stage-archive.sh
devops-stages/stage-build.sh
devops-stages/stage-test.sh
setup-cli.py
setup.py
src/emuvim/__init__.py
src/emuvim/api/__init__.py
src/emuvim/api/openstack/__init__.py
src/emuvim/api/openstack/chain_api.py
src/emuvim/api/openstack/compute.py
src/emuvim/api/openstack/docker_util.py
src/emuvim/api/openstack/heat_parser.py
src/emuvim/api/openstack/helper.py
src/emuvim/api/openstack/ip_handler.py
src/emuvim/api/openstack/manage.py
src/emuvim/api/openstack/openstack_api_endpoint.py
src/emuvim/api/openstack/openstack_dummies/__init__.py
src/emuvim/api/openstack/openstack_dummies/base_openstack_dummy.py
src/emuvim/api/openstack/openstack_dummies/glance_dummy_api.py
src/emuvim/api/openstack/openstack_dummies/heat_dummy_api.py
src/emuvim/api/openstack/openstack_dummies/keystone_dummy_api.py
src/emuvim/api/openstack/openstack_dummies/neutron_dummy_api.py
src/emuvim/api/openstack/openstack_dummies/neutron_sfc_dummy_api.py
src/emuvim/api/openstack/openstack_dummies/nova_dummy_api.py
src/emuvim/api/openstack/resources/__init__.py
src/emuvim/api/openstack/resources/flow_classifier.py
src/emuvim/api/openstack/resources/image.py
src/emuvim/api/openstack/resources/instance_flavor.py
src/emuvim/api/openstack/resources/load_balancer.py
src/emuvim/api/openstack/resources/model.py
src/emuvim/api/openstack/resources/net.py
src/emuvim/api/openstack/resources/port.py
src/emuvim/api/openstack/resources/port_chain.py
src/emuvim/api/openstack/resources/port_pair.py
src/emuvim/api/openstack/resources/port_pair_group.py
src/emuvim/api/openstack/resources/resource.py
src/emuvim/api/openstack/resources/router.py
src/emuvim/api/openstack/resources/server.py
src/emuvim/api/openstack/resources/stack.py
src/emuvim/api/openstack/resources/template.py
src/emuvim/api/rest/__init__.py
src/emuvim/api/rest/compute.py
src/emuvim/api/rest/monitor.py
src/emuvim/api/rest/network.py
src/emuvim/api/rest/rest_api_endpoint.py
src/emuvim/api/sonata/__init__.py
src/emuvim/api/sonata/dummygatekeeper.py
src/emuvim/cli/__init__.py
src/emuvim/cli/prometheus.py
src/emuvim/cli/rest/__init__.py
src/emuvim/cli/rest/compute.py
src/emuvim/cli/rest/datacenter.py
src/emuvim/cli/rest/monitor.py
src/emuvim/cli/rest/network.py
src/emuvim/cli/son_emu_cli.py
src/emuvim/dashboard/__init__.py
src/emuvim/dcemulator/__init__.py
src/emuvim/dcemulator/link.py
src/emuvim/dcemulator/monitoring.py
src/emuvim/dcemulator/net.py
src/emuvim/dcemulator/node.py
src/emuvim/dcemulator/resourcemodel/__init__.py
src/emuvim/dcemulator/resourcemodel/upb/__init__.py
src/emuvim/dcemulator/resourcemodel/upb/simple.py
src/emuvim/dcemulator/son_emu_simple_switch_13.py
src/emuvim/examples/default_single_dc_topology.py
src/emuvim/examples/openstack_single_dc.py
src/emuvim/examples/osm_default_daemon_topology_2_pop.py
src/emuvim/test/__init__.py
src/emuvim/test/api_base.py
src/emuvim/test/api_base_openstack.py
src/emuvim/test/base.py
src/emuvim/test/integrationtests/__init__.py
src/emuvim/test/unittests/__init__.py
src/emuvim/test/unittests/test_emulator.py
src/emuvim/test/unittests/test_openstack.py
src/emuvim/test/unittests/test_resourcemodel.py
src/emuvim/test/unittests/test_restapi.py
src/emuvim/test/unittests/test_sonata_dummy_gatekeeper.py

index 4c9c64b..7bda67b 100755 (executable)
--- a/devops-stages/stage-archive.sh
+++ b/devops-stages/stage-archive.sh
@@ -1,2 +1,27 @@
 #!/bin/bash
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 echo "vim-emu stage archive: Not yet implemented!"
index 4a355e4..ef0f548 100755 (executable)
--- a/devops-stages/stage-build.sh
+++ b/devops-stages/stage-build.sh
@@ -1,2 +1,27 @@
 #!/bin/bash
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 echo "vim-emu stage build: Skipped. Installation was already done during container build procedure."
index e904dda..10dbe02 100755 (executable)
--- a/devops-stages/stage-test.sh
+++ b/devops-stages/stage-test.sh
@@ -1,4 +1,29 @@
 #!/bin/bash
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 #
 # Runs the unittests of "vim-emu". Script needs to be executed inside
 # the vim-emu Docker container by user "root". It requires the container
@@ -16,4 +41,7 @@ echo "Tests executed by user: $(whoami)"
 # trigger the tests
 cd /son-emu/
 py.test -v src/emuvim/test/unittests
-
+# trigger pep8 style check
+echo "Doing flake8 style check ..."
+flake8 --exclude=.eggs,devops --ignore=E501 .
+echo "done."
index f1b613d..b6586b3 100755 (executable)
--- a/setup-cli.py
+++ b/setup-cli.py
@@ -1,33 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Additional setup that installs 'son-emu-cli' in standalone mode.
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from setuptools import setup, find_packages
 
 setup(name='vimemucli',
@@ -54,4 +49,4 @@ setup(name='vimemucli',
       },
       setup_requires=['pytest-runner'],
       tests_require=['pytest'],
-)
+      )
index 94f632f..090063a 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from setuptools import setup, find_packages
 
 setup(name='emuvim',
@@ -58,7 +56,8 @@ setup(name='emuvim',
           'prometheus_client',
           'ipaddress',
           'simplejson',
-          'gevent'
+          'gevent',
+          'flake8'
       ],
       zip_safe=False,
       entry_points={
index 7e60065..d888119 100755 (executable)
--- a/src/emuvim/__init__.py
+++ b/src/emuvim/__init__.py
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 395c0ce..d888119 100755 (executable)
--- a/src/emuvim/api/__init__.py
+++ b/src/emuvim/api/__init__.py
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 9979159..d888119 100755 (executable)
--- a/src/emuvim/api/openstack/__init__.py
+++ b/src/emuvim/api/openstack/__init__.py
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 20258bb..47af63c 100755 (executable)
--- a/src/emuvim/api/openstack/chain_api.py
+++ b/src/emuvim/api/openstack/chain_api.py
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import json
 import logging
 import copy
@@ -34,8 +32,6 @@ from mininet.node import OVSSwitch
 from flask import Flask
 from flask import Response, request
 from flask_restful import Api, Resource
-from mininet.link import Link
-import uuid
 
 
 class ChainApi(Resource):
@@ -77,7 +73,8 @@ class ChainApi(Resource):
             return response
 
     def _start_flask(self):
-        logging.info("Starting %s endpoint @ http://%s:%d" % ("ChainDummyApi", self.ip, self.port))
+        logging.info("Starting %s endpoint @ http://%s:%d" %
+                     ("ChainDummyApi", self.ip, self.port))
         if self.app is not None:
             self.app.before_request(self.dump_playbook)
             self.app.run(self.ip, self.port, debug=True, use_reloader=False)
@@ -88,8 +85,8 @@ class ChainApi(Resource):
                 if len(request.data) > 0:
                     data = "# CHAIN API\n"
                     data += "curl -X {type} -H \"Content-type: application/json\" -d '{data}' {url}".format(type=request.method,
-                                                                                            data=request.data,
-                                                                                            url=request.url)
+                                                                                                            data=request.data,
+                                                                                                            url=request.url)
                     logfile.write(data + "\n")
 
 
@@ -139,7 +136,8 @@ class ChainVersionsList(Resource):
             return Response(resp, status=200, mimetype="application/json")
 
         except Exception as ex:
-            logging.exception(u"%s: Could not show list of versions." % __name__)
+            logging.exception(
+                u"%s: Could not show list of versions." % __name__)
             return ex.message, 500
 
 
@@ -162,10 +160,12 @@ class ChainList(Resource):
             for chain in self.api.manage.full_chain_data.values():
                 resp["chains"].append(chain)
 
-            return Response(json.dumps(resp), status=200, mimetype="application/json")
+            return Response(json.dumps(resp), status=200,
+                            mimetype="application/json")
 
         except Exception as ex:
-            logging.exception(u"%s: Could not list all network chains." % __name__)
+            logging.exception(
+                u"%s: Could not list all network chains." % __name__)
             return ex.message, 500
 
 
@@ -188,10 +188,12 @@ class BalanceHostList(Resource):
             for lb in self.api.manage.full_lb_data.values():
                 resp["loadbalancers"].append(lb)
 
-            return Response(json.dumps(resp), status=200, mimetype="application/json")
+            return Response(json.dumps(resp), status=200,
+                            mimetype="application/json")
 
         except Exception as ex:
-            logging.exception(u"%s: Could not list all live loadbalancers." % __name__)
+            logging.exception(
+                u"%s: Could not list all live loadbalancers." % __name__)
             return ex.message, 500
 
 
@@ -271,11 +273,14 @@ class ChainVnfInterfaces(Resource):
                                                           vnf_dst_interface=dst_intfs, bidirectional=True,
                                                           path=path, layer2=layer2)
             resp = {'cookie': cookie}
-            return Response(json.dumps(resp), status=200, mimetype="application/json")
+            return Response(json.dumps(resp), status=200,
+                            mimetype="application/json")
 
         except Exception as e:
-            logging.exception(u"%s: Error setting up the chain.\n %s" % (__name__, e))
-            return Response(u"Error setting up the chain", status=500, mimetype="application/json")
+            logging.exception(
+                u"%s: Error setting up the chain.\n %s" % (__name__, e))
+            return Response(u"Error setting up the chain",
+                            status=500, mimetype="application/json")
 
     def delete(self, src_vnf, src_intfs, dst_vnf, dst_intfs):
         """
@@ -307,10 +312,13 @@ class ChainVnfInterfaces(Resource):
         try:
             cookie = self.api.manage.network_action_stop(src_vnf, dst_vnf, vnf_src_interface=src_intfs,
                                                          vnf_dst_interface=dst_intfs, bidirectional=True)
-            return Response(json.dumps(cookie), status=200, mimetype="application/json")
+            return Response(json.dumps(cookie), status=200,
+                            mimetype="application/json")
         except Exception as e:
-            logging.exception(u"%s: Error deleting the chain.\n %s" % (__name__, e))
-            return Response(u"Error deleting the chain", status=500, mimetype="application/json")
+            logging.exception(
+                u"%s: Error deleting the chain.\n %s" % (__name__, e))
+            return Response(u"Error deleting the chain",
+                            status=500, mimetype="application/json")
 
 
 class ChainVnfDcStackInterfaces(Resource):
@@ -322,7 +330,8 @@ class ChainVnfDcStackInterfaces(Resource):
     def __init__(self, api):
         self.api = api
 
-    def put(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
+    def put(self, src_dc, src_stack, src_vnf, src_intfs,
+            dst_dc, dst_stack, dst_vnf, dst_intfs):
         """
         A PUT request to "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
         will set up chain.
@@ -352,18 +361,21 @@ class ChainVnfDcStackInterfaces(Resource):
 
         """
         # search for real names
-        real_names = self._findNames(src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
-        if type(real_names) is not tuple:
+        real_names = self._findNames(
+            src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
+        if not isinstance(real_names, tuple):
             # something went wrong
             return real_names
 
         container_src, container_dst, interface_src, interface_dst = real_names
 
         # check if both VNFs exist
-        if not self.api.manage.check_vnf_intf_pair(container_src, interface_src):
+        if not self.api.manage.check_vnf_intf_pair(
+                container_src, interface_src):
             return Response(u"VNF %s or intfs %s does not exist" % (container_src, interface_src), status=501,
                             mimetype="application/json")
-        if not self.api.manage.check_vnf_intf_pair(container_dst, interface_dst):
+        if not self.api.manage.check_vnf_intf_pair(
+                container_dst, interface_dst):
             return Response(u"VNF %s or intfs %s does not exist" % (container_dst, interface_dst), status=501,
                             mimetype="application/json")
 
@@ -372,13 +384,17 @@ class ChainVnfDcStackInterfaces(Resource):
                                                           vnf_dst_interface=interface_dst, bidirectional=True,
                                                           layer2=True)
             resp = {'cookie': cookie}
-            return Response(json.dumps(resp), status=200, mimetype="application/json")
+            return Response(json.dumps(resp), status=200,
+                            mimetype="application/json")
 
         except Exception as e:
-            logging.exception(u"%s: Error setting up the chain.\n %s" % (__name__, e))
-            return Response(u"Error setting up the chain", status=500, mimetype="application/json")
+            logging.exception(
+                u"%s: Error setting up the chain.\n %s" % (__name__, e))
+            return Response(u"Error setting up the chain",
+                            status=500, mimetype="application/json")
 
-    def post(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
+    def post(self, src_dc, src_stack, src_vnf, src_intfs,
+             dst_dc, dst_stack, dst_vnf, dst_intfs):
         """
          A post request to "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
          will create a chain between two interfaces at the specified vnfs.
@@ -410,8 +426,9 @@ class ChainVnfDcStackInterfaces(Resource):
             layer2 = True
 
         # search for real names
             layer2 = True
 
         # search for real names
-        real_names = self._findNames(src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
-        if type(real_names) is not tuple:
+        real_names = self._findNames(
+            src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
+        if not isinstance(real_names, tuple):
             # something went wrong
             return real_names
 
             # something went wrong
             return real_names
 
@@ -422,13 +439,17 @@ class ChainVnfDcStackInterfaces(Resource):
                                                           vnf_dst_interface=interface_dst, bidirectional=True,
                                                           path=path, layer2=layer2)
             resp = {'cookie': cookie}
                                                           vnf_dst_interface=interface_dst, bidirectional=True,
                                                           path=path, layer2=layer2)
             resp = {'cookie': cookie}
-            return Response(json.dumps(resp), status=200, mimetype="application/json")
+            return Response(json.dumps(resp), status=200,
+                            mimetype="application/json")
 
         except Exception as e:
 
         except Exception as e:
-            logging.exception(u"%s: Error setting up the chain.\n %s" % (__name__, e))
-            return Response(u"Error setting up the chain", status=500, mimetype="application/json")
+            logging.exception(
+                u"%s: Error setting up the chain.\n %s" % (__name__, e))
+            return Response(u"Error setting up the chain",
+                            status=500, mimetype="application/json")
 
-    def delete(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
+    def delete(self, src_dc, src_stack, src_vnf, src_intfs,
+               dst_dc, dst_stack, dst_vnf, dst_intfs):
         """
         A DELETE request to "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
         will delete a previously created chain.
@@ -456,8 +477,9 @@ class ChainVnfDcStackInterfaces(Resource):
 
         """
         # search for real names
-        real_names = self._findNames(src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
-        if type(real_names) is not tuple:
+        real_names = self._findNames(
+            src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
+        if not isinstance(real_names, tuple):
             # something went wrong, real_names is a Response object
             return real_names
 
@@ -466,17 +488,22 @@ class ChainVnfDcStackInterfaces(Resource):
         try:
             cookie = self.api.manage.network_action_stop(container_src, container_dst, vnf_src_interface=interface_src,
                                                          vnf_dst_interface=interface_dst, bidirectional=True)
-            return Response(json.dumps(cookie), status=200, mimetype="application/json")
+            return Response(json.dumps(cookie), status=200,
+                            mimetype="application/json")
         except Exception as e:
-            logging.exception(u"%s: Error deleting the chain.\n %s" % (__name__, e))
-            return Response(u"Error deleting the chain", status=500, mimetype="application/json")
+            logging.exception(
+                u"%s: Error deleting the chain.\n %s" % (__name__, e))
+            return Response(u"Error deleting the chain",
+                            status=500, mimetype="application/json")
 
     # Tries to find real container and interface names according to heat template names
     # Returns a tuple of 4 or a Response object
-    def _findNames(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
+    def _findNames(self, src_dc, src_stack, src_vnf, src_intfs,
+                   dst_dc, dst_stack, dst_vnf, dst_intfs):
         # search for datacenters
         if src_dc not in self.api.manage.net.dcs or dst_dc not in self.api.manage.net.dcs:
-            return Response(u"At least one DC does not exist", status=500, mimetype="application/json")
+            return Response(u"At least one DC does not exist",
+                            status=500, mimetype="application/json")
         dc_src = self.api.manage.net.dcs[src_dc]
         dc_dst = self.api.manage.net.dcs[dst_dc]
         # search for related OpenStackAPIs
@@ -489,7 +516,8 @@ class ChainVnfDcStackInterfaces(Resource):
             if api.compute.dc == dc_dst:
                 api_dst = api
         if api_src is None or api_dst is None:
-            return Response(u"At least one OpenStackAPI does not exist", status=500, mimetype="application/json")
+            return Response(u"At least one OpenStackAPI does not exist",
+                            status=500, mimetype="application/json")
         # search for stacks
         stack_src = None
         stack_dst = None
@@ -500,7 +528,8 @@ class ChainVnfDcStackInterfaces(Resource):
             if stack.stack_name == dst_stack:
                 stack_dst = stack
         if stack_src is None or stack_dst is None:
-            return Response(u"At least one Stack does not exist", status=500, mimetype="application/json")
+            return Response(u"At least one Stack does not exist",
+                            status=500, mimetype="application/json")
         # search for servers
         server_src = None
         server_dst = None
@@ -513,7 +542,8 @@ class ChainVnfDcStackInterfaces(Resource):
                 server_dst = server
                 break
         if server_src is None or server_dst is None:
-            return Response(u"At least one VNF does not exist", status=500, mimetype="application/json")
+            return Response(u"At least one VNF does not exist",
+                            status=500, mimetype="application/json")
 
         container_src = server_src.name
         container_dst = server_dst.name
@@ -526,7 +556,8 @@ class ChainVnfDcStackInterfaces(Resource):
         if dst_intfs in server_dst.port_names:
             port_dst = stack_dst.ports[dst_intfs]
         if port_src is None or port_dst is None:
-            return Response(u"At least one Port does not exist", status=500, mimetype="application/json")
+            return Response(u"At least one Port does not exist",
+                            status=500, mimetype="application/json")
 
         interface_src = port_src.intf_name
         interface_dst = port_dst.intf_name
@@ -576,8 +607,9 @@ class BalanceHostDcStack(Resource):
 
             # check src vnf/port
             if src_stack != "floating":
-                real_src = self._findName(src_dc, src_stack, vnf_src_name, vnf_src_interface)
-                if type(real_src) is not tuple:
+                real_src = self._findName(
+                    src_dc, src_stack, vnf_src_name, vnf_src_interface)
+                if not isinstance(real_src, tuple):
                     # something went wrong, real_src is a Response object
                     return real_src
 
@@ -590,20 +622,24 @@ class BalanceHostDcStack(Resource):
                 dst_server = dst_vnf.get('server', None)
                 dst_port = dst_vnf.get('port', None)
                 if dst_dc is not None and dst_stack is not None and dst_server is not None and dst_port is not None:
-                    real_dst = self._findName(dst_dc, dst_stack, dst_server, dst_port)
-                    if type(real_dst) is not tuple:
+                    real_dst = self._findName(
+                        dst_dc, dst_stack, dst_server, dst_port)
+                    if not isinstance(real_dst, tuple):
                         # something went wrong, real_dst is a Response object
                         return real_dst
                     real_dst_dict[real_dst[0]] = real_dst[1]
 
-            input_object = {"dst_vnf_interfaces": real_dst_dict, "path": req.get("path", None)}
+            input_object = {"dst_vnf_interfaces": real_dst_dict,
+                            "path": req.get("path", None)}
 
             if src_stack != "floating":
-                self.api.manage.add_loadbalancer(container_src, interface_src, lb_data=input_object)
+                self.api.manage.add_loadbalancer(
+                    container_src, interface_src, lb_data=input_object)
                 return Response(u"Loadbalancer set up at %s:%s" % (container_src, interface_src),
                                 status=200, mimetype="application/json")
             else:
-                cookie, floating_ip = self.api.manage.add_floating_lb(src_dc, lb_data=input_object)
+                cookie, floating_ip = self.api.manage.add_floating_lb(
+                    src_dc, lb_data=input_object)
 
                 return Response(json.dumps({"cookie": "%d" % cookie, "floating_ip": "%s" % floating_ip}),
                                 status=200, mimetype="application/json")
@@ -612,7 +648,8 @@ class BalanceHostDcStack(Resource):
             logging.exception(u"%s: Error setting up the loadbalancer at %s %s %s:%s.\n %s" %
                               (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface, e))
             return Response(u"%s: Error setting up the loadbalancer at %s %s %s:%s.\n %s" %
-                            (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface, e), status=500,
+                            (__name__, src_dc, src_stack, vnf_src_name,
+                             vnf_src_interface, e), status=500,
                             mimetype="application/json")
 
     def delete(self, src_dc, src_stack, vnf_src_name, vnf_src_interface):
@@ -634,14 +671,16 @@ class BalanceHostDcStack(Resource):
         try:
             # check src vnf/port
             if src_stack != "floating":
-                real_src = self._findName(src_dc, src_stack, vnf_src_name, vnf_src_interface)
-                if type(real_src) is not tuple:
+                real_src = self._findName(
+                    src_dc, src_stack, vnf_src_name, vnf_src_interface)
+                if not isinstance(real_src, tuple):
                     # something went wrong, real_src is a Response object
                     return real_src
 
                 container_src, interface_src = real_src
 
-                self.api.manage.delete_loadbalancer(container_src, interface_src)
+                self.api.manage.delete_loadbalancer(
+                    container_src, interface_src)
                 return Response(u"Loadbalancer deleted at %s:%s" % (vnf_src_name, vnf_src_interface),
                                 status=200, mimetype="application/json")
             else:
@@ -654,7 +693,8 @@ class BalanceHostDcStack(Resource):
             logging.exception(u"%s: Error deleting the loadbalancer at %s %s %s%s.\n %s" %
                               (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface, e))
             return Response(u"%s: Error deleting the loadbalancer at %s %s %s%s." %
-                            (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface), status=500,
+                            (__name__, src_dc, src_stack, vnf_src_name,
+                             vnf_src_interface), status=500,
                             mimetype="application/json")
 
     # Tries to find real container and port name according to heat template names
                             mimetype="application/json")
 
     # Tries to find real container and port name according to heat template names
@@ -662,7 +702,8 @@ class BalanceHostDcStack(Resource):
     def _findName(self, dc, stack, vnf, port):
         # search for datacenters
         if dc not in self.api.manage.net.dcs:
     def _findName(self, dc, stack, vnf, port):
         # search for datacenters
         if dc not in self.api.manage.net.dcs:
-            return Response(u"DC does not exist", status=500, mimetype="application/json")
+            return Response(u"DC does not exist", status=500,
+                            mimetype="application/json")
         dc_real = self.api.manage.net.dcs[dc]
         # search for related OpenStackAPIs
         api_real = None
@@ -671,14 +712,16 @@ class BalanceHostDcStack(Resource):
             if api.compute.dc == dc_real:
                 api_real = api
         if api_real is None:
-            return Response(u"OpenStackAPI does not exist", status=500, mimetype="application/json")
+            return Response(u"OpenStackAPI does not exist",
+                            status=500, mimetype="application/json")
         # search for stacks
         stack_real = None
         for stackObj in api_real.compute.stacks.values():
             if stackObj.stack_name == stack:
                 stack_real = stackObj
         if stack_real is None:
-            return Response(u"Stack does not exist", status=500, mimetype="application/json")
+            return Response(u"Stack does not exist", status=500,
+                            mimetype="application/json")
         # search for servers
         server_real = None
         for server in stack_real.servers.values():
@@ -686,7 +729,8 @@ class BalanceHostDcStack(Resource):
                 server_real = server
                 break
         if server_real is None:
-            return Response(u"VNF does not exist", status=500, mimetype="application/json")
+            return Response(u"VNF does not exist", status=500,
+                            mimetype="application/json")
 
         container_real = server_real.name
 
@@ -695,7 +739,8 @@ class BalanceHostDcStack(Resource):
         if port in server_real.port_names:
             port_real = stack_real.ports[port]
         if port_real is None:
-            return Response(u"At least one Port does not exist", status=500, mimetype="application/json")
+            return Response(u"At least one Port does not exist",
+                            status=500, mimetype="application/json")
 
         interface_real = port_real.intf_name
 
@@ -733,16 +778,19 @@ class BalanceHost(Resource):
 
             if vnf_src_name != "floating":
                 # check if VNF exist
-                if not self.api.manage.check_vnf_intf_pair(vnf_src_name, vnf_src_interface):
+                if not self.api.manage.check_vnf_intf_pair(
+                        vnf_src_name, vnf_src_interface):
                     return Response(u"VNF %s or intfs %s does not exist" % (vnf_src_name, vnf_src_interface),
                                     status=501,
                                     mimetype="application/json")
-                self.api.manage.add_loadbalancer(vnf_src_name, vnf_src_interface, lb_data=req)
+                self.api.manage.add_loadbalancer(
+                    vnf_src_name, vnf_src_interface, lb_data=req)
 
                 return Response(u"Loadbalancer set up at %s:%s" % (vnf_src_name, vnf_src_interface),
                                 status=200, mimetype="application/json")
             else:
-                cookie, floating_ip = self.api.manage.add_floating_lb(vnf_src_interface, lb_data=req)
+                cookie, floating_ip = self.api.manage.add_floating_lb(
+                    vnf_src_interface, lb_data=req)
 
                 return Response(json.dumps({"cookie": "%d" % cookie, "floating_ip": "%s" % floating_ip}),
                                 status=200, mimetype="application/json")
@@ -766,11 +814,13 @@ class BalanceHost(Resource):
 
         """
         # check if VNF exist
-        if not self.api.manage.check_vnf_intf_pair(vnf_src_name, vnf_src_interface):
+        if not self.api.manage.check_vnf_intf_pair(
+                vnf_src_name, vnf_src_interface):
             return Response(u"VNF %s or intfs %s does not exist" % (vnf_src_name, vnf_src_interface), status=501,
                             mimetype="application/json")
         try:
-            logging.debug("Deleting loadbalancer at %s: interface: %s" % (vnf_src_name, vnf_src_interface))
+            logging.debug("Deleting loadbalancer at %s: interface: %s" %
+                          (vnf_src_name, vnf_src_interface))
             net = self.api.manage.net
 
             if vnf_src_name != "floating":
@@ -779,7 +829,8 @@ class BalanceHost(Resource):
                     return Response(u"Source VNF or interface can not be found." % vnf_src_name,
                                     status=404, mimetype="application/json")
 
-                self.api.manage.delete_loadbalancer(vnf_src_name, vnf_src_interface)
+                self.api.manage.delete_loadbalancer(
+                    vnf_src_name, vnf_src_interface)
 
                 return Response(u"Loadbalancer deleted at %s:%s" % (vnf_src_name, vnf_src_interface),
                                 status=200, mimetype="application/json")
@@ -848,7 +899,8 @@ class QueryTopology(Resource):
                             # with their unique keys
                             link = copy.copy(data)
                             for edge in link:
-                                # do not add any links to the floating switch to the topology!
+                                # do not add any links to the floating switch
+                                # to the topology!
                                 if graph_node == "fs1":
                                     continue
                                 # the translator wants everything as a string!
index b812953..6fc4c1b 100755 (executable)
@@ -1,33 +1,40 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from mininet.link import Link
 
-from resources import *
+from resources.instance_flavor import InstanceFlavor
+from resources.net import Net
+from resources.port import Port
+from resources.port_pair import PortPair
+from resources.port_pair_group import PortPairGroup
+from resources.flow_classifier import FlowClassifier
+from resources.port_chain import PortChain
+from resources.server import Server
+from resources.image import Image
+
 from docker import DockerClient
 import logging
 import threading
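Note on the import hunk above: the wildcard import from the resources package is replaced by one explicit import per class, so every name has an obvious origin and unused names become visible to a style checker (pyflakes reports wildcard imports as F403). A minimal usage sketch, assuming the emulator source tree is on the Python path and using the constructors the way later hunks in this diff do:

    from resources.net import Net
    from resources.server import Server

    # Behaviour is unchanged by the new import style; only the origin of
    # each class is now explicit.
    network = Net("mgmt-net")
    server = Server("dc1_stack1_vnf1")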
@@ -89,7 +96,8 @@ class OpenstackCompute(object):
         for image in self.dcli.images.list():
             if len(image.tags) > 0:
                 for t in image.tags:
-                    t = t.replace(":latest", "")  # only use short tag names for OSM compatibility
+                    # only use short tag names for OSM compatibility
+                    t = t.replace(":latest", "")
                     if t not in self._images:
                         self._images[t] = Image(t)
         return self._images
@@ -103,7 +111,8 @@ class OpenstackCompute(object):
         """
         if not self.check_stack(stack):
             self.clean_broken_stack(stack)
-            raise HeatApiStackInvalidException("Stack did not pass validity checks")
+            raise HeatApiStackInvalidException(
+                "Stack did not pass validity checks")
         self.stacks[stack.id] = stack
 
     def clean_broken_stack(self, stack):
@@ -132,7 +141,7 @@ class OpenstackCompute(object):
             for port_name in server.port_names:
                 if port_name not in stack.ports:
                     LOG.warning("Server %s of stack %s has a port named %s that is not known." %
-                                    (server.name, stack.stack_name, port_name))
+                                (server.name, stack.stack_name, port_name))
                     everything_ok = False
             if server.image is None:
                 LOG.warning("Server %s holds no image." % (server.name))
@@ -143,7 +152,7 @@ class OpenstackCompute(object):
         for port in stack.ports.values():
             if port.net_name not in stack.nets:
                 LOG.warning("Port %s of stack %s has a network named %s that is not known." %
-                                (port.name, stack.stack_name, port.net_name))
+                            (port.name, stack.stack_name, port.net_name))
                 everything_ok = False
             if port.intf_name is None:
                 LOG.warning("Port %s has no interface name." % (port.name))
@@ -160,11 +169,12 @@ class OpenstackCompute(object):
                         break
                 if not found:
                     LOG.warning("Router %s of stack %s has a network named %s that is not known." %
-                                    (router.name, stack.stack_name, subnet_name))
+                                (router.name, stack.stack_name, subnet_name))
                     everything_ok = False
         return everything_ok
 
-    def add_flavor(self, name, cpu, memory, memory_unit, storage, storage_unit):
+    def add_flavor(self, name, cpu, memory,
+                   memory_unit, storage, storage_unit):
         """
         Adds a flavor to the stack.
 
@@ -181,7 +191,8 @@ class OpenstackCompute(object):
         :param storage_unit:
         :type storage_unit: ``str``
         """
-        flavor = InstanceFlavor(name, cpu, memory, memory_unit, storage, storage_unit)
+        flavor = InstanceFlavor(
+            name, cpu, memory, memory_unit, storage, storage_unit)
         self.flavors[flavor.name] = flavor
         return flavor
 
@@ -244,7 +255,8 @@ class OpenstackCompute(object):
             * *False*: else
         :rtype: ``bool``
         """
-        LOG.debug("updating stack {} with new_stack {}".format(old_stack_id, new_stack))
+        LOG.debug("updating stack {} with new_stack {}".format(
+            old_stack_id, new_stack))
         if old_stack_id not in self.stacks:
             return False
         old_stack = self.stacks[old_stack_id]
@@ -281,24 +293,26 @@ class OpenstackCompute(object):
 
         # Remove unnecessary networks
         for net in old_stack.nets.values():
-            if not net.name in new_stack.nets:
+            if net.name not in new_stack.nets:
                 self.delete_network(net.id)
 
         # Remove all unnecessary servers
         for server in old_stack.servers.values():
             if server.name in new_stack.servers:
-                if not server.compare_attributes(new_stack.servers[server.name]):
+                if not server.compare_attributes(
+                        new_stack.servers[server.name]):
                     self.stop_compute(server)
                 else:
                     # Delete unused and changed links
                     for port_name in server.port_names:
                         if port_name in old_stack.ports and port_name in new_stack.ports:
-                            if not old_stack.ports.get(port_name) == new_stack.ports.get(port_name):
+                            if not old_stack.ports.get(
+                                    port_name) == new_stack.ports.get(port_name):
                                 my_links = self.dc.net.links
                                 for link in my_links:
                                     if str(link.intf1) == old_stack.ports[port_name].intf_name and \
-                                                    str(link.intf1.ip) == \
-                                                    old_stack.ports[port_name].ip_address.split('/')[0]:
+                                            str(link.intf1.ip) == \
+                                            old_stack.ports[port_name].ip_address.split('/')[0]:
                                         self._remove_link(server.name, link)
 
                                         # Add changed link
@@ -367,15 +381,18 @@ class OpenstackCompute(object):
                 if port.compare_attributes(old_port):
                     for net in new_stack.nets.values():
                         if net.name == port.net_name:
-                            if net.assign_ip_address(old_port.ip_address, port.name):
+                            if net.assign_ip_address(
+                                    old_port.ip_address, port.name):
                                 port.ip_address = old_port.ip_address
                                 port.mac_address = old_port.mac_address
                             else:
-                                port.ip_address = net.get_new_ip_address(port.name)
+                                port.ip_address = net.get_new_ip_address(
+                                    port.name)
 
         for port in new_stack.ports.values():
             for net in new_stack.nets.values():
-                if port.net_name == net.name and not net.is_my_ip(port.ip_address, port.name):
+                if port.net_name == net.name and not net.is_my_ip(
+                        port.ip_address, port.name):
                     port.ip_address = net.get_new_ip_address(port.name)
 
     def update_subnet_cidr(self, old_stack, new_stack):
@@ -446,7 +463,8 @@ class OpenstackCompute(object):
             if port is not None:
                 network_dict['id'] = port.intf_name
                 network_dict['ip'] = port.ip_address
-                network_dict[network_dict['id']] = self.find_network_by_name_or_id(port.net_name).name
+                network_dict[network_dict['id']] = self.find_network_by_name_or_id(
+                    port.net_name).name
                 network.append(network_dict)
         # default network dict
         if len(network) < 1:
@@ -481,7 +499,8 @@ class OpenstackCompute(object):
             if "SON_EMU_CMD=" in env_var:
                 cmd = str(env_var.split("=")[1])
                 server.son_emu_command = cmd
-                # execute command in new thread to ensure that GK is not blocked by VNF
+                # execute command in new thread to ensure that GK is not
+                # blocked by VNF
                 t = threading.Thread(target=c.cmdPrint, args=(cmd,))
                 t.daemon = True
                 t.start()
@@ -493,7 +512,8 @@ class OpenstackCompute(object):
         :param server: The server that should be removed
         :type server: ``heat.resources.server``
         """
-        LOG.debug("Stopping container %s with full name %s" % (server.name, server.full_name))
+        LOG.debug("Stopping container %s with full name %s" %
+                  (server.name, server.full_name))
         link_names = list()
         for port_name in server.port_names:
             prt = self.find_port_by_name_or_id(port_name)
@@ -502,7 +522,8 @@ class OpenstackCompute(object):
         my_links = self.dc.net.links
         for link in my_links:
             if str(link.intf1) in link_names:
-                # Remove all self created links that connect the server to the main switch
+                # Remove all self created links that connect the server to the
+                # main switch
                 self._remove_link(server.name, link)
 
         # Stop the server and the remaining connection to the datacenter switch
@@ -528,11 +549,13 @@ class OpenstackCompute(object):
             return self.computeUnits[name_or_id]
 
         for server in self.computeUnits.values():
-            if server.name == name_or_id or server.template_name == name_or_id or server.full_name == name_or_id:
+            if (server.name == name_or_id or
+                    server.template_name == name_or_id or
+                    server.full_name == name_or_id):
                 return server
-            if (server.name == self._shorten_server_name(name_or_id)
-                or server.template_name ==  self._shorten_server_name(name_or_id)
-                or server.full_name == self._shorten_server_name(name_or_id)):
+            if (server.name == self._shorten_server_name(name_or_id) or
+                    server.template_name == self._shorten_server_name(name_or_id) or
+                    server.full_name == self._shorten_server_name(name_or_id)):
                 return server
         return None
 
@@ -548,7 +571,8 @@ class OpenstackCompute(object):
         :return: Returns the created server.
         :rtype: :class:`heat.resources.server`
         """
-        if self.find_server_by_name_or_id(name) is not None and not stack_operation:
+        if self.find_server_by_name_or_id(
+                name) is not None and not stack_operation:
             raise Exception("Server with name %s already exists." % name)
         safe_name = self._shorten_server_name(name)
         server = Server(safe_name)
@@ -570,7 +594,6 @@ class OpenstackCompute(object):
             LOG.info("Short server name: {}".format(h))
         return name
 
-
     def delete_server(self, server):
         """
         Deletes the given server from the stack dictionary and the computeUnits dictionary.
@@ -625,8 +648,10 @@ class OpenstackCompute(object):
         :return: :class:`heat.resources.net`
         """
         LOG.debug("Creating network with name %s" % name)
-        if self.find_network_by_name_or_id(name) is not None and not stack_operation:
-            LOG.warning("Creating network with name %s failed, as it already exists" % name)
+        if self.find_network_by_name_or_id(
+                name) is not None and not stack_operation:
+            LOG.warning(
+                "Creating network with name %s failed, as it already exists" % name)
             raise Exception("Network with name %s already exists." % name)
         network = Net(name)
         network.id = str(uuid.uuid4())
@@ -643,7 +668,8 @@ class OpenstackCompute(object):
         """
         net = self.find_network_by_name_or_id(name_or_id)
         if net is None:
-            raise Exception("Network with name or id %s does not exists." % name_or_id)
+            raise Exception(
+                "Network with name or id %s does not exists." % name_or_id)
 
         for stack in self.stacks.values():
             stack.nets.pop(net.name, None)
@@ -664,7 +690,8 @@ class OpenstackCompute(object):
         """
         port = self.find_port_by_name_or_id(name)
         if port is not None and not stack_operation:
-            LOG.warning("Creating port with name %s failed, as it already exists" % name)
+            LOG.warning(
+                "Creating port with name %s failed, as it already exists" % name)
             raise Exception("Port with name %s already exists." % name)
         LOG.debug("Creating port with name %s" % name)
         port = Port(name)
@@ -699,13 +726,14 @@ class OpenstackCompute(object):
         """
         port = self.find_port_by_name_or_id(name_or_id)
         if port is None:
-            LOG.warning("Port with name or id %s does not exist. Can't delete it." % name_or_id)
+            LOG.warning(
+                "Port with name or id %s does not exist. Can't delete it." % name_or_id)
             return
 
         my_links = self.dc.net.links
         for link in my_links:
             if str(link.intf1) == port.intf_name and \
-                            str(link.intf1.ip) == port.ip_address.split('/')[0]:
+                    str(link.intf1.ip) == port.ip_address.split('/')[0]:
                 self._remove_link(link.intf1.node.name, link)
                 break
 
@@ -727,7 +755,8 @@ class OpenstackCompute(object):
         """
         port_pair = self.find_port_pair_by_name_or_id(name)
         if port_pair is not None and not stack_operation:
-            logging.warning("Creating port pair with name %s failed, as it already exists" % name)
+            logging.warning(
+                "Creating port pair with name %s failed, as it already exists" % name)
             raise Exception("Port pair with name %s already exists." % name)
         logging.debug("Creating port pair with name %s" % name)
         port_pair = PortPair(name)
@@ -761,7 +790,8 @@ class OpenstackCompute(object):
         """
         port_pair = self.find_port_pair_by_name_or_id(name_or_id)
         if port_pair is None:
-            raise Exception("Port pair with name or id %s does not exists." % name_or_id)
+            raise Exception(
+                "Port pair with name or id %s does not exists." % name_or_id)
 
         self.port_pairs.pop(port_pair.id, None)
 
@@ -779,8 +809,10 @@ class OpenstackCompute(object):
         """
         port_pair_group = self.find_port_pair_group_by_name_or_id(name)
         if port_pair_group is not None and not stack_operation:
-            logging.warning("Creating port pair group with name %s failed, as it already exists" % name)
-            raise Exception("Port pair group with name %s already exists." % name)
+            logging.warning(
+                "Creating port pair group with name %s failed, as it already exists" % name)
+            raise Exception(
+                "Port pair group with name %s already exists." % name)
         logging.debug("Creating port pair group with name %s" % name)
         port_pair_group = PortPairGroup(name)
         if not stack_operation:
@@ -813,7 +845,8 @@ class OpenstackCompute(object):
         """
         port_pair_group = self.find_port_pair_group_by_name_or_id(name_or_id)
         if port_pair_group is None:
-            raise Exception("Port pair with name or id %s does not exists." % name_or_id)
+            raise Exception(
+                "Port pair with name or id %s does not exists." % name_or_id)
 
         self.port_pair_groups.pop(port_pair_group.id, None)
 
@@ -831,7 +864,8 @@ class OpenstackCompute(object):
         """
         port_chain = self.find_port_chain_by_name_or_id(name)
         if port_chain is not None and not stack_operation:
-            logging.warning("Creating port chain with name %s failed, as it already exists" % name)
+            logging.warning(
+                "Creating port chain with name %s failed, as it already exists" % name)
             raise Exception("Port chain with name %s already exists." % name)
         logging.debug("Creating port chain with name %s" % name)
         port_chain = PortChain(name)
@@ -865,7 +899,8 @@ class OpenstackCompute(object):
         port_chain = self.find_port_chain_by_name_or_id(name_or_id)
         port_chain.uninstall(self)
         if port_chain is None:
-            raise Exception("Port chain with name or id %s does not exists." % name_or_id)
+            raise Exception(
+                "Port chain with name or id %s does not exists." % name_or_id)
 
         self.port_chains.pop(port_chain.id, None)
 
@@ -883,8 +918,10 @@ class OpenstackCompute(object):
         """
         flow_classifier = self.find_flow_classifier_by_name_or_id(name)
         if flow_classifier is not None and not stack_operation:
-            logging.warning("Creating flow classifier with name %s failed, as it already exists" % name)
-            raise Exception("Flow classifier with name %s already exists." % name)
+            logging.warning(
+                "Creating flow classifier with name %s failed, as it already exists" % name)
+            raise Exception(
+                "Flow classifier with name %s already exists." % name)
         logging.debug("Creating flow classifier with name %s" % name)
         flow_classifier = FlowClassifier(name)
         if not stack_operation:
@@ -917,7 +954,8 @@ class OpenstackCompute(object):
         """
         flow_classifier = self.find_flow_classifier_by_name_or_id(name_or_id)
         if flow_classifier is None:
-            raise Exception("Flow classifier with name or id %s does not exists." % name_or_id)
+            raise Exception(
+                "Flow classifier with name or id %s does not exists." % name_or_id)
 
         self.flow_classifiers.pop(flow_classifier.id, None)
 
index a93e75e..f32d971 100755 (executable)
@@ -1,31 +1,29 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from docker import DockerClient, APIClient
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from docker import APIClient
 import time
 import re
 
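The hunk above also trims the import line from DockerClient plus APIClient down to APIClient alone, presumably because only the low-level client is used in this module (an unused import is what pyflakes flags as F401). A minimal sketch of the remaining dependency, assuming a Docker daemon on the default Unix socket:

    from docker import APIClient

    # Low-level client only; the high-level DockerClient is not needed here.
    cli = APIClient(base_url="unix://var/run/docker.sock")
    print(cli.version().get("ApiVersion"))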
@@ -64,7 +62,8 @@ def docker_abs_cpu(container_id):
     cpu_usage = 0
     for number in numbers:
         cpu_usage += number
-    return {'CPU_used': cpu_usage, 'CPU_used_systime': sys_time, 'CPU_cores': len(numbers)}
+    return {'CPU_used': cpu_usage,
+            'CPU_used_systime': sys_time, 'CPU_cores': len(numbers)}
 
 
 def docker_mem_used(container_id):
@@ -123,7 +122,8 @@ def docker_mem(container_id):
     out_dict = dict()
     out_dict['MEM_used'] = docker_mem_used(container_id)
     out_dict['MEM_limit'] = docker_max_mem(container_id)
-    out_dict['MEM_%'] = float(out_dict['MEM_used']) / float(out_dict['MEM_limit'])
+    out_dict['MEM_%'] = float(out_dict['MEM_used']) / \
+        float(out_dict['MEM_limit'])
     return out_dict
 
 
@@ -218,9 +218,12 @@ def monitoring_over_time(container_id):
     second_disk_io = docker_block_rw(container_id)
 
     # Disk access
-    time_div = (int(second_disk_io['BLOCK_systime']) - int(first_disk_io['BLOCK_systime']))
-    read_div = int(second_disk_io['BLOCK_read']) - int(first_disk_io['BLOCK_read'])
-    write_div = int(second_disk_io['BLOCK_write']) - int(first_disk_io['BLOCK_write'])
+    time_div = (int(second_disk_io['BLOCK_systime']
+                    ) - int(first_disk_io['BLOCK_systime']))
+    read_div = int(second_disk_io['BLOCK_read']) - \
+        int(first_disk_io['BLOCK_read'])
+    write_div = int(second_disk_io['BLOCK_write']) - \
+        int(first_disk_io['BLOCK_write'])
     out_dict = {'BLOCK_read/s': int(read_div * 1000000000 / float(time_div) + 0.5),
                 'BLOCK_write/s': int(write_div * 1000000000 / float(time_div) + 0.5)}
 
@@ -232,7 +235,10 @@ def monitoring_over_time(container_id):
                      'NET_out/s': int(out_div * 1000000000 / float(time_div) + 0.5)})
 
     # CPU utilization
-    time_div = (int(second_cpu_usage['CPU_used_systime']) - int(first_cpu_usage['CPU_used_systime']))
-    usage_div = int(second_cpu_usage['CPU_used']) - int(first_cpu_usage['CPU_used'])
-    out_dict.update({'CPU_%': usage_div / float(time_div), 'CPU_cores': first_cpu_usage['CPU_cores']})
+    time_div = (int(second_cpu_usage['CPU_used_systime']
+                    ) - int(first_cpu_usage['CPU_used_systime']))
+    usage_div = int(second_cpu_usage['CPU_used']) - \
+        int(first_cpu_usage['CPU_used'])
+    out_dict.update({'CPU_%': usage_div / float(time_div),
+                     'CPU_cores': first_cpu_usage['CPU_cores']})
     return out_dict
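The arithmetic in the two hunks above is only reflowed; the underlying computation is still a delta between two samples divided by the elapsed system time. A condensed sketch of the CPU part, with cpu_utilisation() as a hypothetical helper and both samples assumed to use the same time unit:

    def cpu_utilisation(first_sample, second_sample):
        # Used CPU time delta over system time delta gives the utilisation
        # during the sampling interval.
        time_div = (int(second_sample['CPU_used_systime']) -
                    int(first_sample['CPU_used_systime']))
        usage_div = (int(second_sample['CPU_used']) -
                     int(first_sample['CPU_used']))
        return {'CPU_%': usage_div / float(time_div),
                'CPU_cores': first_sample['CPU_cores']}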
index 9bf80ee..ffcaa0a 100755 (executable)
@@ -1,32 +1,30 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from __future__ import print_function  # TODO remove when print is no longer needed for debugging
-from resources import *
+from resources.router import Router
 from datetime import datetime
 import re
 import sys
@@ -68,7 +66,8 @@ class HeatParser:
                  * *False*: Else
         :rtype: ``bool``
         """
-        if not self.check_template_version(str(input_dict['heat_template_version'])):
+        if not self.check_template_version(
+                str(input_dict['heat_template_version'])):
             print('Unsupported template version: ' + input_dict['heat_template_version'], file=sys.stderr)
             return False
 
@@ -81,15 +80,19 @@ class HeatParser:
         self.bufferResource = list()
 
         for resource in self.resources.values():
-            self.handle_resource(resource, stack, dc_label, stack_update=stack_update)
+            self.handle_resource(resource, stack, dc_label,
+                                 stack_update=stack_update)
 
-        # This loop tries to create all classes which had unresolved dependencies.
+        # This loop tries to create all classes which had unresolved
+        # dependencies.
         unresolved_resources_last_round = len(self.bufferResource) + 1
-        while len(self.bufferResource) > 0 and unresolved_resources_last_round > len(self.bufferResource):
+        while len(self.bufferResource) > 0 and unresolved_resources_last_round > len(
+                self.bufferResource):
             unresolved_resources_last_round = len(self.bufferResource)
             number_of_items = len(self.bufferResource)
             while number_of_items > 0:
-                self.handle_resource(self.bufferResource.pop(0), stack, dc_label, stack_update=stack_update)
+                self.handle_resource(self.bufferResource.pop(
+                    0), stack, dc_label, stack_update=stack_update)
                 number_of_items -= 1
 
         if len(self.bufferResource) > 0:
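The wrapped calls above belong to the parser's retry loop for resources whose dependencies could not be resolved on the first pass. A condensed sketch of that loop as a hypothetical standalone helper, assuming handle_resource() re-appends anything it still cannot resolve to bufferResource:

    def drain_buffered_resources(parser, stack, dc_label, stack_update):
        # Keep making passes while each pass resolves at least one resource;
        # stop as soon as a full pass makes no progress.
        unresolved_last_round = len(parser.bufferResource) + 1
        while parser.bufferResource and unresolved_last_round > len(parser.bufferResource):
            unresolved_last_round = len(parser.bufferResource)
            for _ in range(len(parser.bufferResource)):
                parser.handle_resource(parser.bufferResource.pop(0), stack,
                                       dc_label, stack_update=stack_update)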
@@ -123,7 +126,8 @@ class HeatParser:
             try:
                 net_name = resource['properties']['name']
                 if net_name not in stack.nets:
-                    stack.nets[net_name] = self.compute.create_network(net_name, True)
+                    stack.nets[net_name] = self.compute.create_network(
+                        net_name, True)
 
             except Exception as e:
                 LOG.warning('Could not create Net: ' + e.message)
@@ -141,7 +145,8 @@ class HeatParser:
                 net.subnet_name = resource['properties']['name']
                 if 'gateway_ip' in resource['properties']:
                     net.gateway_ip = resource['properties']['gateway_ip']
-                net.subnet_id = resource['properties'].get('id', str(uuid.uuid4()))
+                net.subnet_id = resource['properties'].get(
+                    'id', str(uuid.uuid4()))
                 net.subnet_creation_time = str(datetime.now())
                 if not stack_update:
                     net.set_cidr(IP.get_new_cidr(net.subnet_id))
@@ -158,8 +163,10 @@ class HeatParser:
                 else:
                     port = stack.ports[port_name]
 
-                if str(resource['properties']['network']['get_resource']) in stack.nets:
-                    net = stack.nets[resource['properties']['network']['get_resource']]
+                if str(resource['properties']['network']
+                       ['get_resource']) in stack.nets:
+                    net = stack.nets[resource['properties']
+                                     ['network']['get_resource']]
                     if net.subnet_id is not None:
                         port.net_name = net.name
                         port.ip_address = net.get_new_ip_address(port.name)
@@ -171,20 +178,24 @@ class HeatParser:
 
         if 'OS::Nova::Server' in resource['type']:
             try:
-                compute_name = str(dc_label) + '_' + str(stack.stack_name) + '_' + str(resource['properties']['name'])
+                compute_name = str(dc_label) + '_' + str(stack.stack_name) + \
+                    '_' + str(resource['properties']['name'])
                 shortened_name = str(dc_label) + '_' + str(stack.stack_name) + '_' + \
-                                 self.shorten_server_name(str(resource['properties']['name']), stack)
+                    self.shorten_server_name(
+                        str(resource['properties']['name']), stack)
                 nw_list = resource['properties']['networks']
 
                 if shortened_name not in stack.servers:
-                    server = self.compute.create_server(shortened_name, stack_update)
+                    server = self.compute.create_server(
+                        shortened_name, stack_update)
                     stack.servers[shortened_name] = server
                 else:
                     server = stack.servers[shortened_name]
 
                 server.full_name = compute_name
                 server.template_name = str(resource['properties']['name'])
-                server.command = resource['properties'].get('command', '/bin/sh')
+                server.command = resource['properties'].get(
+                    'command', '/bin/sh')
                 server.image = resource['properties']['image']
                 server.flavor = resource['properties']['flavor']
 
@@ -194,7 +205,8 @@ class HeatParser:
                     # we don't know which network it belongs to yet, but the resource will appear later in a valid
                     # template
                     if port_name not in stack.ports:
-                        stack.ports[port_name] = self.compute.create_port(port_name, stack_update)
+                        stack.ports[port_name] = self.compute.create_port(
+                            port_name, stack_update)
                     server.port_names.append(port_name)
                 return
             except Exception as e:
@@ -219,7 +231,8 @@ class HeatParser:
                         stack.routers[router_name].add_subnet(subnet_name)
                         return
             except Exception as e:
-                LOG.warning('Could not create RouterInterface: ' + e.__repr__())
+                LOG.warning(
+                    'Could not create RouterInterface: ' + e.__repr__())
             self.bufferResource.append(resource)
             return
 
@@ -228,7 +241,8 @@ class HeatParser:
                 port_name = resource['properties']['port_id']['get_resource']
                 floating_network_id = resource['properties']['floating_network_id']
                 if port_name not in stack.ports:
-                    stack.ports[port_name] = self.compute.create_port(port_name, stack_update)
+                    stack.ports[port_name] = self.compute.create_port(
+                        port_name, stack_update)
 
                 stack.ports[port_name].floating_ip = floating_network_id
             except Exception as e:
@@ -247,14 +261,17 @@ class HeatParser:
         if 'OS::Heat::ResourceGroup' in resource['type']:
             try:
                 embedded_resource = resource['properties']['resource_def']
-                LOG.debug("Found resource in resource group: {}".format(embedded_resource))
+                LOG.debug("Found resource in resource group: {}".format(
+                    embedded_resource))
                 # recursively parse embedded resource
-                self.handle_resource(embedded_resource, stack, dc_label, stack_update)
+                self.handle_resource(
+                    embedded_resource, stack, dc_label, stack_update)
             except Exception as e:
                 print('Could not create Router: ' + e.message)
             return
 
-        LOG.warning('Could not determine resource type: {}'.format(resource['type']))
+        LOG.warning(
+            'Could not determine resource type: {}'.format(resource['type']))
         return
 
     def shorten_server_name(self, server_name, stack):
@@ -310,8 +327,8 @@ class HeatParser:
         if year < 2015:
             return False
         if year == 2015:
-            if month < 04:
+            if month < 0o4:
                 return False
-            if month == 04 and day < 30:
+            if month == 0o4 and day < 30:
                 return False
         return True
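The only literal that changes in the hunk above is the octal spelling: 0o4 is the explicit form of the old 04, and both evaluate to decimal 4, so the date comparison behaves exactly as before while the new spelling also parses on Python 3 (where leading-zero octal literals are a syntax error). A tiny check, with is_supported() as a hypothetical stand-in for the version logic shown above:

    assert 0o4 == 4  # same value, modern spelling

    def is_supported(year, month, day):
        # Mirrors the hunk above: only dates from 2015-04-30 onwards pass.
        if year < 2015:
            return False
        if year == 2015:
            if month < 0o4:
                return False
            if month == 0o4 and day < 30:
                return False
        return True

    assert is_supported(2015, 4, 30) and not is_supported(2015, 4, 29)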
index 09a51f6..c7ef69b 100644 (file)
@@ -1,38 +1,37 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from urlparse import urlparse
 import logging
 
 LOG = logging.getLogger("api.openstack.helper")
 
+
 def get_host(r):
     try:
         return urlparse(r.base_url).hostname
-    except:
+    except BaseException:
         LOG.error("Could not get host part of request URL.")
     return "0.0.0.0"
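The bare except in get_host() becomes except BaseException, which keeps the original catch-everything behaviour (including KeyboardInterrupt and SystemExit) while naming the caught class explicitly; catching Exception instead would be narrower, but that is not what this change does. A minimal usage sketch, with _FakeRequest as a hypothetical stand-in for the request object whose base_url is parsed:

    from urlparse import urlparse  # Python 2; urllib.parse on Python 3

    class _FakeRequest(object):
        base_url = "http://10.0.0.1:9005/v2.0/networks"

    # get_host() as shown above returns the hostname part, or the fallback
    # "0.0.0.0" if parsing raises anything at all.
    print(urlparse(_FakeRequest().base_url).hostname)  # -> 10.0.0.1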
index 208a910..9138342 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from resources.net import Net
 import threading
 
@@ -128,7 +126,7 @@ def is_my_cidr(cidr, uuid):
 
     int_ip = Net.cidr_2_int(cidr)
 
-    if not int_ip in __issued_ips:
+    if int_ip not in __issued_ips:
         return False
 
     if __issued_ips[int_ip] == uuid:
index 5405e78..083550e 100755 (executable)
@@ -1,38 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""Openstack manage component of PG Sandman.
-
-.. module:: manage
-    :synopsis: Module containing the OpenstackManage class.
-.. moduleauthor: PG Sandman
-
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 import threading
 import uuid
@@ -40,7 +30,8 @@ import networkx as nx
 import chain_api
 import json
 import random
-from emuvim.api.openstack.resources import Net, Port
+from emuvim.api.openstack.resources.net import Net
+from emuvim.api.openstack.resources.port import Port
 from mininet.node import OVSSwitch, RemoteController, Node
 
 
@@ -71,7 +62,8 @@ class OpenstackManage(object):
         self.ip = ip
         self.port = port
         self._net = None
-        # to keep track which src_vnf(input port on the switch) handles a load balancer
+        # to keep track which src_vnf(input port on the switch) handles a load
+        # balancer
         self.lb_flow_cookies = dict()
         self.chain_flow_cookies = dict()
 
@@ -83,7 +75,8 @@ class OpenstackManage(object):
         # debug and to maintain
         self.flow_groups = dict()
 
-        # we want one global chain api. this should not be datacenter dependent!
+        # we want one global chain api. this should not be datacenter
+        # dependent!
         self.chain = chain_api.ChainApi(ip, port, self)
         self.thread = threading.Thread(target=self.chain._start_flask, args=())
         self.thread.daemon = True
@@ -128,7 +121,7 @@ class OpenstackManage(object):
 
             # create a port for the host
             port = Port("root-port")
-            #port.id = str(uuid.uuid4())
+            # port.id = str(uuid.uuid4())
             port.net_name = fn.name
 
             # get next free ip
@@ -137,16 +130,19 @@ class OpenstackManage(object):
             # floating ip network setup
             # wierd way of getting a datacenter object
             first_dc = self.net.dcs.values()[0]
-            # set a dpid for the switch. for this we have to get the id of the next possible dc
-            self.floating_switch = self.net.addSwitch("fs1", dpid=hex(first_dc._get_next_dc_dpid())[2:])
+            # set a dpid for the switch. for this we have to get the id of the
+            # next possible dc
+            self.floating_switch = self.net.addSwitch(
+                "fs1", dpid=hex(first_dc._get_next_dc_dpid())[2:])
             # this is the interface appearing on the physical host
             self.floating_root = Node('root', inNamespace=False)
             self.net.hosts.append(self.floating_root)
             self.net.nameToNode['root'] = self.floating_root
-            self.floating_intf = self.net.addLink(self.floating_root, self.floating_switch).intf1
+            self.floating_intf = self.net.addLink(
+                self.floating_root, self.floating_switch).intf1
             self.floating_root.setIP(root_ip, intf=self.floating_intf)
-            self.floating_nodes[(self.floating_root.name, root_ip)] = self.floating_root
-
+            self.floating_nodes[(self.floating_root.name,
+                                 root_ip)] = self.floating_root
 
     def stop_floating_network(self):
         self._net = None
@@ -235,16 +231,21 @@ class OpenstackManage(object):
             vnf_dst_interface = kwargs.get('vnf_dst_interface')
             layer2 = kwargs.get('layer2', True)
             match = kwargs.get('match')
-            flow = (vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
+            flow = (vnf_src_name, vnf_src_interface,
+                    vnf_dst_name, vnf_dst_interface)
             if flow in self.chain_flow_cookies:
-                raise Exception("There is already a chain at the specified src/dst pair!")
-            # set up a layer 2 chain, this allows multiple chains for the same interface
+                raise Exception(
+                    "There is already a chain at the specified src/dst pair!")
+            # set up a layer 2 chain, this allows multiple chains for the same
+            # interface
             src_node = self.net.getNodeByName(vnf_src_name)
             dst_node = self.net.getNodeByName(vnf_dst_name)
             dst_intf = dst_node.intf(vnf_dst_interface)
             if layer2:
-                switch, inport = self._get_connected_switch_data(vnf_src_name, vnf_src_interface)
-                self.setup_arp_reply_at(switch, inport, dst_intf.IP(), dst_intf.MAC())
+                switch, inport = self._get_connected_switch_data(
+                    vnf_src_name, vnf_src_interface)
+                self.setup_arp_reply_at(
+                    switch, inport, dst_intf.IP(), dst_intf.MAC())
                 if isinstance(match, str):
                     match += ",dl_dst=%s" % dst_intf.MAC()
                 else:
@@ -252,7 +253,7 @@ class OpenstackManage(object):
 
             cookie = kwargs.get('cookie', self.get_cookie())
             self.cookies.add(cookie)
-            c = self.net.setChain(
+            self.net.setChain(
                 vnf_src_name, vnf_dst_name,
                 vnf_src_interface=vnf_src_interface,
                 vnf_dst_interface=vnf_dst_interface,
@@ -263,7 +264,8 @@ class OpenstackManage(object):
                 cookie=cookie,
                 path=kwargs.get('path'))
 
-            # to keep this logic seperate of the core son-emu do the housekeeping here
+            # to keep this logic seperate of the core son-emu do the
+            # housekeeping here
             data = dict()
             data["src_vnf"] = vnf_src_name
             data["src_intf"] = vnf_src_interface
@@ -278,15 +280,18 @@ class OpenstackManage(object):
                                               vnf_dst_interface)[0]
 
             # add route to dst ip to this interface
-            # this might block on containers that are still setting up, so start a new thread
+            # this might block on containers that are still setting up, so
+            # start a new thread
             if not kwargs.get('no_route'):
                 # son_emu does not like concurrent commands for a container so we need to lock this if multiple chains
                 # on the same interface are created
-                src_node.setHostRoute(dst_node.intf(vnf_dst_interface).IP(), vnf_src_interface)
+                src_node.setHostRoute(dst_node.intf(
+                    vnf_dst_interface).IP(), vnf_src_interface)
 
             try:
-                son_emu_data = json.loads(self.get_son_emu_chain_data(vnf_src_name))
-            except:
+                son_emu_data = json.loads(
+                    self.get_son_emu_chain_data(vnf_src_name))
+            except BaseException:
                 son_emu_data = dict()
             if "son_emu_data" not in son_emu_data:
                 son_emu_data["son_emu_data"] = dict()
@@ -294,7 +299,8 @@ class OpenstackManage(object):
                 son_emu_data["son_emu_data"]["interfaces"] = dict()
             if vnf_src_interface not in son_emu_data["son_emu_data"]["interfaces"]:
                 son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface] = list()
-                son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface].append(dst_intf.IP())
+                son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface].append(
+                    dst_intf.IP())
 
             self.set_son_emu_chain_data(vnf_src_name, son_emu_data)
 
@@ -356,12 +362,14 @@ class OpenstackManage(object):
         :param data: Raw data to store on the node.
         :type data: ``str``
         """
-        self.net.getNodeByName(vnf_name).cmd("echo \'%s\' > /tmp/son_emu_data.json" % json.dumps(data))
+        self.net.getNodeByName(vnf_name).cmd(
+            "echo \'%s\' > /tmp/son_emu_data.json" % json.dumps(data))
         ip_list = []
         for intf in data['son_emu_data']['interfaces'].values():
             ip_list.extend(intf)
 
-        self.net.getNodeByName(vnf_name).cmd("echo \'%s\' > /tmp/son_emu_data" % "\n".join(ip_list))
+        self.net.getNodeByName(vnf_name).cmd(
+            "echo \'%s\' > /tmp/son_emu_data" % "\n".join(ip_list))
 
     def get_son_emu_chain_data(self, vnf_name):
         """
@@ -372,7 +380,8 @@ class OpenstackManage(object):
         :return: raw data stored on the node
         :rtype: ``str``
         """
-        return self.net.getNodeByName(vnf_name).cmd("cat /tmp/son_emu_data.json")
+        return self.net.getNodeByName(vnf_name).cmd(
+            "cat /tmp/son_emu_data.json")
 
     def _get_connected_switch_data(self, vnf_name, vnf_interface):
         """
@@ -390,8 +399,8 @@ class OpenstackManage(object):
             link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
             for link in link_dict:
                 if (link_dict[link]['src_port_id'] == vnf_interface or
-                            link_dict[link][
-                                'src_port_name'] == vnf_interface):
+                    link_dict[link][
+                        'src_port_name'] == vnf_interface):
                     # found the right link and connected switch
                     src_sw = connected_sw
                     src_sw_inport_nr = link_dict[link]['dst_port_nr']
@@ -415,7 +424,8 @@ class OpenstackManage(object):
         :return: path, src_sw, dst_sw
         :rtype: ``list``, ``str``, ``str``
         """
-        # modified version of the _chainAddFlow from emuvim.dcemulator.net._chainAddFlow
+        # modified version of the _chainAddFlow from
+        # emuvim.dcemulator.net._chainAddFlow
         src_sw = None
         dst_sw = None
         logging.debug("Find shortest path from vnf %s to %s",
@@ -425,8 +435,8 @@ class OpenstackManage(object):
             link_dict = self.net.DCNetwork_graph[src_vnf][connected_sw]
             for link in link_dict:
                 if (link_dict[link]['src_port_id'] == src_vnf_intf or
-                            link_dict[link][
-                                'src_port_name'] == src_vnf_intf):
+                    link_dict[link][
+                        'src_port_name'] == src_vnf_intf):
                     # found the right link and connected switch
                     src_sw = connected_sw
                     break
@@ -435,8 +445,8 @@ class OpenstackManage(object):
             link_dict = self.net.DCNetwork_graph[connected_sw][dst_vnf]
             for link in link_dict:
                 if link_dict[link]['dst_port_id'] == dst_vnf_intf or \
-                                link_dict[link][
-                                    'dst_port_name'] == dst_vnf_intf:
+                        link_dict[link][
+                        'dst_port_name'] == dst_vnf_intf:
                     # found the right link and connected
                     dst_sw = connected_sw
                     break
@@ -447,16 +457,18 @@ class OpenstackManage(object):
             # returns the first found shortest path
             # if all shortest paths are wanted, use: all_shortest_paths
             path = nx.shortest_path(self.net.DCNetwork_graph, src_sw, dst_sw)
-        except:
+        except BaseException:
             logging.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
                 src_vnf, dst_vnf, src_sw, dst_sw))
             logging.debug("Graph nodes: %r" % self.net.DCNetwork_graph.nodes())
             logging.debug("Graph edges: %r" % self.net.DCNetwork_graph.edges())
             for e, v in self.net.DCNetwork_graph.edges():
                 logging.debug("%r" % self.net.DCNetwork_graph[e][v])
-            return "No path could be found between {0} and {1}".format(src_vnf, dst_vnf)
+            return "No path could be found between {0} and {1}".format(
+                src_vnf, dst_vnf)
 
-        logging.info("Shortest path between {0} and {1}: {2}".format(src_vnf, dst_vnf, path))
+        logging.info("Shortest path between {0} and {1}: {2}".format(
+            src_vnf, dst_vnf, path))
         return path, src_sw, dst_sw
 
     def add_loadbalancer(self, src_vnf_name, src_vnf_interface, lb_data):
@@ -483,12 +495,15 @@ class OpenstackManage(object):
         custom_paths = lb_data.get('path', dict())
         dest_vnf_outport_nrs = list()
 
-        logging.debug("Call to add_loadbalancer at %s intfs:%s" % (src_vnf_name, src_vnf_interface))
+        logging.debug("Call to add_loadbalancer at %s intfs:%s" %
+                      (src_vnf_name, src_vnf_interface))
 
         if not self.check_vnf_intf_pair(src_vnf_name, src_vnf_interface):
-            raise Exception(u"Source VNF %s or intfs %s does not exist" % (src_vnf_name, src_vnf_interface))
+            raise Exception(u"Source VNF %s or intfs %s does not exist" % (
+                src_vnf_name, src_vnf_interface))
 
-        # find the switch belonging to the source interface, as well as the inport nr
+        # find the switch belonging to the source interface, as well as the
+        # inport nr
         for connected_sw in net.DCNetwork_graph.neighbors(src_vnf_name):
             link_dict = net.DCNetwork_graph[src_vnf_name][connected_sw]
             for link in link_dict:
@@ -508,19 +523,18 @@ class OpenstackManage(object):
                 link_dict = net.DCNetwork_graph[vnf_name][connected_sw]
                 for link in link_dict:
                     if link_dict[link]['src_port_name'] == dest_intfs_mapping[vnf_name]:
-                        dest_vnf_outport_nrs.append(int(link_dict[link]['dst_port_nr']))
+                        dest_vnf_outport_nrs.append(
+                            int(link_dict[link]['dst_port_nr']))
         # get first switch
         if (src_vnf_name, src_vnf_interface) not in self.lb_flow_cookies:
             self.lb_flow_cookies[(src_vnf_name, src_vnf_interface)] = list()
 
-        src_intf = None
         src_ip = None
         src_mac = None
         for intf in net[src_vnf_name].intfs.values():
             if intf.name == src_vnf_interface:
                 src_mac = intf.mac
                 src_ip = intf.ip
-                src_intf = intf
 
         # set up paths for each destination vnf individually
         index = 0
@@ -536,7 +550,8 @@ class OpenstackManage(object):
         data["cookie"] = cookie
 
         # lb mac for src -> target connections
-        lb_mac = "31:33:70:%02x:%02x:%02x" % (random.randint(0, 255),random.randint(0, 255),random.randint(0, 255))
+        lb_mac = "31:33:70:%02x:%02x:%02x" % (random.randint(
+            0, 255), random.randint(0, 255), random.randint(0, 255))
 
         # calculate lb ip as src_intf.ip +1
         octets = src_ip.split('.')
@@ -544,8 +559,10 @@ class OpenstackManage(object):
         plus_one = '.'.join(octets)
 
         # set up arp reply as well as add the route to the interface
-        self.setup_arp_reply_at(src_sw, src_sw_inport_nr, plus_one, lb_mac, cookie=cookie)
-        net.getNodeByName(src_vnf_name).setHostRoute(plus_one, src_vnf_interface)
+        self.setup_arp_reply_at(src_sw, src_sw_inport_nr,
+                                plus_one, lb_mac, cookie=cookie)
+        net.getNodeByName(src_vnf_name).setHostRoute(
+            plus_one, src_vnf_interface)
 
         for dst_vnf_name, dst_vnf_interface in dest_intfs_mapping.items():
             path, src_sw, dst_sw = self._get_path(src_vnf_name, dst_vnf_name,
@@ -556,14 +573,17 @@ class OpenstackManage(object):
             if custom_paths is not None and dst_vnf_name in custom_paths:
                 if dst_vnf_interface in custom_paths[dst_vnf_name]:
                     path = custom_paths[dst_vnf_name][dst_vnf_interface]
-                    logging.debug("Taking custom path from %s to %s: %s" % (src_vnf_name, dst_vnf_name, path))
+                    logging.debug("Taking custom path from %s to %s: %s" % (
+                        src_vnf_name, dst_vnf_name, path))
 
             if not self.check_vnf_intf_pair(dst_vnf_name, dst_vnf_interface):
                 self.delete_loadbalancer(src_vnf_name, src_vnf_interface)
-                raise Exception(u"VNF %s or intfs %s does not exist" % (dst_vnf_name, dst_vnf_interface))
+                raise Exception(u"VNF %s or intfs %s does not exist" %
+                                (dst_vnf_name, dst_vnf_interface))
             if isinstance(path, dict):
                 self.delete_loadbalancer(src_vnf_name, src_vnf_interface)
-                raise Exception(u"Can not find a valid path. Are you specifying the right interfaces?.")
+                raise Exception(
+                    u"Can not find a valid path. Are you specifying the right interfaces?.")
 
             target_mac = "fa:17:00:03:13:37"
             target_ip = "0.0.0.0"
@@ -575,8 +595,9 @@ class OpenstackManage(object):
             current_hop = src_sw
             switch_inport_nr = src_sw_inport_nr
 
-            #self.setup_arp_reply_at(src_sw, src_sw_inport_nr, target_ip, target_mac, cookie=cookie)
-            net.getNodeByName(dst_vnf_name).setHostRoute(src_ip, dst_vnf_interface)
+            # self.setup_arp_reply_at(src_sw, src_sw_inport_nr, target_ip, target_mac, cookie=cookie)
+            net.getNodeByName(dst_vnf_name).setHostRoute(
+                src_ip, dst_vnf_interface)
 
             # choose free vlan if path contains more than 1 switch
             if len(path) > 1:
@@ -607,16 +628,20 @@ class OpenstackManage(object):
                     switch_outport_nr = dst_sw_outport_nr
                     logging.info("end node reached: {0}".format(dst_vnf_name))
                 elif not isinstance(next_node, OVSSwitch):
-                    logging.info("Next node: {0} is not a switch".format(next_hop))
+                    logging.info(
+                        "Next node: {0} is not a switch".format(next_hop))
                     return "Next node: {0} is not a switch".format(next_hop)
                 else:
                     # take first link between switches by default
                     index_edge_out = 0
                     switch_outport_nr = net.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
 
-                cmd = 'priority=1,in_port=%s,cookie=%s' % (switch_inport_nr, cookie)
-                cmd_back = 'priority=1,in_port=%s,cookie=%s' % (switch_outport_nr, cookie)
-                # if a vlan is picked, the connection is routed through multiple switches
+                cmd = 'priority=1,in_port=%s,cookie=%s' % (
+                    switch_inport_nr, cookie)
+                cmd_back = 'priority=1,in_port=%s,cookie=%s' % (
+                    switch_outport_nr, cookie)
+                # if a vlan is picked, the connection is routed through
+                # multiple switches
                 if vlan is not None:
                     if path.index(current_hop) == 0:  # first node
                         # flow #index set up
@@ -642,8 +667,10 @@ class OpenstackManage(object):
                         # remove any vlan tags
                         cmd += ',dl_vlan=%s' % vlan
                         cmd += ',actions=pop_vlan,output:%s' % switch_outport_nr
-                        # set up arp replys at the port so the dst nodes know the src
-                        self.setup_arp_reply_at(current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
+                        # set up arp replys at the port so the dst nodes know
+                        # the src
+                        self.setup_arp_reply_at(
+                            current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
 
                         # reverse route
                         cmd_back = 'in_port=%s' % switch_outport_nr
@@ -663,8 +690,10 @@ class OpenstackManage(object):
                             cmd += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
                             cmd_back += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
                         else:
-                            cmd += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_outport_nr)
-                            cmd_back += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_inport_nr)
+                            cmd += ',dl_vlan=%s,actions=output:%s' % (
+                                vlan, switch_outport_nr)
+                            cmd_back += ',dl_vlan=%s,actions=output:%s' % (
+                                vlan, switch_inport_nr)
                 # output the packet at the correct outport
                 else:
                     cmd = 'in_port=%s' % src_sw_inport_nr
@@ -686,7 +715,8 @@ class OpenstackManage(object):
                     cmd_back += ',set_field:%s->ip_src' % plus_one
                     cmd_back += ',output:%s' % src_sw_inport_nr
 
-                    self.setup_arp_reply_at(current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
+                    self.setup_arp_reply_at(
+                        current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
 
                 # excecute the command on the target switch
                 logging.debug(cmd)
@@ -703,7 +733,8 @@ class OpenstackManage(object):
             # advance to next destination
             index += 1
 
-        # set up the actual load balancing rule as a multipath on the very first switch
+        # set up the actual load balancing rule as a multipath on the very
+        # first switch
         cmd = '"in_port=%s' % src_sw_inport_nr
         cmd += ',cookie=%s' % (cookie)
         cmd += ',ip'
@@ -713,7 +744,8 @@ class OpenstackManage(object):
         # load balance modulo n over all dest interfaces
         # TODO: in newer openvswitch implementations this should be changed to symmetric_l3l4+udp
         # to balance any kind of traffic
-        cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(dest_intfs_mapping)
+        cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(
+            dest_intfs_mapping)
         # reuse the cookie as table entry as it will be unique
         cmd += ',resubmit(, %s)"' % cookie
 
@@ -758,10 +790,12 @@ class OpenstackManage(object):
                 link_dict = net.DCNetwork_graph[vnf_name][connected_sw]
                 for link in link_dict:
                     if link_dict[link]['src_port_name'] == dest_intfs_mapping[vnf_name]:
-                        dest_vnf_outport_nrs.append(int(link_dict[link]['dst_port_nr']))
+                        dest_vnf_outport_nrs.append(
+                            int(link_dict[link]['dst_port_nr']))
 
         if len(dest_vnf_outport_nrs) == 0:
-            raise Exception("There are no paths specified for the loadbalancer")
+            raise Exception(
+                "There are no paths specified for the loadbalancer")
         src_ip = self.floating_intf.IP()
         src_mac = self.floating_intf.MAC()
 
@@ -769,7 +803,8 @@ class OpenstackManage(object):
         index = 0
         cookie = self.get_cookie()
         main_cmd = "add-flow -OOpenFlow13"
-        floating_ip = self.floating_network.get_new_ip_address("floating-ip").split("/")[0]
+        floating_ip = self.floating_network.get_new_ip_address(
+            "floating-ip").split("/")[0]
 
         for dst_vnf_name, dst_vnf_interface in dest_intfs_mapping.items():
             path = None
@@ -778,17 +813,20 @@ class OpenstackManage(object):
             if custom_paths is not None and dst_vnf_name in custom_paths:
                 if dst_vnf_interface in custom_paths[dst_vnf_name]:
                     path = custom_paths[dst_vnf_name][dst_vnf_interface]
-                    logging.debug("Taking custom path to %s: %s" % (dst_vnf_name, path))
+                    logging.debug("Taking custom path to %s: %s" %
+                                  (dst_vnf_name, path))
             else:
                 if datacenter not in self.floating_links:
                     self.floating_links[datacenter] = \
                         net.addLink(self.floating_switch, datacenter)
                 path = \
-                self._get_path(self.floating_root.name, dst_vnf_name, self.floating_intf.name, dst_vnf_interface)[0]
+                    self._get_path(self.floating_root.name, dst_vnf_name,
+                                   self.floating_intf.name, dst_vnf_interface)[0]
 
             if isinstance(path, dict):
                 self.delete_flow_by_cookie(cookie)
-                raise Exception(u"Can not find a valid path. Are you specifying the right interfaces?.")
+                raise Exception(
+                    u"Can not find a valid path. Are you specifying the right interfaces?.")
 
             intf = net[dst_vnf_name].nameToIntf[dst_vnf_interface]
             target_mac = str(intf.MAC())
@@ -812,16 +850,20 @@ class OpenstackManage(object):
                     switch_outport_nr = dst_sw_outport_nr
                     logging.info("end node reached: {0}".format(dst_vnf_name))
                 elif not isinstance(next_node, OVSSwitch):
-                    logging.info("Next node: {0} is not a switch".format(next_hop))
+                    logging.info(
+                        "Next node: {0} is not a switch".format(next_hop))
                     return "Next node: {0} is not a switch".format(next_hop)
                 else:
                     # take first link between switches by default
                     index_edge_out = 0
                     switch_outport_nr = net.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
 
-                # default filters, just overwritten on the first node and last node
-                cmd = 'priority=1,in_port=%s,cookie=%s' % (switch_inport_nr, cookie)
-                cmd_back = 'priority=1,in_port=%s,cookie=%s' % (switch_outport_nr, cookie)
+                # default filters, just overwritten on the first node and last
+                # node
+                cmd = 'priority=1,in_port=%s,cookie=%s' % (
+                    switch_inport_nr, cookie)
+                cmd_back = 'priority=1,in_port=%s,cookie=%s' % (
+                    switch_outport_nr, cookie)
                 if i == 0:  # first node
                     cmd = 'in_port=%s' % src_sw_inport_nr
                     cmd += ',cookie=%s' % cookie
@@ -842,13 +884,16 @@ class OpenstackManage(object):
                     # remove any vlan tags
                     cmd_back += ',dl_vlan=%s' % vlan
                     cmd_back += ',actions=pop_vlan,output:%s' % switch_inport_nr
-                    self.setup_arp_reply_at(current_hop, src_sw_inport_nr, floating_ip, target_mac, cookie=cookie)
+                    self.setup_arp_reply_at(
+                        current_hop, src_sw_inport_nr, floating_ip, target_mac, cookie=cookie)
                 elif next_hop == dst_vnf_name:  # last switch
                     # remove any vlan tags
                     cmd += ',dl_vlan=%s' % vlan
                     cmd += ',actions=pop_vlan,output:%s' % switch_outport_nr
-                    # set up arp replys at the port so the dst nodes know the src
-                    self.setup_arp_reply_at(current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
+                    # set up arp replys at the port so the dst nodes know the
+                    # src
+                    self.setup_arp_reply_at(
+                        current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
 
                     # reverse route
                     cmd_back = 'in_port=%s' % switch_outport_nr
@@ -861,7 +906,8 @@ class OpenstackManage(object):
                     cmd_back += ',set_field:%s->eth_src' % src_mac
                     cmd_back += ',set_field:%s->ip_src' % floating_ip
                     cmd_back += ',output:%s' % switch_inport_nr
-                    net.getNodeByName(dst_vnf_name).setHostRoute(src_ip, dst_vnf_interface)
+                    net.getNodeByName(dst_vnf_name).setHostRoute(
+                        src_ip, dst_vnf_interface)
                 else:  # middle node
                     # if we have a circle in the path we need to specify this, as openflow will ignore the packet
                     # if we just output it on the same port as it came in
@@ -869,8 +915,10 @@ class OpenstackManage(object):
                         cmd += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
                         cmd_back += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
                     else:
-                        cmd += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_outport_nr)
-                        cmd_back += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_inport_nr)
+                        cmd += ',dl_vlan=%s,actions=output:%s' % (
+                            vlan, switch_outport_nr)
+                        cmd_back += ',dl_vlan=%s,actions=output:%s' % (
+                            vlan, switch_inport_nr)
 
                 # excecute the command on the target switch
                 logging.debug(cmd)
@@ -887,7 +935,8 @@ class OpenstackManage(object):
             # advance to next destination
             index += 1
 
-        # set up the actual load balancing rule as a multipath on the very first switch
+        # set up the actual load balancing rule as a multipath on the very
+        # first switch
         cmd = '"in_port=%s' % src_sw_inport_nr
         cmd += ',cookie=%s' % (cookie)
         cmd += ',ip'
@@ -897,7 +946,8 @@ class OpenstackManage(object):
         # load balance modulo n over all dest interfaces
         # TODO: in newer openvswitch implementations this should be changed to symmetric_l3l4+udp
         # to balance any kind of traffic
-        cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(dest_intfs_mapping)
+        cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(
+            dest_intfs_mapping)
         # reuse the cookie as table entry as it will be unique
         cmd += ',resubmit(, %s)"' % cookie
 
@@ -909,7 +959,8 @@ class OpenstackManage(object):
 
         return cookie, floating_ip
 
-    def setup_arp_reply_at(self, switch, port_nr, target_ip, target_mac, cookie=None):
+    def setup_arp_reply_at(self, switch, port_nr,
+                           target_ip, target_mac, cookie=None):
         """
         Sets up a custom ARP reply at a switch.
         An ARP request coming in on the `port_nr` for `target_ip` will be answered with target IP/MAC.
@@ -931,7 +982,8 @@ class OpenstackManage(object):
             cookie = self.get_cookie()
         main_cmd = "add-flow -OOpenFlow13"
 
-        # first set up ARP requests for the source node, so it will always 'find' a partner
+        # first set up ARP requests for the source node, so it will always
+        # 'find' a partner
         cmd = '"in_port=%s' % port_nr
         cmd += ',cookie=%s' % cookie
         cmd += ',arp'
@@ -988,7 +1040,8 @@ class OpenstackManage(object):
         self.cookies.remove(cookie)
         return True
 
-    def delete_chain_by_intf(self, src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf):
+    def delete_chain_by_intf(
+            self, src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf):
         """
         Removes a flow identified by the vnf_name/vnf_intf pairs
 
@@ -1003,16 +1056,18 @@ class OpenstackManage(object):
         :return: True if successful, else false
         :rtype: ``bool``
         """
-        logging.debug("Deleting flow for vnf/intf pair %s %s" % (src_vnf_name, src_vnf_intf))
+        logging.debug("Deleting flow for vnf/intf pair %s %s" %
+                      (src_vnf_name, src_vnf_intf))
         if not self.check_vnf_intf_pair(src_vnf_name, src_vnf_intf):
             return False
         if not self.check_vnf_intf_pair(dst_vnf_name, dst_vnf_intf):
             return False
         target_flow = (src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf)
-        if not target_flow in self.chain_flow_cookies:
+        if target_flow not in self.chain_flow_cookies:
             return False
 
-        success = self.delete_flow_by_cookie(self.chain_flow_cookies[target_flow])
+        success = self.delete_flow_by_cookie(
+            self.chain_flow_cookies[target_flow])
 
         if success:
             del self.chain_flow_cookies[target_flow]
@@ -1032,7 +1087,8 @@ class OpenstackManage(object):
         delete_group = list()
         group_id = self.get_flow_group(vnf_src_name, vnf_src_interface)
         for node in self.net.switches:
-            for cookie in self.lb_flow_cookies[(vnf_src_name, vnf_src_interface)]:
+            for cookie in self.lb_flow_cookies[(
+                    vnf_src_name, vnf_src_interface)]:
                 flow = dict()
                 flow["dpid"] = int(node.dpid, 16)
                 flow["cookie"] = cookie
@@ -1053,7 +1109,8 @@ class OpenstackManage(object):
         logging.debug("Deleting group with id %s" % group_id)
         for switch_del_group in delete_group:
             if self.net.controller == RemoteController:
-                self.net.ryu_REST("stats/groupentry/delete", data=switch_del_group)
+                self.net.ryu_REST("stats/groupentry/delete",
+                                  data=switch_del_group)
 
         # unmap groupid from the interface
         target_pair = (vnf_src_name, vnf_src_interface)
@@ -1071,7 +1128,8 @@ class OpenstackManage(object):
         """
         cookie = int(cookie)
         if cookie not in self.floating_cookies:
-            raise Exception("Can not delete floating loadbalancer as the flowcookie is not known")
+            raise Exception(
+                "Can not delete floating loadbalancer as the flowcookie is not known")
 
         self.delete_flow_by_cookie(cookie)
         floating_ip = self.floating_cookies[cookie]
index e340a3a..b6347eb 100755 (executable)
@@ -1,36 +1,39 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from manage import OpenstackManage
 
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
+from openstack_dummies.glance_dummy_api import GlanceDummyApi
+from openstack_dummies.heat_dummy_api import HeatDummyApi
+from openstack_dummies.keystone_dummy_api import KeystoneDummyApi
+from openstack_dummies.neutron_dummy_api import NeutronDummyApi
+from openstack_dummies.nova_dummy_api import NovaDummyApi
 
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from manage import OpenstackManage
-from openstack_dummies import *
 import logging
 import threading
 import compute
-import requests
 import socket
 import time
 
@@ -47,11 +50,16 @@ class OpenstackApiEndpoint():
         self.port = port
         self.compute = compute.OpenstackCompute()
         self.openstack_endpoints = dict()
-        self.openstack_endpoints['keystone'] = KeystoneDummyApi(self.ip, self.port)
-        self.openstack_endpoints['neutron'] = NeutronDummyApi(self.ip, self.port + 4696, self.compute)
-        self.openstack_endpoints['nova'] = NovaDummyApi(self.ip, self.port + 3774, self.compute)
-        self.openstack_endpoints['heat'] = HeatDummyApi(self.ip, self.port + 3004, self.compute)
-        self.openstack_endpoints['glance'] = GlanceDummyApi(self.ip, self.port + 4242, self.compute)
+        self.openstack_endpoints['keystone'] = KeystoneDummyApi(
+            self.ip, self.port)
+        self.openstack_endpoints['neutron'] = NeutronDummyApi(
+            self.ip, self.port + 4696, self.compute)
+        self.openstack_endpoints['nova'] = NovaDummyApi(
+            self.ip, self.port + 3774, self.compute)
+        self.openstack_endpoints['heat'] = HeatDummyApi(
+            self.ip, self.port + 3004, self.compute)
+        self.openstack_endpoints['glance'] = GlanceDummyApi(
+            self.ip, self.port + 4242, self.compute)
 
         self.rest_threads = list()
         self.manage = OpenstackManage()
@@ -69,8 +77,8 @@ class OpenstackApiEndpoint():
         self.compute.dc = dc
         for ep in self.openstack_endpoints.values():
             ep.manage = self.manage
-        logging.info \
-            ("Connected DC(%s) to API endpoint %s(%s:%d)" % (dc.label, self.__class__.__name__, self.ip, self.port))
+        logging.info("Connected DC(%s) to API endpoint %s(%s:%d)" %
+                     (dc.label, self.__class__.__name__, self.ip, self.port))
 
     def connect_dc_network(self, dc_network):
         """
@@ -97,14 +105,14 @@ class OpenstackApiEndpoint():
             c.server_thread.start()
             if wait_for_port:
                 self._wait_for_port(c.ip, c.port)
-       
+
     def stop(self):
         """
         Stop all connected OpenStack endpoints that are connected to this API endpoint.
         """
         for c in self.openstack_endpoints.values():
             c.stop()
-        #for c in self.openstack_endpoints.values():
+        # for c in self.openstack_endpoints.values():
         #    if c.server_thread:
         #        print("Waiting for WSGIServers to be stopped ...")
         #        c.server_thread.join()
@@ -117,5 +125,6 @@ class OpenstackApiEndpoint():
             if r == 0:
                 break  # port is open proceed
             else:
-                logging.warning("Waiting for {}:{} ... ({}/10)".format(ip, port, i + 1))
+                logging.warning(
+                    "Waiting for {}:{} ... ({}/10)".format(ip, port, i + 1))
             time.sleep(1)
index ef97752..d888119 100755 (executable)
@@ -1,32 +1,25 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from glance_dummy_api import GlanceDummyApi
-from heat_dummy_api import HeatDummyApi
-from keystone_dummy_api import KeystoneDummyApi
-from neutron_dummy_api import NeutronDummyApi
-from nova_dummy_api import NovaDummyApi
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 93ad2bd..d8eeb79 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from flask import Flask, request
 from flask_restful import Api, Resource
 from gevent.pywsgi import WSGIServer
@@ -71,8 +69,9 @@ class BaseOpenstackDummy(Resource):
         with self.manage.lock:
             with open(self.playbook_file, 'a') as logfile:
                 if len(request.data) > 0:
-                    data = "# %s API\n" % str(self.__class__).split('.')[-1].rstrip('\'>')
+                    data = "# %s API\n" % str(
+                        self.__class__).split('.')[-1].rstrip('\'>')
                     data += "curl -X {type} -H \"Content-type: application/json\" -d '{data}' {url}".format(type=request.method,
-                                                                                            data=request.data,
-                                                                                            url=request.url)
+                                                                                                            data=request.data,
+                                                                                                            url=request.url)
                     logfile.write(data + "\n")
index 4ce9e77..6edaa72 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from flask_restful import Resource
 from flask import Response, request
 from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
@@ -78,7 +76,8 @@ class GlanceListApiVersions(Resource):
             ]
         }]
         resp['versions'] = versions
-        return Response(json.dumps(resp), status=200, mimetype='application/json')
+        return Response(json.dumps(resp), status=200,
+                        mimetype='application/json')
 
 
 class GlanceSchema(Resource):
@@ -88,7 +87,8 @@ class GlanceSchema(Resource):
         resp['name'] = 'someImageName'
         resp['properties'] = dict()
         # just an ugly hack to allow the openstack client to work
-        return Response(json.dumps(resp), status=200, mimetype='application/json')
+        return Response(json.dumps(resp), status=200,
+                        mimetype='application/json')
 
 
 class GlanceListImagesApi(Resource):
@@ -97,7 +97,7 @@ class GlanceListImagesApi(Resource):
 
     def get(self):
         LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
-        try:           
+        try:
             resp = dict()
             resp['next'] = None
             resp['first'] = "/v2/images"
@@ -132,10 +132,12 @@ class GlanceListImagesApi(Resource):
                     break
             if "marker" in request.args:  # ugly hack to fix pageination of openstack client
                 resp['images'] = None
-            return Response(json.dumps(resp), status=200, mimetype="application/json")
+            return Response(json.dumps(resp), status=200,
+                            mimetype="application/json")
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the list of images." % __name__)
             return ex.message, 500
 
     def post(self):
@@ -147,20 +149,22 @@ class GlanceListImagesApi(Resource):
         LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
         try:
             body_data = json.loads(request.data)
-        except:
+        except BaseException:
             body_data = dict()
         # lets see what we should create
         img_name = request.headers.get("X-Image-Meta-Name")
         img_size = request.headers.get("X-Image-Meta-Size")
         img_disk_format = request.headers.get("X-Image-Meta-Disk-Format")
         img_is_public = request.headers.get("X-Image-Meta-Is-Public")
-        img_container_format = request.headers.get("X-Image-Meta-Container-Format")
+        img_container_format = request.headers.get(
+            "X-Image-Meta-Container-Format")
         # try to use body payload if header fields are empty
         if img_name is None:
             img_name = body_data.get("name")
             img_size = 1234
             img_disk_format = body_data.get("disk_format")
-            img_is_public = True if "public" in body_data.get("visibility") else False
+            img_is_public = True if "public" in body_data.get(
+                "visibility") else False
             img_container_format = body_data.get("container_format")
         # try to find ID of already existing image (matched by name)
         img_id = None
@@ -212,15 +216,19 @@ class GlanceImageByIdApi(Resource):
                     resp['id'] = image.id
                     resp['name'] = image.name
 
-                    return Response(json.dumps(resp), status=200, mimetype="application/json")
+                    return Response(json.dumps(resp), status=200,
+                                    mimetype="application/json")
 
 
-            response = Response("Image with id or name %s does not exists." % id, status=404)
+            response = Response(
+                "Image with id or name %s does not exists." % id, status=404)
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve image with id %s." % (__name__, id))
-            return Response(ex.message, status=500, mimetype='application/json')
+            LOG.exception(
+                u"%s: Could not retrieve image with id %s." % (__name__, id))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
     def put(self, id):
         LOG.debug("API CALL: %s " % str(self.__class__.__name__))
@@ -241,12 +249,16 @@ class GlanceImageByDockerNameApi(Resource):
                 resp = dict()
                 resp['id'] = image.id
                 resp['name'] = image.name
-                return Response(json.dumps(resp), status=200, mimetype="application/json")
+                return Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
 
 
-            response = Response("Image with id or name %s does not exists." % id, status=404)
+            response = Response(
+                "Image with id or name %s does not exists." % id, status=404)
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            logging.exception(u"%s: Could not retrieve image with id %s." % (__name__, id))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception(
+                u"%s: Could not retrieve image with id %s." % (__name__, id))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
index 9822f22..86fb6c1 100755 (executable)
@@ -1,33 +1,31 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from flask import request, Response
 from flask_restful import Resource
-from emuvim.api.openstack.resources import Stack
+from emuvim.api.openstack.resources.stack import Stack
 from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
 from emuvim.api.openstack.helper import get_host
 from datetime import datetime
@@ -88,7 +86,8 @@ class HeatListAPIVersions(Resource):
             ]
         }]
 
-        return Response(json.dumps(resp), status=200, mimetype="application/json")
+        return Response(json.dumps(resp), status=200,
+                        mimetype="application/json")
 
 
 class HeatCreateStack(Resource):
@@ -116,9 +115,11 @@ class HeatCreateStack(Resource):
             stack.stack_name = stack_dict['stack_name']
 
             reader = HeatParser(self.api.compute)
-            if isinstance(stack_dict['template'], str) or isinstance(stack_dict['template'], unicode):
+            if isinstance(stack_dict['template'], str) or isinstance(
+                    stack_dict['template'], unicode):
                 stack_dict['template'] = json.loads(stack_dict['template'])
-            if not reader.parse_input(stack_dict['template'], stack, self.api.compute.dc.label):
+            if not reader.parse_input(
+                    stack_dict['template'], stack, self.api.compute.dc.label):
                 self.api.compute.clean_broken_stack(stack)
                 return 'Could not create stack.', 400
 
@@ -136,7 +137,8 @@ class HeatCreateStack(Resource):
 
             self.api.compute.add_stack(stack)
             self.api.compute.deploy_stack(stack.id)
-            return Response(json.dumps(return_dict), status=201, mimetype="application/json")
+            return Response(json.dumps(return_dict), status=201,
+                            mimetype="application/json")
 
         except Exception as ex:
             LOG.exception("Heat: Create Stack exception.")
@@ -168,7 +170,8 @@ class HeatCreateStack(Resource):
                      "tags": ""
                      })
 
-            return Response(json.dumps(return_stacks), status=200, mimetype="application/json")
+            return Response(json.dumps(return_stacks),
+                            status=200, mimetype="application/json")
         except Exception as ex:
             LOG.exception("Heat: List Stack exception.")
             return ex.message, 500
@@ -225,7 +228,8 @@ class HeatShowStack(Resource):
                     "stack_name": stack.stack_name,
                     "stack_owner": "The owner of the stack.",  # add stack owner
                     "stack_status": stack.status,
-                    "stack_status_reason": "The reason for the current status of the stack.",  # add status reason
+                    # add status reason
+                    "stack_status_reason": "The reason for the current status of the stack.",
                     "template_description": "The description of the stack template.",
                     "stack_user_project_id": "The project UUID of the stack user.",
                     "timeout_mins": "",
                     "template_description": "The description of the stack template.",
                 }
             }
 
                 }
+            return Response(json.dumps(return_stack),
+                            status=200, mimetype="application/json")
 
         except Exception as ex:
             LOG.exception("Heat: Show stack exception.")
             return ex.message, 500
 
 
+
 class HeatShowStackTemplate(Resource):
     def __init__(self, api):
         self.api = api
 class HeatShowStackTemplate(Resource):
     def __init__(self, api):
         self.api = api
@@ -253,7 +258,7 @@ class HeatShowStackTemplate(Resource):
         :param tenant_id:
         :param stack_name_or_id:
         :param stack_id:
         :param tenant_id:
         :param stack_name_or_id:
         :param stack_id:
-        :return: Returns a json response which contains the stack's template. 
+        :return: Returns a json response which contains the stack's template.
         """
         LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
         try:
         """
         LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
         try:
@@ -266,9 +271,10 @@ class HeatShowStackTemplate(Resource):
                         stack = tmp_stack
             if stack is None:
                 return 'Could not resolve Stack - ID', 404
                         stack = tmp_stack
             if stack is None:
                 return 'Could not resolve Stack - ID', 404
-            #LOG.debug("STACK: {}".format(stack))
-            #LOG.debug("TEMPLATE: {}".format(stack.template))
-            return Response(json.dumps(stack.template), status=200, mimetype="application/json")
+            # LOG.debug("STACK: {}".format(stack))
+            # LOG.debug("TEMPLATE: {}".format(stack.template))
+            return Response(json.dumps(stack.template),
+                            status=200, mimetype="application/json")
 
         except Exception as ex:
             LOG.exception("Heat: Show stack template exception.")
@@ -286,7 +292,7 @@ class HeatShowStackResources(Resource):
         :param tenant_id:
         :param stack_name_or_id:
         :param stack_id:
-        :return: Returns a json response which contains the stack's template. 
+        :return: Returns a json response which contains the stack's template.
         """
         LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
         try:
@@ -302,7 +308,8 @@ class HeatShowStackResources(Resource):
 
             response = {"resources": []}
 
-            return Response(json.dumps(response), status=200, mimetype="application/json")
+            return Response(json.dumps(response), status=200,
+                            mimetype="application/json")
 
         except Exception as ex:
             LOG.exception("Heat: Show stack template exception.")
@@ -320,7 +327,7 @@ class HeatUpdateStack(Resource):
     def patch(self, tenant_id, stack_name_or_id, stack_id=None):
         LOG.debug("API CALL: %s PATCH" % str(self.__class__.__name__))
         return self.update_stack(tenant_id, stack_name_or_id, stack_id)
-    
+
     def update_stack(self, tenant_id, stack_name_or_id, stack_id=None):
         """
         Updates an existing stack with a new heat template.
@@ -354,9 +361,11 @@ class HeatUpdateStack(Resource):
             stack.status = "UPDATE_COMPLETE"
 
             reader = HeatParser(self.api.compute)
-            if isinstance(stack_dict['template'], str) or isinstance(stack_dict['template'], unicode):
+            if isinstance(stack_dict['template'], str) or isinstance(
+                    stack_dict['template'], unicode):
                 stack_dict['template'] = json.loads(stack_dict['template'])
-            if not reader.parse_input(stack_dict['template'], stack, self.api.compute.dc.label, stack_update=True):
+            if not reader.parse_input(
+                    stack_dict['template'], stack, self.api.compute.dc.label, stack_update=True):
                 return 'Could not create stack.', 400
             stack.template = stack_dict['template']
 
index 1a258df..0741553 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from flask_restful import Resource
 from flask import request, Response
 from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
@@ -39,11 +37,16 @@ class KeystoneDummyApi(BaseOpenstackDummy):
     def __init__(self, in_ip, in_port):
         super(KeystoneDummyApi, self).__init__(in_ip, in_port)
 
-        self.api.add_resource(KeystoneListVersions, "/", resource_class_kwargs={'api': self})
-        self.api.add_resource(KeystoneShowAPIv2, "/v2.0", resource_class_kwargs={'api': self})
-        self.api.add_resource(KeystoneGetToken, "/v2.0/tokens", resource_class_kwargs={'api': self})
-        self.api.add_resource(KeystoneShowAPIv3, "/v3.0", resource_class_kwargs={'api': self})
-        self.api.add_resource(KeystoneGetTokenv3, "/v3.0/auth/tokens", resource_class_kwargs={'api': self})
+        self.api.add_resource(KeystoneListVersions, "/",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(KeystoneShowAPIv2, "/v2.0",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(KeystoneGetToken, "/v2.0/tokens",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(KeystoneShowAPIv3, "/v3.0",
+                              resource_class_kwargs={'api': self})
+        self.api.add_resource(
+            KeystoneGetTokenv3, "/v3.0/auth/tokens", resource_class_kwargs={'api': self})
 
 
 class KeystoneListVersions(Resource):
@@ -85,7 +88,8 @@ class KeystoneListVersions(Resource):
         }]
         resp['versions']['values'] = version
 
-        return Response(json.dumps(resp), status=200, mimetype='application/json')
+        return Response(json.dumps(resp), status=200,
+                        mimetype='application/json')
 
 
 class KeystoneShowAPIv2(Resource):
@@ -106,8 +110,8 @@ class KeystoneShowAPIv2(Resource):
         """
         LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
 
-        neutron_port = self.api.port + 4696
-        heat_port = self.api.port + 3004
+        neutron_port = self.api.port + 4696
+        heat_port = self.api.port + 3004
 
         resp = dict()
         resp['version'] = {
@@ -127,7 +131,8 @@ class KeystoneShowAPIv2(Resource):
             ]
         }
         LOG.debug(json.dumps(resp))
-        return Response(json.dumps(resp), status=200, mimetype='application/json')
+        return Response(json.dumps(resp), status=200,
+                        mimetype='application/json')
 
 
 class KeystoneShowAPIv3(Resource):
@@ -148,8 +153,8 @@ class KeystoneShowAPIv3(Resource):
         """
         LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
 
-        neutron_port = self.api.port + 4696
-        heat_port = self.api.port + 3004
+        neutron_port = self.api.port + 4696
+        heat_port = self.api.port + 3004
 
         resp = dict()
         resp['version'] = {
@@ -169,7 +174,8 @@ class KeystoneShowAPIv3(Resource):
             ]
         }
 
-        return Response(json.dumps(resp), status=200, mimetype='application/json')
+        return Response(json.dumps(resp), status=200,
+                        mimetype='application/json')
 
 
 class KeystoneGetToken(Resource):
@@ -208,11 +214,13 @@ class KeystoneGetToken(Resource):
 
             token['issued_at'] = "2014-01-30T15:30:58.819Z"
             token['expires'] = "2999-01-30T15:30:58.819Z"
-            token['id'] = req['auth'].get('token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
+            token['id'] = req['auth'].get(
+                'token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
             token['tenant'] = dict()
             token['tenant']['description'] = None
             token['tenant']['enabled'] = True
-            token['tenant']['id'] = req['auth'].get('tenantId', 'fc394f2ab2df4114bde39905f800dc57')
+            token['tenant']['id'] = req['auth'].get(
+                'tenantId', 'fc394f2ab2df4114bde39905f800dc57')
             token['tenant']['name'] = "tenantName"
 
             ret['access']['user'] = dict()
@@ -220,7 +228,8 @@ class KeystoneGetToken(Resource):
             user['username'] = req.get('username', "username")
             user['name'] = "tenantName"
             user['roles_links'] = list()
-            user['id'] = token['tenant'].get('id', "fc394f2ab2df4114bde39905f800dc57")
+            user['id'] = token['tenant'].get(
+                'id', "fc394f2ab2df4114bde39905f800dc57")
             user['roles'] = [{'name': 'Member'}]
 
             ret['access']['region_name'] = "RegionOne"
@@ -252,7 +261,7 @@ class KeystoneGetToken(Resource):
                     "endpoints_links": [],
                     "type": "identity",
                     "name": "keystone"
-                },
+            },
                 {
                     "endpoints": [
                         {
@@ -266,7 +275,7 @@ class KeystoneGetToken(Resource):
                     "endpoints_links": [],
                     "type": "network",
                     "name": "neutron"
-                },
+            },
                 {
                     "endpoints": [
                         {
@@ -280,7 +289,7 @@ class KeystoneGetToken(Resource):
                     "endpoints_links": [],
                     "type": "image",
                     "name": "glance"
-                },
+            },
                 {
                     "endpoints": [
                         {
@@ -294,27 +303,29 @@ class KeystoneGetToken(Resource):
                     "endpoints_links": [],
                     "type": "orchestration",
                     "name": "heat"
-                }
+            }
             ]
 
             ret['access']["metadata"] = {
-                                            "is_admin": 0,
-                                            "roles": [
-                                                "7598ac3c634d4c3da4b9126a5f67ca2b"
-                                            ]
-                                        },
+                "is_admin": 0,
+                "roles": [
+                    "7598ac3c634d4c3da4b9126a5f67ca2b"
+                ]
+            },
             ret['access']['trust'] = {
                 "id": "394998fa61f14736b1f0c1f322882949",
                 "trustee_user_id": "269348fdd9374b8885da1418e0730af1",
                 "trustor_user_id": "3ec3164f750146be97f21559ee4d9c51",
                 "impersonation": False
             }
-            return Response(json.dumps(ret), status=200, mimetype='application/json')
+            return Response(json.dumps(ret), status=200,
+                            mimetype='application/json')
 
         except Exception as ex:
             logging.exception("Keystone: Get token failed.")
             return ex.message, 500
 
+
 class KeystoneGetTokenv3(Resource):
     """
     Returns a static keystone token.
@@ -354,7 +365,8 @@ class KeystoneGetTokenv3(Resource):
             token['extras'] = dict()
             token['user'] = dict()
             user = token['user']
-            user['id'] = req['auth'].get('token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
+            user['id'] = req['auth'].get(
+                'token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
             user['name'] = "tenantName"
             user['password_expires_at'] = None
             user['domain'] = {"id": "default", "name": "Default"}
@@ -363,7 +375,7 @@ class KeystoneGetTokenv3(Resource):
             # project
             token['project'] = {
                 "domain": {
-                    "id" : "default",
+                    "id": "default",
                     "name": "Default"
                 },
                 "id": "8538a3f13f9541b28c2620eb19065e45",
@@ -396,7 +408,7 @@ class KeystoneGetTokenv3(Resource):
                     "id": "2dad48f09e2a447a9bf852bcd93543fc",
                     "type": "identity",
                     "name": "keystone"
-                },
+            },
                 {
                     "endpoints": [
                         {
@@ -409,7 +421,7 @@ class KeystoneGetTokenv3(Resource):
                     "id": "2dad48f09e2a447a9bf852bcd93548cf",
                     "type": "network",
                     "name": "neutron"
-                },
+            },
                 {
                     "endpoints": [
                         {
@@ -422,7 +434,7 @@ class KeystoneGetTokenv3(Resource):
                     "id": "2dad48f09e2a447a9bf852bcd93548cf",
                     "type": "image",
                     "name": "glance"
-                },
+            },
                 {
                     "endpoints": [
                         {
@@ -435,9 +447,10 @@ class KeystoneGetTokenv3(Resource):
                     "id": "2dad48f09e2a447a9bf852bcd93548bf",
                     "type": "orchestration",
                     "name": "heat"
-                }
+            }
             ]
-            return Response(json.dumps(ret), status=201, mimetype='application/json')
+            return Response(json.dumps(ret), status=201,
+                            mimetype='application/json')
 
         except Exception as ex:
             logging.exception("Keystone: Get token failed.")
index 6f6d3dc..154f1ca 100755 (executable)
@@ -1,34 +1,31 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from flask_restful import Resource
 from flask import request, Response
 from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
-from emuvim.api.openstack.helper import get_host
 from datetime import datetime
 import neutron_sfc_dummy_api as SFC
 import logging
@@ -44,7 +41,8 @@ class NeutronDummyApi(BaseOpenstackDummy):
         super(NeutronDummyApi, self).__init__(ip, port)
         self.compute = compute
 
-        # create default networks (OSM usually assumes to have these pre-configured)
+        # create default networks (OSM usually assumes to have these
+        # pre-configured)
         self.compute.create_network("mgmt")
         self.compute.create_network("mgmtnet")
 
@@ -165,7 +163,8 @@ class NeutronListAPIVersions(Resource):
         }]
         resp['versions'] = versions
 
-        return Response(json.dumps(resp), status=200, mimetype='application/json')
+        return Response(json.dumps(resp), status=200,
+                        mimetype='application/json')
 
 
 class NeutronShowAPIv2Details(Resource):
@@ -199,7 +198,7 @@ class NeutronShowAPIv2Details(Resource):
                 ],
                 "name": "network",
                 "collection": "networks"
-            },
+        },
             {
                 "links": [
                     {
@@ -209,10 +208,11 @@ class NeutronShowAPIv2Details(Resource):
                 ],
                 "name": "ports",
                 "collection": "ports"
-            }
+        }
         ]
 
-        return Response(json.dumps(resp), status=200, mimetype='application/json')
+        return Response(json.dumps(resp), status=200,
+                        mimetype='application/json')
 
 
 class NeutronListNetworks(Resource):
@@ -232,14 +232,18 @@ class NeutronListNetworks(Resource):
         try:
             if request.args.get('name'):
                 tmp_network = NeutronShowNetwork(self.api)
-                response = tmp_network.get_network(request.args.get('name'), True)
-                LOG.debug("{} RESPONSE (1): {}".format(self.__class__.__name__, response))
+                response = tmp_network.get_network(
+                    request.args.get('name'), True)
+                LOG.debug("{} RESPONSE (1): {}".format(
+                    self.__class__.__name__, response))
                 return response
             id_list = request.args.getlist('id')
             if len(id_list) == 1:
                 tmp_network = NeutronShowNetwork(self.api)
-                response = tmp_network.get_network(request.args.get('id'), True)
-                LOG.debug("{} RESPONSE (2): {}".format(self.__class__.__name__, response))
+                response = tmp_network.get_network(
+                    request.args.get('id'), True)
+                LOG.debug("{} RESPONSE (2): {}".format(
+                    self.__class__.__name__, response))
                 return response
 
             network_list = list()
@@ -258,12 +262,15 @@ class NeutronListNetworks(Resource):
                             network_list.append(tmp_network_dict)
 
             network_dict["networks"] = network_list
-            LOG.debug("{} RESPONSE (3): {}".format(self.__class__.__name__, network_dict))
-            return Response(json.dumps(network_dict), status=200, mimetype='application/json')
+            LOG.debug("{} RESPONSE (3): {}".format(
+                self.__class__.__name__, network_dict))
+            return Response(json.dumps(network_dict),
+                            status=200, mimetype='application/json')
 
         except Exception as ex:
             LOG.exception("Neutron: List networks exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronShowNetwork(Resource):
@@ -294,9 +301,11 @@ class NeutronShowNetwork(Resource):
         :rtype: :class:`flask.response`
         """
         try:
-            net = self.api.compute.find_network_by_name_or_id(network_name_or_id)
+            net = self.api.compute.find_network_by_name_or_id(
+                network_name_or_id)
             if net is None:
             if net is None:
-                return Response(u'Network not found.\n', status=404, mimetype='application/json')
+                return Response(u'Network not found.\n',
+                                status=404, mimetype='application/json')
 
             tmp_network_dict = net.create_network_dict()
             tmp_dict = dict()
@@ -305,12 +314,13 @@ class NeutronShowNetwork(Resource):
             else:
                 tmp_dict["network"] = tmp_network_dict
 
-            return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
-
+            return Response(json.dumps(tmp_dict), status=200,
+                            mimetype='application/json')
 
         except Exception as ex:
             logging.exception("Neutron: Show network exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronCreateNetwork(Resource):
@@ -332,13 +342,16 @@ class NeutronCreateNetwork(Resource):
             name = network_dict['network']['name']
             net = self.api.compute.find_network_by_name_or_id(name)
             if net is not None:
-                return Response('Network already exists.\n', status=400, mimetype='application/json')
+                return Response('Network already exists.\n',
+                                status=400, mimetype='application/json')
 
             net = self.api.compute.create_network(name)
-            return Response(json.dumps({"network": net.create_network_dict()}), status=201, mimetype='application/json')
+            return Response(json.dumps(
+                {"network": net.create_network_dict()}), status=201, mimetype='application/json')
         except Exception as ex:
             LOG.exception("Neutron: Create network excepiton.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronUpdateNetwork(Resource):
@@ -361,7 +374,6 @@ class NeutronUpdateNetwork(Resource):
             if network_id in self.api.compute.nets:
                 net = self.api.compute.nets[network_id]
                 network_dict = json.loads(request.data)
-                old_net = copy.copy(net)
 
                 if "status" in network_dict["network"]:
                     net.status = network_dict["network"]["status"]
@@ -372,17 +384,21 @@ class NeutronUpdateNetwork(Resource):
                 if "admin_state_up" in network_dict["network"]:
                     pass  # tmp_network_dict["admin_state_up"] = True
                 if "tenant_id" in network_dict["network"]:
-                    pass  # tmp_network_dict["tenant_id"] = "c1210485b2424d48804aad5d39c61b8f"
+                    # tmp_network_dict["tenant_id"] = "c1210485b2424d48804aad5d39c61b8f"
+                    pass
                 if "shared" in network_dict["network"]:
                     pass  # tmp_network_dict["shared"] = False
 
-                return Response(json.dumps(network_dict), status=200, mimetype='application/json')
+                return Response(json.dumps(network_dict),
+                                status=200, mimetype='application/json')
 
-            return Response('Network not found.\n', status=404, mimetype='application/json')
+            return Response('Network not found.\n', status=404,
+                            mimetype='application/json')
 
         except Exception as ex:
             LOG.exception("Neutron: Show networks exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronDeleteNetwork(Resource):
@@ -410,7 +426,7 @@ class NeutronDeleteNetwork(Resource):
             delete_subnet = NeutronDeleteSubnet(self.api)
             resp = delete_subnet.delete(net.subnet_id)
 
-            if not '204' in resp.status and not '404' in resp.status:
+            if '204' not in resp.status and '404' not in resp.status:
                 return resp
 
             self.api.compute.delete_network(network_id)
@@ -418,7 +434,8 @@ class NeutronDeleteNetwork(Resource):
             return Response('', status=204, mimetype='application/json')
         except Exception as ex:
             LOG.exception("Neutron: Delete network exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronListSubnets(Resource):
@@ -459,11 +476,13 @@ class NeutronListSubnets(Resource):
 
             subnet_dict["subnets"] = subnet_list
 
-            return Response(json.dumps(subnet_dict), status=200, mimetype='application/json')
+            return Response(json.dumps(subnet_dict), status=200,
+                            mimetype='application/json')
 
         except Exception as ex:
             LOG.exception("Neutron: List subnets exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronShowSubnet(Resource):
@@ -502,13 +521,16 @@ class NeutronShowSubnet(Resource):
                         tmp_dict["subnets"] = [tmp_subnet_dict]
                     else:
                         tmp_dict["subnet"] = tmp_subnet_dict
-                    return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
+                    return Response(json.dumps(tmp_dict),
+                                    status=200, mimetype='application/json')
 
-            return Response('Subnet not found. (' + subnet_name_or_id + ')\n', status=404, mimetype='application/json')
+            return Response('Subnet not found. (' + subnet_name_or_id +
+                            ')\n', status=404, mimetype='application/json')
 
         except Exception as ex:
             LOG.exception("Neutron: Show subnet exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronCreateSubnet(Resource):
@@ -529,15 +551,20 @@ class NeutronCreateSubnet(Resource):
         LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
         try:
             subnet_dict = json.loads(request.data)
-            net = self.api.compute.find_network_by_name_or_id(subnet_dict['subnet']['network_id'])
+            net = self.api.compute.find_network_by_name_or_id(
+                subnet_dict['subnet']['network_id'])
 
             if net is None:
-                return Response('Could not find network.\n', status=404, mimetype='application/json')
+                return Response('Could not find network.\n',
+                                status=404, mimetype='application/json')
 
-            net.subnet_name = subnet_dict["subnet"].get('name', str(net.name) + '-sub')
+            net.subnet_name = subnet_dict["subnet"].get(
+                'name', str(net.name) + '-sub')
             if net.subnet_id is not None:
-                LOG.error("Only one subnet per network is supported: {}".format(net.subnet_id))
-                return Response('Only one subnet per network is supported\n', status=409, mimetype='application/json')
+                LOG.error(
+                    "Only one subnet per network is supported: {}".format(net.subnet_id))
+                return Response('Only one subnet per network is supported\n',
+                                status=409, mimetype='application/json')
 
             if "id" in subnet_dict["subnet"]:
                 net.subnet_id = subnet_dict["subnet"]["id"]
@@ -557,11 +584,13 @@ class NeutronCreateSubnet(Resource):
             if "enable_dhcp" in subnet_dict["subnet"]:
                 pass
 
-            return Response(json.dumps({'subnet': net.create_subnet_dict()}), status=201, mimetype='application/json')
+            return Response(json.dumps(
+                {'subnet': net.create_subnet_dict()}), status=201, mimetype='application/json')
 
         except Exception as ex:
             LOG.exception("Neutron: Create network excepiton.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronUpdateSubnet(Resource):
@@ -606,13 +635,16 @@ class NeutronUpdateSubnet(Resource):
 
                     net.subnet_update_time = str(datetime.now())
                     tmp_dict = {'subnet': net.create_subnet_dict()}
-                    return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
+                    return Response(json.dumps(tmp_dict),
+                                    status=200, mimetype='application/json')
 
-            return Response('Network not found.\n', status=404, mimetype='application/json')
+            return Response('Network not found.\n', status=404,
+                            mimetype='application/json')
 
         except Exception as ex:
             LOG.exception("Neutron: Show networks exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronDeleteSubnet(Resource):
@@ -636,9 +668,11 @@ class NeutronDeleteSubnet(Resource):
                 if net.subnet_id == subnet_id:
                     for server in self.api.compute.computeUnits.values():
                         for port_name in server.port_names:
-                            port = self.api.compute.find_port_by_name_or_id(port_name)
+                            port = self.api.compute.find_port_by_name_or_id(
+                                port_name)
                             if port is None:
-                                LOG.warning("Port search for {} returned None.".format(port_name))
+                                LOG.warning(
+                                    "Port search for {} returned None.".format(port_name))
                                 continue
                             if port.net_name == net.name:
                                 port.ip_address = None
@@ -650,12 +684,15 @@ class NeutronDeleteSubnet(Resource):
 
                     net.delete_subnet()
 
-                    return Response('', status=204, mimetype='application/json')
+                    return Response(
+                        '', status=204, mimetype='application/json')
 
-            return Response('Could not find subnet.', status=404, mimetype='application/json')
+            return Response('Could not find subnet.',
+                            status=404, mimetype='application/json')
         except Exception as ex:
             LOG.exception("Neutron: Delete subnet exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronListPorts(Resource):
@@ -695,11 +732,13 @@ class NeutronListPorts(Resource):
 
             port_dict["ports"] = port_list
 
-            return Response(json.dumps(port_dict), status=200, mimetype='application/json')
+            return Response(json.dumps(port_dict), status=200,
+                            mimetype='application/json')
 
         except Exception as ex:
             LOG.exception("Neutron: List ports exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronShowPort(Resource):
@@ -732,17 +771,20 @@ class NeutronShowPort(Resource):
         try:
             port = self.api.compute.find_port_by_name_or_id(port_name_or_id)
             if port is None:
-                return Response('Port not found. (' + port_name_or_id + ')\n', status=404, mimetype='application/json')
+                return Response('Port not found. (' + port_name_or_id + ')\n',
+                                status=404, mimetype='application/json')
             tmp_port_dict = port.create_port_dict(self.api.compute)
             tmp_dict = dict()
             if as_list:
                 tmp_dict["ports"] = [tmp_port_dict]
             else:
                 tmp_dict["port"] = tmp_port_dict
-            return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
+            return Response(json.dumps(tmp_dict), status=200,
+                            mimetype='application/json')
         except Exception as ex:
             LOG.exception("Neutron: Show port exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronCreatePort(Resource):
@@ -764,7 +806,8 @@ class NeutronCreatePort(Resource):
             net_id = port_dict['port']['network_id']
 
             if net_id not in self.api.compute.nets:
-                return Response('Could not find network.\n', status=404, mimetype='application/json')
+                return Response('Could not find network.\n',
+                                status=404, mimetype='application/json')
 
             net = self.api.compute.nets[net_id]
             if 'name' in port_dict['port']:
@@ -774,7 +817,8 @@ class NeutronCreatePort(Resource):
                 name = "port:cp%s:man:%s" % (num_ports, str(uuid.uuid4()))
 
             if self.api.compute.find_port_by_name_or_id(name):
-                return Response("Port with name %s already exists.\n" % name, status=500, mimetype='application/json')
+                return Response("Port with name %s already exists.\n" %
+                                name, status=500, mimetype='application/json')
 
             port = self.api.compute.create_port(name)
 
@@ -796,7 +840,8 @@ class NeutronCreatePort(Resource):
             if "tenant_id" in port_dict["port"]:
                 pass
 
-            # add the port to a stack if the specified network is a stack network
+            # add the port to a stack if the specified network is a stack
+            # network
             for stack in self.api.compute.stacks.values():
                 for net in stack.nets.values():
                     if net.id == net_id:
@@ -806,7 +851,8 @@ class NeutronCreatePort(Resource):
                             mimetype='application/json')
         except Exception as ex:
             LOG.exception("Neutron: Show port exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronUpdatePort(Resource):
@@ -829,7 +875,8 @@ class NeutronUpdatePort(Resource):
             port_dict = json.loads(request.data)
             port = self.api.compute.find_port_by_name_or_id(port_id)
             if port is None:
-                return Response("Port with id %s does not exists.\n" % port_id, status=404, mimetype='application/json')
+                return Response("Port with id %s does not exists.\n" %
+                                port_id, status=404, mimetype='application/json')
             old_port = copy.copy(port)
 
             stack = None
@@ -853,7 +900,8 @@ class NeutronUpdatePort(Resource):
                 port.set_name(port_dict["port"]["name"])
                 if stack is not None:
                     if port.net_name in stack.nets:
-                        stack.nets[port.net_name].update_port_name_for_ip_address(port.ip_address, port.name)
+                        stack.nets[port.net_name].update_port_name_for_ip_address(
+                            port.ip_address, port.name)
                     stack.ports[port.name] = stack.ports[old_port.name]
                     del stack.ports[old_port.name]
             if "network_id" in port_dict["port"]:
@@ -867,7 +915,8 @@ class NeutronUpdatePort(Resource):
                             mimetype='application/json')
         except Exception as ex:
             LOG.exception("Neutron: Update port exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronDeletePort(Resource):
@@ -889,7 +938,8 @@ class NeutronDeletePort(Resource):
         try:
             port = self.api.compute.find_port_by_name_or_id(port_id)
             if port is None:
-                return Response("Port with id %s does not exists.\n" % port_id, status=404)
+                return Response("Port with id %s does not exists.\n" %
+                                port_id, status=404)
             stack = None
             for s in self.api.compute.stacks.values():
                 for p in s.ports.values():
@@ -897,7 +947,8 @@ class NeutronDeletePort(Resource):
                         stack = s
             if stack is not None:
                 if port.net_name in stack.nets:
-                    stack.nets[port.net_name].withdraw_ip_address(port.ip_address)
+                    stack.nets[port.net_name].withdraw_ip_address(
+                        port.ip_address)
                 for server in stack.servers.values():
                     try:
                         server.port_names.remove(port.name)
@@ -911,7 +962,8 @@ class NeutronDeletePort(Resource):
 
         except Exception as ex:
             LOG.exception("Neutron: Delete port exception.")
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class NeutronAddFloatingIp(Resource):
@@ -944,7 +996,8 @@ class NeutronAddFloatingIp(Resource):
             ip["floating_ip_address"] = "172.0.0.%d" % i
             ip["fixed_ip_address"] = "10.0.0.%d" % i
             resp["floatingips"].append(ip)
-        return Response(json.dumps(resp), status=200, mimetype='application/json')
+        return Response(json.dumps(resp), status=200,
+                        mimetype='application/json')
 
     def post(self):
         """
@@ -972,7 +1025,8 @@ class NeutronAddFloatingIp(Resource):
                                     status=400, mimetype='application/json')
 
                 if port.floating_ip is not None:
-                    return Response("We allow only one floating ip per port\n", status=400, mimetype='application/json')
+                    return Response("We allow only one floating ip per port\n",
+                                    status=400, mimetype='application/json')
             else:
                 num_ports = len(self.api.compute.ports)
                 name = "port:cp%s:fl:%s" % (num_ports, str(uuid.uuid4()))
@@ -992,7 +1046,9 @@ class NeutronAddFloatingIp(Resource):
             resp["floating_ip_address"] = port.floating_ip
             resp["fixed_ip_address"] = port.floating_ip
 
-            return Response(json.dumps(response), status=200, mimetype='application/json')
+            return Response(json.dumps(response), status=200,
+                            mimetype='application/json')
         except Exception as ex:
             LOG.exception("Neutron: Create FloatingIP exception %s.", ex)
-            return Response(ex.message, status=500, mimetype='application/json')
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
index 1494bce..f2ea6bb 100644 (file)
@@ -1,38 +1,32 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from flask_restful import Resource
 from flask import request, Response
 import logging
 import json
-import uuid
-
-from emuvim.api.openstack.resources.port_chain import PortChain
-from emuvim.api.openstack.helper import get_host
 
 
 class SFC(Resource):
@@ -52,8 +46,10 @@ class PortPairsCreate(SFC):
             request_dict = json.loads(request.data).get("port_pair")
             name = request_dict["name"]
 
-            ingress_port = self.api.compute.find_port_by_name_or_id(request_dict["ingress"])
-            egress_port = self.api.compute.find_port_by_name_or_id(request_dict["egress"])
+            ingress_port = self.api.compute.find_port_by_name_or_id(
+                request_dict["ingress"])
+            egress_port = self.api.compute.find_port_by_name_or_id(
+                request_dict["egress"])
 
             port_pair = self.api.compute.create_port_pair(name)
             port_pair.ingress = ingress_port
@@ -66,10 +62,13 @@ class PortPairsCreate(SFC):
             resp = {
                 "port_pair": port_pair.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=201, mimetype='application/json')
+            return Response(json.dumps(resp), status=201,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortPairsUpdate(SFC):
@@ -87,10 +86,13 @@ class PortPairsUpdate(SFC):
             resp = {
                 "port_pair": port_pair.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortPairsDelete(SFC):
@@ -102,8 +104,10 @@ class PortPairsDelete(SFC):
             return Response("", status=204,
                             mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortPairsList(SFC):
@@ -115,10 +119,13 @@ class PortPairsList(SFC):
                 port_pair_list.append(port_pair.create_dict(self.api.compute))
             resp = {"port_pairs": port_pair_list}
 
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortPairsShow(SFC):
@@ -130,10 +137,13 @@ class PortPairsShow(SFC):
             resp = {
                 "port_pair": port_pair.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 ###############################################################################
@@ -147,20 +157,25 @@ class PortPairGroupCreate(SFC):
         try:
             request_dict = json.loads(request.data).get("port_pair_group")
 
-            port_pair_group = self.api.compute.create_port_pair_group(request_dict["name"])
+            port_pair_group = self.api.compute.create_port_pair_group(
+                request_dict["name"])
             port_pair_group.port_pairs = request_dict["port_pairs"]
             if "description" in request_dict:
                 port_pair_group.description = request_dict["description"]
             if "port_pair_group_parameters" in request_dict:
-                port_pair_group.port_pair_group_parameters = request_dict["port_pair_group_parameters"]
+                port_pair_group.port_pair_group_parameters = request_dict[
+                    "port_pair_group_parameters"]
 
             resp = {
                 "port_pair_group": port_pair_group.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=201, mimetype='application/json')
+            return Response(json.dumps(resp), status=201,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortPairGroupUpdate(SFC):
@@ -169,7 +184,8 @@ class PortPairGroupUpdate(SFC):
 
         try:
             request_dict = json.loads(request.data).get("port_pair_group")
-            port_pair_group = self.api.compute.find_port_pair_group_by_name_or_id(group_id)
+            port_pair_group = self.api.compute.find_port_pair_group_by_name_or_id(
+                group_id)
             if "name" in request_dict:
                 port_pair_group.name = request_dict["name"]
             if "description" in request_dict:
@@ -180,10 +196,13 @@ class PortPairGroupUpdate(SFC):
             resp = {
                 "port_pair_group": port_pair_group.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortPairGroupDelete(SFC):
@@ -195,8 +214,10 @@ class PortPairGroupDelete(SFC):
             return Response("", status=204,
                             mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortPairGroupList(SFC):
@@ -205,13 +226,17 @@ class PortPairGroupList(SFC):
         try:
             port_pair_group_list = []
             for port_pair_group in self.api.compute.port_pair_groups.values():
-                port_pair_group_list.append(port_pair_group.create_dict(self.api.compute))
+                port_pair_group_list.append(
+                    port_pair_group.create_dict(self.api.compute))
             resp = {"port_pair_groups": port_pair_group_list}
 
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortPairGroupShow(SFC):
@@ -219,14 +244,18 @@ class PortPairGroupShow(SFC):
         logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
 
         try:
-            port_pair_group = self.api.compute.find_port_pair_group_by_name_or_id(group_id)
+            port_pair_group = self.api.compute.find_port_pair_group_by_name_or_id(
+                group_id)
             resp = {
                 "port_pair_group": port_pair_group.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 ###############################################################################
@@ -240,7 +269,8 @@ class FlowClassifierCreate(SFC):
         try:
             request_dict = json.loads(request.data).get("flow_classifier")
 
-            flow_classifier = self.api.compute.create_flow_classifier(request_dict["name"])
+            flow_classifier = self.api.compute.create_flow_classifier(
+                request_dict["name"])
             if "description" in request_dict:
                 flow_classifier.description = request_dict["description"]
             if "ethertype" in request_dict:
@@ -252,9 +282,11 @@ class FlowClassifierCreate(SFC):
             if "source_port_range_max" in request_dict:
                 flow_classifier.source_port_range_max = request_dict["source_port_range_max"]
             if "destination_port_range_min" in request_dict:
-                flow_classifier.destination_port_range_min = request_dict["destination_port_range_min"]
+                flow_classifier.destination_port_range_min = request_dict[
+                    "destination_port_range_min"]
             if "destination_port_range_max" in request_dict:
-                flow_classifier.destination_port_range_max = request_dict["destination_port_range_max"]
+                flow_classifier.destination_port_range_max = request_dict[
+                    "destination_port_range_max"]
             if "source_ip_prefix" in request_dict:
                 flow_classifier.source_ip_prefix = request_dict["source_ip_prefix"]
             if "destination_ip_prefix" in request_dict:
@@ -269,10 +301,13 @@ class FlowClassifierCreate(SFC):
             resp = {
                 "flow_classifier": flow_classifier.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=201, mimetype='application/json')
+            return Response(json.dumps(resp), status=201,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class FlowClassifierUpdate(SFC):
@@ -281,7 +316,8 @@ class FlowClassifierUpdate(SFC):
 
         try:
             request_dict = json.loads(request.data).get("flow_classifier")
-            flow_classifier = self.api.compute.find_flow_classifier_by_name_or_id(flow_classifier_id)
+            flow_classifier = self.api.compute.find_flow_classifier_by_name_or_id(
+                flow_classifier_id)
             if "name" in request_dict:
                 flow_classifier.name = request_dict["name"]
             if "description" in request_dict:
@@ -290,10 +326,13 @@ class FlowClassifierUpdate(SFC):
             resp = {
                 "flow_classifier": flow_classifier.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class FlowClassifierDelete(SFC):
@@ -305,8 +344,10 @@ class FlowClassifierDelete(SFC):
             return Response("", status=204,
                             mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class FlowClassifierList(SFC):
@@ -315,13 +356,17 @@ class FlowClassifierList(SFC):
         try:
             flow_classifier_list = []
             for flow_classifier in self.api.compute.flow_classifiers.values():
-                flow_classifier_list.append(flow_classifier.create_dict(self.api.compute))
+                flow_classifier_list.append(
+                    flow_classifier.create_dict(self.api.compute))
             resp = {"flow_classifiers": flow_classifier_list}
 
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class FlowClassifierShow(SFC):
@@ -329,14 +374,18 @@ class FlowClassifierShow(SFC):
         logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
 
         try:
-            flow_classifier = self.api.compute.find_flow_classifier_by_name_or_id(flow_classifier_id)
+            flow_classifier = self.api.compute.find_flow_classifier_by_name_or_id(
+                flow_classifier_id)
             resp = {
                 "flow_classifier": flow_classifier.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 ###############################################################################
@@ -350,7 +399,8 @@ class PortChainCreate(SFC):
         try:
             request_dict = json.loads(request.data).get("port_chain")
 
-            port_chain = self.api.compute.create_port_chain(request_dict["name"])
+            port_chain = self.api.compute.create_port_chain(
+                request_dict["name"])
             port_chain.port_pair_groups = request_dict["port_pair_groups"]
             if "description" in request_dict:
                 port_chain.description = request_dict["description"]
@@ -364,10 +414,13 @@ class PortChainCreate(SFC):
             resp = {
                 "port_chain": port_chain.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=201, mimetype='application/json')
+            return Response(json.dumps(resp), status=201,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortChainUpdate(SFC):
@@ -394,10 +447,13 @@ class PortChainUpdate(SFC):
             resp = {
                 "port_chain": port_chain.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortChainDelete(SFC):
@@ -409,8 +465,10 @@ class PortChainDelete(SFC):
             return Response("", status=204,
                             mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortChainList(SFC):
@@ -419,13 +477,17 @@ class PortChainList(SFC):
         try:
             port_chain_list = []
             for port_chain in self.api.compute.port_chains.values():
-                port_chain_list.append(port_chain.create_dict(self.api.compute))
+                port_chain_list.append(
+                    port_chain.create_dict(self.api.compute))
             resp = {"port_chains": port_chain_list}
 
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
 
 
 class PortChainShow(SFC):
@@ -433,11 +495,15 @@ class PortChainShow(SFC):
         logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
 
         try:
-            port_chain = self.api.compute.find_port_chain_by_name_or_id(chain_id)
+            port_chain = self.api.compute.find_port_chain_by_name_or_id(
+                chain_id)
             resp = {
                 "port_chain": port_chain.create_dict(self.api.compute)
             }
-            return Response(json.dumps(resp), status=200, mimetype='application/json')
+            return Response(json.dumps(resp), status=200,
+                            mimetype='application/json')
         except Exception as ex:
-            logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
-            return Response(ex.message, status=500, mimetype='application/json')
+            logging.exception("Neutron SFC: %s Exception." %
+                              str(self.__class__.__name__))
+            return Response(ex.message, status=500,
+                            mimetype='application/json')
index 9dcfa48..e12fb05 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from flask_restful import Resource
 from flask import Response, request
 from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
@@ -202,12 +200,14 @@ class NovaListServersApi(Resource):
 
                 resp['servers'].append(s)
 
-            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response = Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the list of servers." % __name__)
             return ex.message, 500
 
     def post(self, id):
@@ -227,12 +227,12 @@ class NovaListServersApi(Resource):
 
             if self.api.compute.find_server_by_name_or_id(name) is not None:
                 LOG.error("Server with name %s already exists. 409" % name)
-                return Response("Server with name %s already exists." % name, status=409)
+                return Response(
+                    "Server with name %s already exists." % name, status=409)
             # TODO: not finished!
-            resp = dict()
-
             server = self.api.compute.create_server(name)
-            server.full_name = str(self.api.compute.dc.label) + "_" + server_dict["name"]
+            server.full_name = str(
+                self.api.compute.dc.label) + "_" + server_dict["name"]
             server.template_name = server_dict["name"]
             if "metadata" in server_dict:
                 server.properties = server_dict["metadata"]
@@ -246,11 +246,13 @@ class NovaListServersApi(Resource):
 
             if networks is not None:
                 for net in networks:
-                    port = self.api.compute.find_port_by_name_or_id(net.get('port', ""))
+                    port = self.api.compute.find_port_by_name_or_id(
+                        net.get('port', ""))
                     if port is not None:
                         server.port_names.append(port.name)
                     else:
-                        return Response("Currently only networking by port is supported.", status=400)
+                        return Response(
+                            "Currently only networking by port is supported.", status=400)
 
             self.api.compute._start_compute(server)
 
@@ -301,12 +303,14 @@ class NovaListServersAndPortsApi(Resource):
 
                 resp['servers'].append(s)
 
-            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response = Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the list of servers." % __name__)
             return ex.message, 500
 
 
@@ -363,12 +367,14 @@ class NovaListServersDetailed(Resource):
 
                 resp['servers'].append(s)
 
-            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response = Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the list of servers." % __name__)
             return ex.message, 500
 
 
@@ -399,12 +405,14 @@ class NovaListFlavors(Resource):
                                                                             flavor.id)}]
                 resp['flavors'].append(f)
 
-            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response = Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the list of servers." % __name__)
             return ex.message, 500
 
     def post(self, id):
@@ -424,7 +432,8 @@ class NovaListFlavors(Resource):
                                                                        id,
                                                                        f.id)}]
         resp = {"flavor": data}
-        return Response(json.dumps(resp), status=200, mimetype="application/json")
+        return Response(json.dumps(resp), status=200,
+                        mimetype="application/json")
 
 
 class NovaListFlavorsDetails(Resource):
@@ -463,12 +472,14 @@ class NovaListFlavorsDetails(Resource):
                 f['rxtx_factor'] = 1.0
                 resp['flavors'].append(f)
 
-            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response = Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the list of servers." % __name__)
             return ex.message, 500
 
     def post(self, id):
@@ -488,7 +499,8 @@ class NovaListFlavorsDetails(Resource):
                                                                        id,
                                                                        f.id)}]
         resp = {"flavor": data}
-        return Response(json.dumps(resp), status=200, mimetype="application/json")
+        return Response(json.dumps(resp), status=200,
+                        mimetype="application/json")
 
 
 class NovaListFlavorById(Resource):
@@ -522,12 +534,14 @@ class NovaListFlavorById(Resource):
                                                                                      self.api.port,
                                                                                      id,
                                                                                      flavor.id)}]
-            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response = Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve flavor with id %s" % (__name__, flavorid))
+            LOG.exception(u"%s: Could not retrieve flavor with id %s" %
+                          (__name__, flavorid))
             return ex.message, 500
 
     def delete(self, id, flavorid):
@@ -565,12 +579,14 @@ class NovaListImages(Resource):
                                                                            id,
                                                                            image.id)}]
                 resp['images'].append(f)
-            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response = Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the list of images." % __name__)
             return ex.message, 500
 
 
@@ -609,12 +625,14 @@ class NovaListImagesDetails(Resource):
                 }
                 resp['images'].append(f)
 
-            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response = Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the list of images." % __name__)
             return ex.message, 500
 
 
@@ -642,14 +660,17 @@ class NovaListImageById(Resource):
                     i['id'] = image.id
                     i['name'] = image.name
 
-                    return Response(json.dumps(resp), status=200, mimetype="application/json")
+                    return Response(json.dumps(resp), status=200,
+                                    mimetype="application/json")
 
-            response = Response("Image with id or name %s does not exists." % imageid, status=404)
+            response = Response(
+                "Image with id or name %s does not exists." % imageid, status=404)
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve image with id %s." % (__name__, imageid))
+            LOG.exception(u"%s: Could not retrieve image with id %s." %
+                          (__name__, imageid))
             return ex.message, 500
 
     def delete(self, id, imageid):
@@ -680,7 +701,8 @@ class NovaShowServerDetails(Resource):
         try:
             server = self.api.compute.find_server_by_name_or_id(serverid)
             if server is None:
-                return Response("Server with id or name %s does not exists." % serverid, status=404)
+                return Response(
+                    "Server with id or name %s does not exists." % serverid, status=404)
             s = server.create_server_dict()
             s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
                                                                         self.api.port,
@@ -714,12 +736,14 @@ class NovaShowServerDetails(Resource):
                 ]
             }
 
-            response = Response(json.dumps({'server': s}), status=200, mimetype="application/json")
+            response = Response(json.dumps(
+                {'server': s}), status=200, mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the server details." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the server details." % __name__)
             return ex.message, 500
 
     def delete(self, id, serverid):
@@ -737,7 +761,8 @@ class NovaShowServerDetails(Resource):
         try:
             server = self.api.compute.find_server_by_name_or_id(serverid)
             if server is None:
-                return Response('Could not find server.', status=404, mimetype="application/json")
+                return Response('Could not find server.',
+                                status=404, mimetype="application/json")
 
             self.api.compute.stop_compute(server)
 
@@ -769,11 +794,13 @@ class NovaInterfaceToServer(Resource):
         try:
             server = self.api.compute.find_server_by_name_or_id(serverid)
             if server is None:
-                return Response("Server with id or name %s does not exists." % serverid, status=404)
+                return Response(
+                    "Server with id or name %s does not exists." % serverid, status=404)
 
             if server.emulator_compute is None:
                 LOG.error("The targeted container does not exist.")
-                return Response("The targeted container of %s does not exist." % serverid, status=404)
+                return Response(
+                    "The targeted container of %s does not exist." % serverid, status=404)
             data = json.loads(request.data).get("interfaceAttachment")
             resp = dict()
             port = data.get("port_id", None)
@@ -791,7 +818,8 @@ class NovaInterfaceToServer(Resource):
             elif net is not None:
                 network = self.api.compute.find_network_by_name_or_id(net)
                 if network is None:
-                    return Response("Network with id or name %s does not exists." % net, status=404)
+                    return Response(
+                        "Network with id or name %s does not exists." % net, status=404)
                 port = self.api.compute.create_port("port:cp%s:fl:%s" %
                                                     (len(self.api.compute.ports), str(uuid.uuid4())))
 
@@ -804,10 +832,12 @@ class NovaInterfaceToServer(Resource):
                 port = self.api.compute.find_port_by_name_or_id(port)
                 network_dict['id'] = port.intf_name
                 network_dict['ip'] = port.ip_address
-                network = self.api.compute.find_network_by_name_or_id(port.net_name)
+                network = self.api.compute.find_network_by_name_or_id(
+                    port.net_name)
                 network_dict[network_dict['id']] = network.name
             else:
-                raise Exception("You can only attach interfaces by port or network at the moment")
+                raise Exception(
+                    "You can only attach interfaces by port or network at the moment")
 
             if network == self.api.manage.floating_network:
                 dc.net.addLink(server.emulator_compute, self.api.manage.floating_switch,
@@ -817,19 +847,22 @@ class NovaInterfaceToServer(Resource):
                                params1=network_dict, cls=Link, intfName1=port.intf_name)
             resp["port_state"] = "ACTIVE"
             resp["port_id"] = port.id
-            resp["net_id"] = self.api.compute.find_network_by_name_or_id(port.net_name).id
+            resp["net_id"] = self.api.compute.find_network_by_name_or_id(
+                port.net_name).id
             resp["mac_addr"] = port.mac_address
             resp["fixed_ips"] = list()
             fixed_ips = dict()
             fixed_ips["ip_address"] = port.ip_address
             fixed_ips["subnet_id"] = network.subnet_name
             resp["fixed_ips"].append(fixed_ips)
-            response = Response(json.dumps({"interfaceAttachment": resp}), status=202, mimetype="application/json")
+            response = Response(json.dumps(
+                {"interfaceAttachment": resp}), status=202, mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not add interface to the server." % __name__)
+            LOG.exception(
+                u"%s: Could not add interface to the server." % __name__)
             return ex.message, 500
 
 
@@ -855,14 +888,16 @@ class NovaShowAndDeleteInterfaceAtServer(Resource):
         try:
             server = self.api.compute.find_server_by_name_or_id(serverid)
             if server is None:
-                return Response("Server with id or name %s does not exists." % serverid, status=404)
+                return Response(
+                    "Server with id or name %s does not exists." % serverid, status=404)
             port = self.api.compute.find_port_by_name_or_id(port_id)
             if port is None:
-                return Response("Port with id or name %s does not exists." % port_id, status=404)
+                return Response(
+                    "Port with id or name %s does not exists." % port_id, status=404)
 
             for link in self.api.compute.dc.net.links:
                 if str(link.intf1) == port.intf_name and \
-                                str(link.intf1.ip) == port.ip_address.split('/')[0]:
+                        str(link.intf1.ip) == port.ip_address.split('/')[0]:
                     self.api.compute.dc.net.removeLink(link)
                     break
 
@@ -871,7 +906,8 @@ class NovaShowAndDeleteInterfaceAtServer(Resource):
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not detach interface from the server." % __name__)
+            LOG.exception(
+                u"%s: Could not detach interface from the server." % __name__)
             return ex.message, 500
 
 
@@ -919,10 +955,12 @@ class NovaLimits(Resource):
                     "rate": []
                 }
             }
-            response = Response(json.dumps(resp), status=200, mimetype="application/json")
+            response = Response(json.dumps(resp), status=200,
+                                mimetype="application/json")
             response.headers['Access-Control-Allow-Origin'] = '*'
             return response
 
         except Exception as ex:
-            LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
+            LOG.exception(
+                u"%s: Could not retrieve the list of images." % __name__)
             return ex.message, 500
index e252c1e..d888119 100755 (executable)
@@ -1,41 +1,25 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from instance_flavor import InstanceFlavor
-from model import Model
-from net import Net
-from port import Port
-from port_pair import PortPair
-from port_pair_group import PortPairGroup
-from flow_classifier import FlowClassifier
-from port_chain import PortChain
-from resource import Resource
-from router import Router
-from server import Server
-from stack import Stack
-from template import Template
-from image import Image
\ No newline at end of file
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 5b50c92..284fee8 100644 (file)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import uuid
 
 
index 715d0f9..3d9c66b 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import uuid
 from datetime import datetime
 
index 90f7fe5..1e7b6f6 100755 (executable)
@@ -1,35 +1,34 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import uuid
 
 
 class InstanceFlavor:
-    def __init__(self, name, cpu=None, memory=None, memory_unit=None, storage=None, storage_unit=None):
+    def __init__(self, name, cpu=None, memory=None,
+                 memory_unit=None, storage=None, storage_unit=None):
         self.id = str(uuid.uuid4())
         self.name = name
         self.cpu = cpu
index 39be655..0938203 100755 (executable)
@@ -1,32 +1,33 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
 
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
 class LoadBalancer(object):
-    def __init__(self, name, id=None, flavor=None, image=None, command=None, nw_list=None):
+    def __init__(self, name, id=None, flavor=None,
+                 image=None, command=None, nw_list=None):
         self.name = name
         self.id = id  # not set
         self.out_ports = dict()
index ea5c054..ae7ceff 100755 (executable)
@@ -1,30 +1,30 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
 
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
 class Model:
     def __init__(self, resources=None):
         if not resources:
index db38348..112cca6 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import re
 
 
@@ -63,9 +61,11 @@ class Net:
         if self.start_end_dict is None:
             return None
 
-        int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 2  # First address as network address not usable
+        # First address as network address not usable
+        int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 2
         # Second one is for gateways only
-        int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1  # Last address for broadcasts
+        # Last address for broadcasts
+        int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1
         while int_start_ip in self._issued_ip_addresses and int_start_ip <= int_end_ip:
             int_start_ip += 1
 
@@ -90,8 +90,10 @@ class Net:
         if int_ip in self._issued_ip_addresses:
             return False
 
-        int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 1  # First address as network address not usable
-        int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1  # Last address for broadcasts
+        # First address as network address not usable
+        int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 1
+        # Last address for broadcasts
+        int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1
         if int_ip < int_start_ip or int_ip > int_end_ip:
             return False
 
@@ -110,7 +112,7 @@ class Net:
         """
         int_ip = Net.cidr_2_int(cidr)
 
-        if not int_ip in self._issued_ip_addresses:
+        if int_ip not in self._issued_ip_addresses:
             return False
 
         if self._issued_ip_addresses[int_ip] == port_name:
@@ -285,14 +287,16 @@ class Net:
         :rtype: ``dict``
         """
         network_dict = dict()
-        network_dict["status"] = "ACTIVE"  # TODO do we support inactive networks?
-        if self.subnet_id == None:
+        # TODO do we support inactive networks?
+        network_dict["status"] = "ACTIVE"
+        if self.subnet_id is None:
             network_dict["subnets"] = []
         else:
             network_dict["subnets"] = [self.subnet_id]
         network_dict["name"] = self.name
         network_dict["admin_state_up"] = True  # TODO is it always true?
-        network_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"  # TODO what should go in here
+        # TODO what should go in here
+        network_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"
         network_dict["id"] = self.id
         network_dict["shared"] = False  # TODO is it always false?
         return network_dict
@@ -307,7 +311,8 @@ class Net:
         subnet_dict = dict()
         subnet_dict["name"] = self.subnet_name
         subnet_dict["network_id"] = self.id
-        subnet_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"  # TODO what should go in here?
+        # TODO what should go in here?
+        subnet_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"
         subnet_dict["created_at"] = self.subnet_creation_time
         subnet_dict["dns_nameservers"] = []
         subnet_dict["allocation_pools"] = [self.start_end_dict]
@@ -322,10 +327,10 @@ class Net:
 
     def __eq__(self, other):
         if self.name == other.name and self.subnet_name == other.subnet_name and \
-                        self.gateway_ip == other.gateway_ip and \
-                        self.segmentation_id == other.segmentation_id and \
-                        self._cidr == other._cidr and \
-                        self.start_end_dict == other.start_end_dict:
+                self.gateway_ip == other.gateway_ip and \
+                self.segmentation_id == other.segmentation_id and \
+                self._cidr == other._cidr and \
+                self.start_end_dict == other.start_end_dict:
             return True
         return False
 
index 2e4d693..1d82e92 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 import threading
 import uuid
@@ -34,7 +32,8 @@ intf_names = dict()
 
 
 class Port:
-    def __init__(self, name, ip_address=None, mac_address=None, floating_ip=None):
+    def __init__(self, name, ip_address=None,
+                 mac_address=None, floating_ip=None):
         self.name = name
         self.intf_name = None
         self.id = str(uuid.uuid4())
@@ -80,13 +79,13 @@ class Port:
         if len(split_name) >= 3:
             if split_name[2] == 'input' or split_name[2] == 'in':
                 self.intf_name = split_name[0][:4] + '-' + \
-                                 'in'
+                    'in'
             elif split_name[2] == 'output' or split_name[2] == 'out':
                 self.intf_name = split_name[0][:4] + '-' + \
-                                 'out'
+                    'out'
             else:
                 self.intf_name = split_name[0][:4] + '-' + \
-                                 split_name[2][:4]
+                    split_name[2][:4]
         else:
             self.intf_name = self.name[:9]
 
@@ -96,12 +95,14 @@ class Port:
         global intf_names
         intf_len = len(self.intf_name)
         self.intf_name = self.intf_name + '-' + str(counter)[:4]
-        while self.intf_name in intf_names and counter < 999 and not intf_names[self.intf_name][0] == self.id:
+        while self.intf_name in intf_names and counter < 999 and not intf_names[
+                self.intf_name][0] == self.id:
             counter += 1
             self.intf_name = self.intf_name[:intf_len] + '-' + str(counter)[:4]
 
         if counter >= 1000:
-            logging.ERROR("Port %s could not create unique interface name (%s)", self.name, self.intf_name)
+            logging.ERROR(
+                "Port %s could not create unique interface name (%s)", self.name, self.intf_name)
             lock.release()
             return
 
@@ -132,7 +133,8 @@ class Port:
         """
         port_dict = dict()
         port_dict["admin_state_up"] = True  # TODO is it always true?
-        port_dict["device_id"] = "257614cc-e178-4c92-9c61-3b28d40eca44"  # TODO find real values
+        # TODO find real values
+        port_dict["device_id"] = "257614cc-e178-4c92-9c61-3b28d40eca44"
         port_dict["device_owner"] = ""  # TODO do we have such things?
         net = compute.find_network_by_name_or_id(self.net_name)
         port_dict["fixed_ips"] = [
@@ -146,7 +148,8 @@ class Port:
         port_dict["name"] = self.name
         port_dict["network_id"] = net.id if net is not None else ""
         port_dict["status"] = "ACTIVE"  # TODO do we support inactive port?
-        port_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"  # TODO find real tenant_id
+        # TODO find real tenant_id
+        port_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"
         return port_dict
 
     def compare_attributes(self, other):
@@ -163,7 +166,7 @@ class Port:
             return False
 
         if self.name == other.name and self.floating_ip == other.floating_ip and \
-                                       self.net_name == other.net_name:
+                self.net_name == other.net_name:
             return True
         return False
 
@@ -172,9 +175,9 @@ class Port:
             return False
 
         if self.name == other.name and self.ip_address == other.ip_address and \
-                        self.mac_address == other.mac_address and \
-                        self.floating_ip == other.floating_ip and \
-                        self.net_name == other.net_name:
+                self.mac_address == other.mac_address and \
+                self.floating_ip == other.floating_ip and \
+                self.net_name == other.net_name:
             return True
         return False
 
index d82c9b1..dfdd2d3 100644 (file)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import random
 import uuid
 import logging
@@ -40,7 +38,8 @@ class PortChain(object):
         self.flow_classifiers = list()
         self.chain_parameters = dict()
 
-        # Cookie for internal identification of installed flows (e.g. to delete them)
+        # Cookie for internal identification of installed flows (e.g. to delete
+        # them)
         self.cookie = random.randint(1, 0xffffffff)
 
     def create_dict(self, compute):
@@ -56,13 +55,16 @@ class PortChain(object):
 
     def install(self, compute):
         for flow_classifier_id in self.flow_classifiers:
-            flow_classifier = compute.find_flow_classifier_by_name_or_id(flow_classifier_id)
+            flow_classifier = compute.find_flow_classifier_by_name_or_id(
+                flow_classifier_id)
             if flow_classifier:
                 pass
-                # TODO: for every flow classifier create match and pass it to setChain
+                # TODO: for every flow classifier create match and pass it to
+                # setChain
 
         for group_id in self.port_pair_groups:
-            port_pair_group = compute.find_port_pair_group_by_name_or_id(group_id)
+            port_pair_group = compute.find_port_pair_group_by_name_or_id(
+                group_id)
             for port_pair_id in port_pair_group.port_pairs:
                 port_pair = compute.find_port_pair_by_name_or_id(port_pair_id)
 
@@ -76,10 +78,12 @@ class PortChain(object):
 
                 # TODO: Not sure, if this should throw an error
                 if not server_ingress:
-                    logging.warn("Neutron SFC: ingress port %s not connected." % str(port_pair.ingress.name))
+                    logging.warn("Neutron SFC: ingress port %s not connected." % str(
+                        port_pair.ingress.name))
                     continue
                 if not server_egress:
-                    logging.warn("Neutron SFC: egress port %s not connected." % str(port_pair.egress.name))
+                    logging.warn("Neutron SFC: egress port %s not connected." % str(
+                        port_pair.egress.name))
                     continue
 
                 compute.dc.net.setChain(
index 52a4ba0..a2e31bc 100644 (file)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import uuid
 
 
index 75a5d0e..fd97917 100644 (file)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import uuid
 
 
index efa33b1..9a33ebf 100755 (executable)
@@ -1,30 +1,30 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
 
 
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
 class Resource:
     def __init__(self, name, type=None, properties=None):
         self.name = name
index 27abf53..3d30e1b 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import uuid
 
 
@@ -39,6 +37,6 @@ class Router:
 
     def __eq__(self, other):
         if self.name == other.name and len(self.subnet_names) == len(other.subnet_names) and \
-                        set(self.subnet_names) == set(other.subnet_names):
+                set(self.subnet_names) == set(other.subnet_names):
             return True
         return False
index 2224de9..48f1898 100755 (executable)
@@ -1,32 +1,33 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
 
 
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
 class Server(object):
-    def __init__(self, name, id=None, flavor=None, image=None, command=None, nw_list=None):
+    def __init__(self, name, id=None, flavor=None,
+                 image=None, command=None, nw_list=None):
         self.name = name
         self.full_name = None
         self.template_name = None
@@ -50,19 +51,19 @@ class Server(object):
         :rtype: ``bool``
         """
         if self.name == other.name and self.full_name == other.full_name and \
-                        self.flavor == other.flavor and \
-                        self.image == other.image and \
-                        self.command == other.command:
+                self.flavor == other.flavor and \
+                self.image == other.image and \
+                self.command == other.command:
             return True
         return False
 
     def __eq__(self, other):
         if self.name == other.name and self.full_name == other.full_name and \
-                        self.flavor == other.flavor and \
-                        self.image == other.image and \
-                        self.command == other.command and \
-                        len(self.port_names) == len(other.port_names) and \
-                        set(self.port_names) == set(other.port_names):
+                self.flavor == other.flavor and \
+                self.image == other.image and \
+                self.command == other.command and \
+                len(self.port_names) == len(other.port_names) and \
+                set(self.port_names) == set(other.port_names):
             return True
         return False
 
index 453e20c..cabaffa 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import uuid
 
 
index 5b1ebe9..bbb7713 100755 (executable)
@@ -1,30 +1,30 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 
 
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
 
 
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
 class Template:
     def __init__(self, resources=None):
         self.version = '2015-04-30'
index 395c0ce..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 78d96da..5120b9e 100755 (executable)
@@ -1,36 +1,33 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 from flask_restful import Resource
 from flask import request
 import json
 import threading
-from copy import deepcopy
 
 logging.basicConfig()
 
@@ -61,7 +58,7 @@ class Compute(Resource):
         data = request.json
         if data is None:
             data = {}
-        elif type(data) is not dict:
+        elif not isinstance(data, dict):
             data = json.loads(request.json)
 
         network = data.get("network")
@@ -84,10 +81,12 @@ class Compute(Resource):
                 env = config.get("Env", list())
                 for env_var in env:
                     var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
-                    logging.debug("%r = %r" % (var , cmd))
-                    if var=="SON_EMU_CMD" or var=="VIM_EMU_CMD":
-                        logging.info("Executing entry point script in %r: %r" % (c.name, cmd))
-                        # execute command in new thread to ensure that API is not blocked by VNF
+                    logging.debug("%r = %r" % (var, cmd))
+                    if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
+                        logging.info(
+                            "Executing entry point script in %r: %r" % (c.name, cmd))
+                        # execute command in new thread to ensure that API is
+                        # not blocked by VNF
                         t = threading.Thread(target=c.cmdPrint, args=(cmd,))
                         t.daemon = True
                         t.start()
@@ -105,7 +104,8 @@ class Compute(Resource):
         logging.debug("API CALL: compute status")
 
         try:
-            return dcs.get(dc_label).containers.get(compute_name).getStatus(), 200, CORS_HEADER
+            return dcs.get(dc_label).containers.get(
+                compute_name).getStatus(), 200, CORS_HEADER
         except Exception as ex:
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
@@ -113,7 +113,8 @@ class Compute(Resource):
     def delete(self, dc_label, compute_name):
         logging.debug("API CALL: compute stop")
         try:
-            return dcs.get(dc_label).stopCompute(compute_name), 200, CORS_HEADER
+            return dcs.get(dc_label).stopCompute(
+                compute_name), 200, CORS_HEADER
         except Exception as ex:
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
@@ -152,20 +153,25 @@ class ComputeList(Resource):
                     all_containers += dc.listCompute()
                     all_extSAPs += dc.listExtSAPs()
 
-                extSAP_list = [(sap.name, sap.getStatus()) for sap in all_extSAPs]
-                container_list = [(c.name, c.getStatus()) for c in all_containers]
+                extSAP_list = [(sap.name, sap.getStatus())
+                               for sap in all_extSAPs]
+                container_list = [(c.name, c.getStatus())
+                                  for c in all_containers]
                 total_list = container_list + extSAP_list
                 return total_list, 200, CORS_HEADER
             else:
                 # return list of compute nodes for specified DC
-                container_list = [(c.name, c.getStatus()) for c in dcs.get(dc_label).listCompute()]
-                extSAP_list = [(sap.name, sap.getStatus()) for sap in dcs.get(dc_label).listExtSAPs()]
+                container_list = [(c.name, c.getStatus())
+                                  for c in dcs.get(dc_label).listCompute()]
+                extSAP_list = [(sap.name, sap.getStatus())
+                               for sap in dcs.get(dc_label).listExtSAPs()]
                 total_list = container_list + extSAP_list
                 return total_list, 200, CORS_HEADER
         except Exception as ex:
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
 
+
 class ComputeResources(Resource):
     """
     Update the container's resources using the docker.update function
@@ -206,8 +212,9 @@ class ComputeResources(Resource):
         # then no data
         if params is None:
             params = {}
-        logging.debug("REST CALL: update container resources {0}".format(params))
-        #check if container exists
+        logging.debug(
+            "REST CALL: update container resources {0}".format(params))
+        # check if container exists
         d = dcs.get(dc_label).net.getNodeByName(compute_name)
 
         # general request of cpu percentage
@@ -217,24 +224,26 @@ class ComputeResources(Resource):
             cpu_period = int(dcs.get(dc_label).net.cpu_period)
             value = params.get('cpu_bw')
             cpu_quota = int(cpu_period * float(value))
-            #put default values back
+            # put default values back
             if float(value) <= 0:
                 cpu_period = 100000
                 cpu_quota = -1
             params['cpu_period'] = cpu_period
             params['cpu_quota'] = cpu_quota
-            #d.updateCpuLimit(cpu_period=cpu_period, cpu_quota=cpu_quota)
+            # d.updateCpuLimit(cpu_period=cpu_period, cpu_quota=cpu_quota)
 
         # only pass allowed keys to docker
         allowed_keys = ['blkio_weight', 'cpu_period', 'cpu_quota', 'cpu_shares', 'cpuset_cpus',
                         'cpuset_mems', 'mem_limit', 'mem_reservation', 'memswap_limit',
                         'kernel_memory', 'restart_policy']
-        filtered_params = {key:params[key] for key in allowed_keys if key in params}
+        filtered_params = {key: params[key]
+                           for key in allowed_keys if key in params}
 
         d.update_resources(**filtered_params)
 
         return d
 
 
         d.update_resources(**filtered_params)
 
         return d
 
+
 class DatacenterList(Resource):
     global dcs
 
 class DatacenterList(Resource):
     global dcs
 
index bf0f26b..094f122 100755 (executable)
@@ -1,41 +1,35 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Distributed Cloud Emulator (dcemulator)
-Networking and monitoring functions
-(c) 2015 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+#
+# Distributed Cloud Emulator (dcemulator)
+# Networking and monitoring functions
+# (c) 2015 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
 import logging
-from flask_restful import Resource, reqparse
+from flask_restful import Resource
 from flask import request
-import json
 
 logging.basicConfig()
 
@@ -44,7 +38,6 @@ CORS_HEADER = {'Access-Control-Allow-Origin': '*'}
 net = None
 
 
-
 class MonitorInterfaceAction(Resource):
     """
     Monitor the counters of a VNF interface
@@ -68,11 +61,13 @@ class MonitorInterfaceAction(Resource):
 
         try:
             if cookie:
-                c = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+                c = net.monitor_agent.setup_flow(
+                    vnf_name, vnf_interface, metric, cookie)
             else:
-                c = net.monitor_agent.setup_metric(vnf_name, vnf_interface, metric)
+                c = net.monitor_agent.setup_metric(
+                    vnf_name, vnf_interface, metric)
             # return monitor message response
-            return  str(c), 200, CORS_HEADER
+            return str(c), 200, CORS_HEADER
         except Exception as ex:
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
@@ -90,9 +85,11 @@ class MonitorInterfaceAction(Resource):
 
         try:
             if cookie:
-                c = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+                c = net.monitor_agent.stop_flow(
+                    vnf_name, vnf_interface, metric, cookie)
             else:
-                c = net.monitor_agent.stop_metric(vnf_name, vnf_interface, metric)
+                c = net.monitor_agent.stop_metric(
+                    vnf_name, vnf_interface, metric)
             # return monitor message response
             return str(c), 200, CORS_HEADER
         except Exception as ex:
@@ -123,7 +120,8 @@ class MonitorFlowAction(Resource):
         cookie = data.get("cookie", 0)
 
         try:
-            c = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+            c = net.monitor_agent.setup_flow(
+                vnf_name, vnf_interface, metric, cookie)
             # return monitor message response
             return str(c), 200, CORS_HEADER
         except Exception as ex:
@@ -142,13 +140,15 @@ class MonitorFlowAction(Resource):
         cookie = data.get("cookie", 0)
 
         try:
-            c = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+            c = net.monitor_agent.stop_flow(
+                vnf_name, vnf_interface, metric, cookie)
             # return monitor message response
             return str(c), 200, CORS_HEADER
         except Exception as ex:
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
 
+
 class MonitorLinkAction(Resource):
     """
     Add or remove flow monitoring on chains between VNFs.
@@ -171,7 +171,8 @@ class MonitorLinkAction(Resource):
     :return: message string indicating if the chain action is succesful or not
     """
 
-    # the global net is set from the topology file, and connected via connectDCNetwork function in rest_api_endpoint.py
+    # the global net is set from the topology file, and connected via
+    # connectDCNetwork function in rest_api_endpoint.py
     global net
 
     def put(self):
@@ -222,7 +223,7 @@ class MonitorLinkAction(Resource):
             monitor = data.get("monitor")
             monitor_placement = data.get("monitor_placement")
 
-            #first install monitor flow
+            # first install monitor flow
             c1 = net.setChain(
                 vnf_src_name, vnf_dst_name,
                 vnf_src_interface=vnf_src_interface,
@@ -237,7 +238,7 @@ class MonitorLinkAction(Resource):
                 monitor=monitor,
                 monitor_placement=monitor_placement)
 
-            #then export monitor flow
+            # then export monitor flow
             metric = data.get("metric")
             if 'rx' in monitor_placement:
                 vnf_name = vnf_dst_name
@@ -248,9 +249,11 @@ class MonitorLinkAction(Resource):
 
             c2 = 'command unknown'
             if command == 'add-flow':
-                c2 = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+                c2 = net.monitor_agent.setup_flow(
+                    vnf_name, vnf_interface, metric, cookie)
             elif command == 'del-flows':
-                c2 = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+                c2 = net.monitor_agent.stop_flow(
+                    vnf_name, vnf_interface, metric, cookie)
 
             # return setChain response
             return (str(c1) + " " + str(c2)), 200, CORS_HEADER
@@ -258,6 +261,7 @@ class MonitorLinkAction(Resource):
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
 
+
 class MonitorSkewAction(Resource):
     """
     Monitor the counters of a VNF interface
@@ -277,10 +281,11 @@ class MonitorSkewAction(Resource):
         resource_name = data.get("resource_name", 'cpu')
         try:
             # configure skewmon
-            c = net.monitor_agent.update_skewmon(vnf_name, resource_name, action='start')
+            c = net.monitor_agent.update_skewmon(
+                vnf_name, resource_name, action='start')
 
             # return monitor message response
-            return  str(c), 200, CORS_HEADER
+            return str(c), 200, CORS_HEADER
         except Exception as ex:
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
@@ -295,7 +300,8 @@ class MonitorSkewAction(Resource):
         resource_name = data.get("resource_name", 'cpu')
         try:
             # configure skewmon
-            c = net.monitor_agent.update_skewmon(vnf_name, resource_name, action='stop')
+            c = net.monitor_agent.update_skewmon(
+                vnf_name, resource_name, action='stop')
 
             # return monitor message response
             return str(c), 200, CORS_HEADER
@@ -303,6 +309,7 @@ class MonitorSkewAction(Resource):
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
 
+
 class MonitorTerminal(Resource):
     """
     start a terminal for the selected VNFs
@@ -323,7 +330,7 @@ class MonitorTerminal(Resource):
             c = net.monitor_agent.term(vnf_list)
 
             # return monitor message response
-            return  str(c), 200, CORS_HEADER
+            return str(c), 200, CORS_HEADER
         except Exception as ex:
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
index 9aab1e6..e8c6b9a 100755 (executable)
@@ -1,48 +1,43 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Distributed Cloud Emulator (dcemulator)
-Networking and monitoring functions
-(c) 2015 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+#
+# Distributed Cloud Emulator (dcemulator)
+# Networking and monitoring functions
+# (c) 2015 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
 import logging
 from flask_restful import Resource
 from flask import request
-import json
 import networkx
 
 logging.basicConfig()
 
 CORS_HEADER = {'Access-Control-Allow-Origin': '*'}
 
-# the global net is set from the topology file, and connected via connectDCNetwork function in rest_api_endpoint.py
+# the global net is set from the topology file, and connected via
+# connectDCNetwork function in rest_api_endpoint.py
 net = None
 
 
 net = None
 
 
@@ -135,9 +130,8 @@ class DrawD3jsgraph(Resource):
         node_attr = networkx.get_node_attributes(net.DCNetwork_graph, 'type')
         for node_name in net.DCNetwork_graph.nodes():
             nodes2.append(node_name)
-            node_index = nodes2.index(node_name)
             type = node_attr[node_name]
-            node_dict = {"name":node_name,"group":type}
+            node_dict = {"name": node_name, "group": type}
             nodes.append(node_dict)
 
         # add links between other DCs
@@ -145,8 +139,9 @@ class DrawD3jsgraph(Resource):
             node1_index = nodes2.index(node1_name)
             for node2_name in net.DCNetwork_graph.neighbors(node1_name):
                 node2_index = nodes2.index(node2_name)
-                edge_dict = {"source": node1_index, "target": node2_index, "value": 10}
+                edge_dict = {"source": node1_index,
+                             "target": node2_index, "value": 10}
                 links.append(edge_dict)
 
-        json = {"nodes":nodes, "links":links}
+        json = {"nodes": nodes, "links": links}
         return json, 200, CORS_HEADER
src/emuvim/api/rest/rest_api_endpoint.py
index a980dc9..4f9d6d8 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 
 import logging
 import threading
 
 import logging
 import threading
@@ -34,7 +32,7 @@ from gevent.pywsgi import WSGIServer
 
 # need to import total module to set its global variable dcs
 import compute
 
 # need to import total module to set its global variable dcs
 import compute
-from compute import dcs, ComputeList, Compute, ComputeResources, DatacenterList, DatacenterStatus
+from compute import ComputeList, Compute, ComputeResources, DatacenterList, DatacenterStatus
 
 # need to import total module to set its global variable net
 import network
 
 # need to import total module to set its global variable net
 import network
@@ -60,31 +58,36 @@ class RestApiEndpoint(object):
         self.ip = listenip
         self.port = port
 
-        # connect this DC network to the rest api endpoint (needed for the networking and monitoring api)
+        # connect this DC network to the rest api endpoint (needed for the
+        # networking and monitoring api)
         self.connectDCNetwork(DCnetwork)
 
         # setup Flask
         # find directory of dashboard files
-        dashboard_file = pkg_resources.resource_filename('emuvim.dashboard', "index.html")
+        dashboard_file = pkg_resources.resource_filename(
+            'emuvim.dashboard', "index.html")
         dashboard_dir = path.dirname(dashboard_file)
         logging.info("Started emu dashboard: {0}".format(dashboard_dir))
 
-        self.app = Flask(__name__, static_folder=dashboard_dir, static_url_path='/dashboard')
+        self.app = Flask(__name__, static_folder=dashboard_dir,
+                         static_url_path='/dashboard')
         self.api = Api(self.app)
 
         # setup endpoints
 
         # compute related actions (start/stop VNFs, get info)
-        self.api.add_resource(Compute, "/restapi/compute/<dc_label>/<compute_name>")
+        self.api.add_resource(
+            Compute, "/restapi/compute/<dc_label>/<compute_name>")
         self.api.add_resource(ComputeList,
-                      "/restapi/compute",
-                      "/restapi/compute/<dc_label>")
-        self.api.add_resource(ComputeResources, "/restapi/compute/resources/<dc_label>/<compute_name>")
+                              "/restapi/compute",
+                              "/restapi/compute/<dc_label>")
+        self.api.add_resource(
+            ComputeResources, "/restapi/compute/resources/<dc_label>/<compute_name>")
 
 
-        self.api.add_resource(DatacenterStatus, "/restapi/datacenter/<dc_label>")
+        self.api.add_resource(
+            DatacenterStatus, "/restapi/datacenter/<dc_label>")
         self.api.add_resource(DatacenterList, "/restapi/datacenter")
 
-
         # network related actions (setup chaining between VNFs)
         self.api.add_resource(NetworkAction,
                               "/restapi/network")
@@ -95,7 +98,8 @@ class RestApiEndpoint(object):
         # export a network interface traffic rate counter
         self.api.add_resource(MonitorInterfaceAction,
                               "/restapi/monitor/interface")
-        # export flow traffic counter, of a manually pre-installed flow entry, specified by its cookie
+        # export flow traffic counter, of a manually pre-installed flow entry,
+        # specified by its cookie
         self.api.add_resource(MonitorFlowAction,
                               "/restapi/monitor/flow")
         # install monitoring of a specific flow on a pre-existing link in the service.
@@ -110,9 +114,8 @@ class RestApiEndpoint(object):
         self.api.add_resource(MonitorTerminal,
                               "/restapi/monitor/term")
 
-
-        logging.debug("Created API endpoint %s(%s:%d)" % (self.__class__.__name__, self.ip, self.port))
-
+        logging.debug("Created API endpoint %s(%s:%d)" %
+                      (self.__class__.__name__, self.ip, self.port))
 
     def connectDatacenter(self, dc):
         compute.dcs[dc.label] = dc
@@ -130,18 +133,22 @@ class RestApiEndpoint(object):
         self.thread = threading.Thread(target=self._start_flask, args=())
         self.thread.daemon = True
         self.thread.start()
-        logging.info("Started API endpoint @ http://%s:%d" % (self.ip, self.port))
+        logging.info("Started API endpoint @ http://%s:%d" %
+                     (self.ip, self.port))
 
     def stop(self):
         if self.http_server:
             self.http_server.close()
 
     def _start_flask(self):
-        #self.app.run(self.ip, self.port, debug=False, use_reloader=False)
-        #this should be a more production-fit http-server
-        #self.app.logger.setLevel(logging.ERROR)
+        # self.app.run(self.ip, self.port, debug=False, use_reloader=False)
+        # this should be a more production-fit http-server
+        # self.app.logger.setLevel(logging.ERROR)
         self.http_server = WSGIServer((self.ip, self.port),
-                                 self.app,
-                                 log=open("/dev/null", "w")  # This disables HTTP request logs to not mess up the CLI when e.g. the auto-updated dashboard is used
-        )
+                                      self.app,
+                                      # This disables HTTP request logs to not
+                                      # mess up the CLI when e.g. the
+                                      # auto-updated dashboard is used
+                                      log=open("/dev/null", "w")
+                                      )
         self.http_server.serve_forever()
src/emuvim/api/sonata/__init__.py
index 879320c..44988d6 100755 (executable)
@@ -1,37 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-This module implements a simple REST API that behaves like SONATA's gatekeeper.
-
-It is only used to support the development of SONATA's SDK tools and to demonstrate
-the year 1 version of the emulator until the integration with WP4's orchestrator is done.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 import threading
 import dummygatekeeper as dgk
@@ -47,7 +38,7 @@ class SonataDummyGatekeeperEndpoint(object):
     """
 
     def __init__(self, listenip, port, deploy_sap=False, docker_management=False,
-                 auto_deploy=False,  auto_delete=False, sap_vnfd_path=None):
+                 auto_deploy=False, auto_delete=False, sap_vnfd_path=None):
         self.dcs = {}
         self.ip = listenip
         self.port = port
src/emuvim/api/sonata/dummygatekeeper.py
index f20483b..59b1900 100755 (executable)
@@ -1,37 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-This module implements a simple REST API that behaves like SONATA's gatekeeper.
-
-It is only used to support the development of SONATA's SDK tools and to demonstrate
-the year 1 version of the emulator until the integration with WP4's orchestrator is done.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 import os
 import uuid
@@ -39,7 +30,7 @@ import hashlib
 import zipfile
 import yaml
 import threading
-from docker import DockerClient, APIClient
+from docker import DockerClient
 from flask import Flask, request
 import flask_restful as fr
 from collections import defaultdict
@@ -49,6 +40,7 @@ from random import randint
 import ipaddress
 import copy
 import time
+from functools import reduce
 
 logging.basicConfig()
 LOG = logging.getLogger("sonata-dummy-gatekeeper")
@@ -62,28 +54,34 @@ CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
 # Enable Dockerfile build functionality
 BUILD_DOCKERFILE = False
 
-# flag to indicate that we run without the emulator (only the bare API for integration testing)
+# flag to indicate that we run without the emulator (only the bare API for
+# integration testing)
 GK_STANDALONE_MODE = False
 
 # should a new version of an image be pulled even if its available
 FORCE_PULL = False
 
 # Automatically deploy SAPs (endpoints) of the service as new containers
-# Attention: This is not a configuration switch but a global variable! Don't change its default value.
+# Attention: This is not a configuration switch but a global variable!
+# Don't change its default value.
 DEPLOY_SAP = False
 
-# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
+# flag to indicate if we use bidirectional forwarding rules in the
+# automatic chaining process
 BIDIRECTIONAL_CHAIN = False
 
-# override the management interfaces in the descriptors with default docker0 interfaces in the containers
+# override the management interfaces in the descriptors with default
+# docker0 interfaces in the containers
 USE_DOCKER_MGMT = False
 
-# automatically deploy uploaded packages (no need to execute son-access deploy --latest separately)
+# automatically deploy uploaded packages (no need to execute son-access
+# deploy --latest separately)
 AUTO_DEPLOY = False
 
 # and also automatically terminate any other running services
 AUTO_DELETE = False
 
+
 def generate_subnets(prefix, base, subnet_size=50, mask=24):
     # Generate a list of ipaddress in subnets
     r = list()
@@ -91,6 +89,8 @@ def generate_subnets(prefix, base, subnet_size=50, mask=24):
         subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
         r.append(ipaddress.ip_network(unicode(subnet)))
     return r
+
+
 # private subnet definitions for the generated interfaces
 # 10.10.xxx.0/24
 SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
@@ -100,18 +100,20 @@ ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
 ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
 
 # path to the VNFD for the SAP VNF that is deployed as internal SAP point
-SAP_VNFD=None
+SAP_VNFD = None
 
 # Time in seconds to wait for vnf stop scripts to execute fully
 VNF_STOP_WAIT_TIME = 5
 
 
 # Time in seconds to wait for vnf stop scripts to execute fully
 VNF_STOP_WAIT_TIME = 5
 
+
 class Gatekeeper(object):
 
     def __init__(self):
         self.services = dict()
         self.dcs = dict()
         self.net = None
 class Gatekeeper(object):
 
     def __init__(self):
         self.services = dict()
         self.dcs = dict()
         self.net = None
-        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
+        # used to generate short names for VNFs (Mininet limitation)
+        self.vnf_counter = 0
         LOG.info("Create SONATA dummy gatekeeper.")
 
     def register_service_package(self, service_uuid, service):
@@ -143,7 +145,8 @@ class Service(object):
         self.uuid = service_uuid
         self.package_file_hash = package_file_hash
         self.package_file_path = package_file_path
-        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
+        self.package_content_path = os.path.join(
+            CATALOG_FOLDER, "services/%s" % self.uuid)
         self.manifest = None
         self.nsd = None
         self.vnfds = dict()
@@ -194,9 +197,10 @@ class Service(object):
         self.instances[instance_uuid] = dict()
         self.instances[instance_uuid]["vnf_instances"] = list()
 
-        # 2. compute placement of this service instance (adds DC names to VNFDs)
+        # 2. compute placement of this service instance (adds DC names to
+        # VNFDs)
         if not GK_STANDALONE_MODE:
-            #self._calculate_placement(FirstDcPlacement)
+            # self._calculate_placement(FirstDcPlacement)
             self._calculate_placement(RoundRobinDcPlacementWithSAPs)
         # 3. start all vnfds that we have in the service (except SAPs)
         for vnf_id in self.vnfds:
@@ -216,9 +220,11 @@ class Service(object):
         if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
             vlinks = self.nsd["virtual_links"]
             # constituent virtual links are not checked
-            #fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
-            eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
-            elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
+            # fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
+            eline_fwd_links = [l for l in vlinks if (
+                l["connectivity_type"] == "E-Line")]
+            elan_fwd_links = [l for l in vlinks if (
+                l["connectivity_type"] == "E-LAN")]
 
             GK.net.deployed_elines.extend(eline_fwd_links)
             GK.net.deployed_elans.extend(elan_fwd_links)
 
             GK.net.deployed_elines.extend(eline_fwd_links)
             GK.net.deployed_elans.extend(elan_fwd_links)
@@ -229,8 +235,10 @@ class Service(object):
             # 5b. deploy E-LAN links
             self._connect_elans(elan_fwd_links, instance_uuid)
 
             # 5b. deploy E-LAN links
             self._connect_elans(elan_fwd_links, instance_uuid)
 
-        # 6. run the emulator specific entrypoint scripts in the VNFIs of this service instance
-        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])
+        # 6. run the emulator specific entrypoint scripts in the VNFIs of this
+        # service instance
+        self._trigger_emulator_start_scripts_in_vnfis(
+            self.instances[instance_uuid]["vnf_instances"])
 
         LOG.info("Service started. Instance id: %r" % instance_uuid)
         return instance_uuid
 
         LOG.info("Service started. Instance id: %r" % instance_uuid)
         return instance_uuid
@@ -248,7 +256,8 @@ class Service(object):
         # instance_uuid = str(self.uuid.uuid4())
         vnf_instances = self.instances[instance_uuid]["vnf_instances"]
 
         # instance_uuid = str(self.uuid.uuid4())
         vnf_instances = self.instances[instance_uuid]["vnf_instances"]
 
-        # trigger stop skripts in vnf instances and wait a few seconds for completion
+        # trigger stop skripts in vnf instances and wait a few seconds for
+        # completion
         self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
         time.sleep(VNF_STOP_WAIT_TIME)
 
@@ -259,7 +268,8 @@ class Service(object):
             ext_sap = self.saps[sap_name]
             target_dc = ext_sap.get("dc")
             target_dc.removeExternalSAP(sap_name)
-            LOG.info("Stopping the SAP instance: %r in DC %r" % (sap_name, target_dc))
+            LOG.info("Stopping the SAP instance: %r in DC %r" %
+                     (sap_name, target_dc))
 
         if not GK_STANDALONE_MODE:
             # remove placement?
@@ -290,7 +300,8 @@ class Service(object):
             assert(docker_name is not None)
             assert(target_dc is not None)
             if not self._check_docker_image_exists(docker_name):
-                raise Exception("Docker image %r not found. Abort." % docker_name)
+                raise Exception(
+                    "Docker image %r not found. Abort." % docker_name)
 
             # 3. get the resource limits
             res_req = u.get("resource_requirements")
@@ -298,77 +309,88 @@ class Service(object):
             if cpu_list is None:
                 cpu_list = res_req.get("cpu").get("vcpus")
             if cpu_list is None:
-                cpu_list="1"
+                cpu_list = "1"
             cpu_bw = res_req.get("cpu").get("cpu_bw")
             if not cpu_bw:
-                cpu_bw=1
+                cpu_bw = 1
             mem_num = str(res_req.get("memory").get("size"))
-            if len(mem_num)==0:
-                mem_num="2"
+            if len(mem_num) == 0:
+                mem_num = "2"
             mem_unit = str(res_req.get("memory").get("size_unit"))
-            if str(mem_unit)==0:
-                mem_unit="GB"
+            if str(mem_unit) == 0:
+                mem_unit = "GB"
             mem_limit = float(mem_num)
-            if mem_unit=="GB":
-                mem_limit=mem_limit*1024*1024*1024
-            elif mem_unit=="MB":
-                mem_limit=mem_limit*1024*1024
-            elif mem_unit=="KB":
-                mem_limit=mem_limit*1024
+            if mem_unit == "GB":
+                mem_limit = mem_limit * 1024 * 1024 * 1024
+            elif mem_unit == "MB":
+                mem_limit = mem_limit * 1024 * 1024
+            elif mem_unit == "KB":
+                mem_limit = mem_limit * 1024
             mem_lim = int(mem_limit)
-            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
+            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(
+                float(cpu_bw))
 
 
-            # check if we need to deploy the management ports (defined as type:management both on in the vnfd and nsd)
+            # check if we need to deploy the management ports (defined as
+            # type:management both on in the vnfd and nsd)
             intfs = vnfd.get("connection_points", [])
             mgmt_intf_names = []
             if USE_DOCKER_MGMT:
-                mgmt_intfs = [vnf_id + ':' + intf['id'] for intf in intfs if intf.get('type') == 'management']
-                # check if any of these management interfaces are used in a management-type network in the nsd
+                mgmt_intfs = [vnf_id + ':' + intf['id']
+                              for intf in intfs if intf.get('type') == 'management']
+                # check if any of these management interfaces are used in a
+                # management-type network in the nsd
                 for nsd_intf_name in mgmt_intfs:
-                    vlinks = [ l["connection_points_reference"] for l in self.nsd.get("virtual_links", [])]
+                    vlinks = [l["connection_points_reference"]
+                              for l in self.nsd.get("virtual_links", [])]
                     for link in vlinks:
-                        if nsd_intf_name in link and self.check_mgmt_interface(link):
-                            # this is indeed a management interface and can be skipped
-                            vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(nsd_intf_name)
-                            found_interfaces = [intf for intf in intfs if intf.get('id') == vnf_interface]
+                        if nsd_intf_name in link and self.check_mgmt_interface(
+                                link):
+                            # this is indeed a management interface and can be
+                            # skipped
+                            vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
+                                nsd_intf_name)
+                            found_interfaces = [
+                                intf for intf in intfs if intf.get('id') == vnf_interface]
                             intfs.remove(found_interfaces[0])
                             mgmt_intf_names.append(vnf_interface)
 
             # 4. generate the volume paths for the docker container
-            volumes=list()
+            volumes = list()
             # a volume to extract log files
-            docker_log_path = "/tmp/results/%s/%s"%(self.uuid,vnf_id)
-            LOG.debug("LOG path for vnf %s is %s."%(vnf_id,docker_log_path))
+            docker_log_path = "/tmp/results/%s/%s" % (self.uuid, vnf_id)
+            LOG.debug("LOG path for vnf %s is %s." % (vnf_id, docker_log_path))
             if not os.path.exists(docker_log_path):
-                LOG.debug("Creating folder %s"%docker_log_path)
+                LOG.debug("Creating folder %s" % docker_log_path)
                 os.makedirs(docker_log_path)
 
-            volumes.append(docker_log_path+":/mnt/share/")
-
+            volumes.append(docker_log_path + ":/mnt/share/")
 
             # 5. do the dc.startCompute(name="foobar") call to run the container
             # TODO consider flavors, and other annotations
             # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
             # use the vnf_id in the nsd as docker name
             # so deployed containers can be easily mapped back to the nsd
 
             # 5. do the dc.startCompute(name="foobar") call to run the container
             # TODO consider flavors, and other annotations
             # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
             # use the vnf_id in the nsd as docker name
             # so deployed containers can be easily mapped back to the nsd
-            LOG.info("Starting %r as %r in DC %r" % (vnf_name, vnf_id, vnfd.get("dc")))
+            LOG.info("Starting %r as %r in DC %r" %
+                     (vnf_name, vnf_id, vnfd.get("dc")))
             LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
             vnfi = target_dc.startCompute(
-                    vnf_id,
-                    network=intfs,
-                    image=docker_name,
-                    flavor_name="small",
-                    cpu_quota=cpu_quota,
-                    cpu_period=cpu_period,
-                    cpuset=cpu_list,
-                    mem_limit=mem_lim,
-                    volumes=volumes,
-                    type=kwargs.get('type','docker'))
-
-            # rename the docker0 interfaces (eth0) to the management port name defined in the VNFD
+                vnf_id,
+                network=intfs,
+                image=docker_name,
+                flavor_name="small",
+                cpu_quota=cpu_quota,
+                cpu_period=cpu_period,
+                cpuset=cpu_list,
+                mem_limit=mem_lim,
+                volumes=volumes,
+                type=kwargs.get('type', 'docker'))
+
+            # rename the docker0 interfaces (eth0) to the management port name
+            # defined in the VNFD
             if USE_DOCKER_MGMT:
                 for intf_name in mgmt_intf_names:
-                    self._vnf_reconfigure_network(vnfi, 'eth0', new_name=intf_name)
+                    self._vnf_reconfigure_network(
+                        vnfi, 'eth0', new_name=intf_name)
 
             return vnfi
 
@@ -383,7 +405,8 @@ class Service(object):
         dc = vnfi.datacenter
 
         # stop the vnfi
-        LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
+        LOG.info("Stopping the vnf instance contained in %r in DC %r" %
+                 (status["name"], dc))
         dc.stopCompute(status["name"])
 
     def _get_vnf_instance(self, instance_uuid, vnf_id):
@@ -416,17 +439,18 @@ class Service(object):
             intf = vnfi.intf(intf=if_name)
             if intf is not None:
                 intf.setIP(net_str)
-                LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
+                LOG.debug("Reconfigured network of %s:%s to %r" %
+                          (vnfi.name, if_name, net_str))
             else:
-                LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
+                LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
+                    vnfi.name, if_name))
 
         if new_name is not None:
             vnfi.cmd('ip link set', if_name, 'down')
             vnfi.cmd('ip link set', if_name, 'name', new_name)
             vnfi.cmd('ip link set', new_name, 'up')
-            LOG.debug("Reconfigured interface name of %s:%s to %s" % (vnfi.name, if_name, new_name))
-
-
+            LOG.debug("Reconfigured interface name of %s:%s to %s" %
+                      (vnfi.name, if_name, new_name))
 
     def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
         for vnfi in vnfi_list:
@@ -434,10 +458,12 @@ class Service(object):
             env = config.get("Env", list())
             for env_var in env:
                 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
-                LOG.debug("%r = %r" % (var , cmd))
-                if var=="SON_EMU_CMD":
-                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
-                    # execute command in new thread to ensure that GK is not blocked by VNF
+                LOG.debug("%r = %r" % (var, cmd))
+                if var == "SON_EMU_CMD":
+                    LOG.info("Executing entry point script in %r: %r" %
+                             (vnfi.name, cmd))
+                    # execute command in new thread to ensure that GK is not
+                    # blocked by VNF
                     t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                     t.daemon = True
                     t.start()
@@ -448,15 +474,15 @@ class Service(object):
             env = config.get("Env", list())
             for env_var in env:
                 var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
-                if var=="SON_EMU_CMD_STOP":
-                    LOG.info("Executing stop script in %r: %r" % (vnfi.name, cmd))
-                    # execute command in new thread to ensure that GK is not blocked by VNF
+                if var == "SON_EMU_CMD_STOP":
+                    LOG.info("Executing stop script in %r: %r" %
+                             (vnfi.name, cmd))
+                    # execute command in new thread to ensure that GK is not
+                    # blocked by VNF
                     t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                     t.daemon = True
                     t.start()
 
-
-
     def _unpack_service_package(self):
         """
         unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
@@ -465,7 +491,6 @@ class Service(object):
         with zipfile.ZipFile(self.package_file_path, "r") as z:
             z.extractall(self.package_content_path)
 
-
     def _load_package_descriptor(self):
         """
         Load the main package descriptor YAML and keep it as dict.
@@ -488,7 +513,7 @@ class Service(object):
             GK.net.deployed_nsds.append(self.nsd)
             # create dict to find the vnf_name for any vnf id
             self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
-                                                reduce(lambda x, y: dict(x, **y),
+                                               reduce(lambda x, y: dict(x, **y),
                                                        map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                            self.nsd["network_functions"])))
 
@@ -504,14 +529,15 @@ class Service(object):
         vnfd_set = dict()
         if "package_content" in self.manifest:
             for pc in self.manifest.get("package_content"):
-                if pc.get("content-type") == "application/sonata.function_descriptor":
+                if pc.get(
+                        "content-type") == "application/sonata.function_descriptor":
                     vnfd_path = os.path.join(
                         self.package_content_path,
                         make_relative_path(pc.get("name")))
                     vnfd = load_yaml(vnfd_path)
                     vnfd_set[vnfd.get("name")] = vnfd
             # then link each vnf_id in the nsd to its vnfd
-            for  vnf_id in self.vnf_id2vnf_name:
+            for vnf_id in self.vnf_id2vnf_name:
                 vnf_name = self.vnf_id2vnf_name[vnf_id]
                 self.vnfds[vnf_id] = vnfd_set[vnf_name]
                 LOG.debug("Loaded VNFD: {0} id: {1}".format(vnf_name, vnf_id))
@@ -520,7 +546,8 @@ class Service(object):
         # create list of all SAPs
         # check if we need to deploy management ports
         if USE_DOCKER_MGMT:
-            SAPs = [p for p in self.nsd["connection_points"] if 'management' not in p.get('type')]
+            SAPs = [p for p in self.nsd["connection_points"]
+                    if 'management' not in p.get('type')]
         else:
             SAPs = [p for p in self.nsd["connection_points"]]
 
@@ -530,21 +557,26 @@ class Service(object):
             # make sure SAP has type set (default internal)
             sap["type"] = sap.get("type", 'internal')
 
-            # Each Service Access Point (connection_point) in the nsd is an IP address on the host
+            # Each Service Access Point (connection_point) in the nsd is an IP
+            # address on the host
             if sap["type"] == "external":
                 # add to vnfds to calculate placement later on
                 sap_net = SAP_SUBNETS.pop(0)
-                self.saps[sap_docker_name] = {"name": sap_docker_name , "type": "external", "net": sap_net}
+                self.saps[sap_docker_name] = {
+                    "name": sap_docker_name, "type": "external", "net": sap_net}
                 # add SAP vnf to list in the NSD so it is deployed later on
-                # each SAP gets a unique VNFD and vnf_id in the NSD and custom type (only defined in the dummygatekeeper)
+                # each SAP gets a unique VNFD and vnf_id in the NSD and custom
+                # type (only defined in the dummygatekeeper)
                 self.nsd["network_functions"].append(
                     {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_ext"})
 
-            # Each Service Access Point (connection_point) in the nsd is getting its own container (default)
+            # Each Service Access Point (connection_point) in the nsd is
+            # getting its own container (default)
             elif sap["type"] == "internal" or sap["type"] == "management":
                 # add SAP to self.vnfds
                 if SAP_VNFD is None:
-                    sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
+                    sapfile = pkg_resources.resource_filename(
+                        __name__, "sap_vnfd.yml")
                 else:
                     sapfile = SAP_VNFD
                 sap_vnfd = load_yaml(sapfile)
@@ -558,17 +590,20 @@ class Service(object):
                 self.nsd["network_functions"].append(
                     {"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_int"})
 
-            LOG.debug("Loaded SAP: name: {0}, type: {1}".format(sap_docker_name, sap['type']))
+            LOG.debug("Loaded SAP: name: {0}, type: {1}".format(
+                sap_docker_name, sap['type']))
 
         # create sap lists
-        self.saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
-        self.saps_int = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "internal"]
+        self.saps_ext = [self.saps[sap]['name']
+                         for sap in self.saps if self.saps[sap]["type"] == "external"]
+        self.saps_int = [self.saps[sap]['name']
+                         for sap in self.saps if self.saps[sap]["type"] == "internal"]
 
     def _start_sap(self, sap, instance_uuid):
         if not DEPLOY_SAP:
             return
 
-        LOG.info('start SAP: {0} ,type: {1}'.format(sap['name'],sap['type']))
+        LOG.info('start SAP: {0} ,type: {1}'.format(sap['name'], sap['type']))
         if sap["type"] == "internal":
             vnfi = None
             if not GK_STANDALONE_MODE:
@@ -593,39 +628,49 @@ class Service(object):
         for link in eline_fwd_links:
             # check if we need to deploy this link when its a management link:
             if USE_DOCKER_MGMT:
-                if self.check_mgmt_interface(link["connection_points_reference"]):
+                if self.check_mgmt_interface(
+                        link["connection_points_reference"]):
                     continue
 
-            src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
-            dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
+            src_id, src_if_name, src_sap_id = parse_interface(
+                link["connection_points_reference"][0])
+            dst_id, dst_if_name, dst_sap_id = parse_interface(
+                link["connection_points_reference"][1])
 
             setChaining = False
             # check if there is a SAP in the link and chain everything together
             if src_sap_id in self.saps and dst_sap_id in self.saps:
 
             setChaining = False
             # check if there is a SAP in the link and chain everything together
             if src_sap_id in self.saps and dst_sap_id in self.saps:
-                LOG.info('2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
+                LOG.info(
+                    '2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
                 continue
 
             elif src_sap_id in self.saps_ext:
                 src_id = src_sap_id
-                # set intf name to None so the chaining function will choose the first one
+                # set intf name to None so the chaining function will choose
+                # the first one
                 src_if_name = None
                 dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
                 if dst_vnfi is not None:
                     # choose first ip address in sap subnet
                     sap_net = self.saps[src_sap_id]['net']
-                    sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
-                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, sap_ip)
+                    sap_ip = "{0}/{1}".format(str(sap_net[2]),
+                                              sap_net.prefixlen)
+                    self._vnf_reconfigure_network(
+                        dst_vnfi, dst_if_name, sap_ip)
                     setChaining = True
 
             elif dst_sap_id in self.saps_ext:
                 dst_id = dst_sap_id
-                # set intf name to None so the chaining function will choose the first one
+                # set intf name to None so the chaining function will choose
+                # the first one
                 dst_if_name = None
                 src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
                 if src_vnfi is not None:
                     sap_net = self.saps[dst_sap_id]['net']
-                    sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
-                    self._vnf_reconfigure_network(src_vnfi, src_if_name, sap_ip)
+                    sap_ip = "{0}/{1}".format(str(sap_net[2]),
+                                              sap_net.prefixlen)
+                    self._vnf_reconfigure_network(
+                        src_vnfi, src_if_name, sap_ip)
                     setChaining = True
 
             # Link between 2 VNFs
@@ -635,20 +680,23 @@ class Service(object):
                     src_id = src_sap_id
                 if dst_sap_id in self.saps_int:
                     dst_id = dst_sap_id
-                # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
+                # re-configure the VNFs IP assignment and ensure that a new
+                # subnet is used for each E-Link
                 src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
                 dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
                 if src_vnfi is not None and dst_vnfi is not None:
                     eline_net = ELINE_SUBNETS.pop(0)
-                    ip1 = "{0}/{1}".format(str(eline_net[1]), eline_net.prefixlen)
-                    ip2 = "{0}/{1}".format(str(eline_net[2]), eline_net.prefixlen)
+                    ip1 = "{0}/{1}".format(str(eline_net[1]),
+                                           eline_net.prefixlen)
+                    ip2 = "{0}/{1}".format(str(eline_net[2]),
+                                           eline_net.prefixlen)
                     self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
                     self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
                     setChaining = True
 
             # Set the chaining
             if setChaining:
-                ret = GK.net.setChain(
+                GK.net.setChain(
                     src_id, dst_id,
                     vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                     bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
@@ -656,7 +704,6 @@ class Service(object):
                     "Setting up E-Line link. (%s:%s) -> (%s:%s)" % (
                         src_id, src_if_name, dst_id, dst_if_name))
 
-
     def _connect_elans(self, elan_fwd_links, instance_uuid):
         """
         Connect all E-LAN links in the NSD
@@ -667,17 +714,18 @@ class Service(object):
         for link in elan_fwd_links:
             # check if we need to deploy this link when its a management link:
             if USE_DOCKER_MGMT:
-                if self.check_mgmt_interface(link["connection_points_reference"]):
+                if self.check_mgmt_interface(
+                        link["connection_points_reference"]):
                     continue
 
             elan_vnf_list = []
-            # check if an external SAP is in the E-LAN (then a subnet is already defined)
+            # check if an external SAP is in the E-LAN (then a subnet is
+            # already defined)
             intfs_elan = [intf for intf in link["connection_points_reference"]]
             lan_sap = self.check_ext_saps(intfs_elan)
             if lan_sap:
                 lan_net = self.saps[lan_sap]['net']
                 lan_hosts = list(lan_net.hosts())
-                sap_ip = str(lan_hosts.pop(0))
             else:
                 lan_net = ELAN_SUBNETS.pop(0)
                 lan_hosts = list(lan_net.hosts())
@@ -686,12 +734,15 @@ class Service(object):
             for intf in link["connection_points_reference"]:
 
                 # skip external SAPs, they already have an ip
-                vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf)
+                vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
+                    intf)
                 if vnf_sap_docker_name in self.saps_ext:
-                    elan_vnf_list.append({'name': vnf_sap_docker_name, 'interface': vnf_interface})
+                    elan_vnf_list.append(
+                        {'name': vnf_sap_docker_name, 'interface': vnf_interface})
                     continue
 
-                ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)), lan_net.prefixlen)
+                ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
+                                              lan_net.prefixlen)
                 vnf_id, intf_name, vnf_sap_id = parse_interface(intf)
 
                 # make sure we use the correct sap vnf name
@@ -711,12 +762,12 @@ class Service(object):
                 if vnfi is not None:
                     self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                     # add this vnf and interface to the E-LAN for tagging
-                    elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})
+                    elan_vnf_list.append(
+                        {'name': src_docker_name, 'interface': intf_name})
 
             # install the VLAN tags for this E-LAN
             GK.net.setLAN(elan_vnf_list)
 
-
     def _load_docker_files(self):
         """
         Get all paths to Dockerfiles from VNFDs and store them in dict.
@@ -737,7 +788,8 @@ class Service(object):
         Get all URLs to pre-build docker images in some repo.
         :return:
         """
-        # also merge sap dicts, because internal saps also need a docker container
+        # also merge sap dicts, because internal saps also need a docker
+        # container
         all_vnfs = self.vnfds.copy()
         all_vnfs.update(self.saps)
 
@@ -748,7 +800,8 @@ class Service(object):
                     if url is not None:
                         url = url.replace("http://", "")
                         self.remote_docker_image_urls[k] = url
-                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
+                        LOG.debug("Found Docker image URL (%r): %r" %
+                                  (k, self.remote_docker_image_urls[k]))
 
     def _build_images_from_dockerfiles(self):
         """
@@ -757,9 +810,11 @@ class Service(object):
         if GK_STANDALONE_MODE:
             return  # do not build anything in standalone mode
         dc = DockerClient()
-        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
+        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
+            self.local_docker_files))
         for k, v in self.local_docker_files.iteritems():
-            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
+            for line in dc.build(path=v.replace(
+                    "Dockerfile", ""), tag=k, rm=False, nocache=False):
                 LOG.debug("DOCKER BUILD: %s" % line)
             LOG.info("Docker image created: %s" % k)
 
@@ -769,7 +824,8 @@ class Service(object):
         """
         dc = DockerClient()
         for url in self.remote_docker_image_urls.itervalues():
-            if not FORCE_PULL:  # only pull if not present (speedup for development)
+            # only pull if not present (speedup for development)
+            if not FORCE_PULL:
                 if len(dc.images.list(name=url)) > 0:
                     LOG.debug("Image %r present. Skipping pull." % url)
                     continue
@@ -777,16 +833,13 @@ class Service(object):
             # this seems to fail with latest docker api version 2.0.2
             # dc.images.pull(url,
             #        insecure_registry=True)
-            #using docker cli instead
+            # using docker cli instead
             cmd = ["docker",
                    "pull",
                    url,
                    ]
             Popen(cmd).wait()
 
-
-
-
     def _check_docker_image_exists(self, image_name):
         """
         Query the docker service and check if the given image exists
@@ -814,7 +867,6 @@ class Service(object):
             sap_dict = self.saps[sap]
             LOG.info("Placed SAP %r on DC %r" % (sap, str(sap_dict.get("dc"))))
 
-
     def _calculate_cpu_cfs_values(self, cpu_time_percentage):
         """
         Calculate cpu period and quota for CFS
@@ -828,30 +880,38 @@ class Service(object):
         # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
         # Attention minimum cpu_quota is 1ms (micro)
         cpu_period = 1000000  # lets consider a fixed period of 1000000 microseconds for now
-        LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
-        cpu_quota = cpu_period * cpu_time_percentage  # calculate the fraction of cpu time for this container
-        # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
+        LOG.debug("cpu_period is %r, cpu_percentage is %r" %
+                  (cpu_period, cpu_time_percentage))
+        # calculate the fraction of cpu time for this container
+        cpu_quota = cpu_period * cpu_time_percentage
+        # ATTENTION >= 1000 to avoid a invalid argument system error ... no
+        # idea why
         if cpu_quota < 1000:
             LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
             cpu_quota = 1000
             LOG.warning("Increased CPU quota to avoid system error.")
-        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
+        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
+                  (cpu_period, cpu_quota))
         return int(cpu_period), int(cpu_quota)
 
     def check_ext_saps(self, intf_list):
         # check if the list of interfacs contains an external SAP
-        saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
+        saps_ext = [self.saps[sap]['name']
+                    for sap in self.saps if self.saps[sap]["type"] == "external"]
         for intf_name in intf_list:
-            vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf_name)
+            vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
+                intf_name)
             if vnf_sap_docker_name in saps_ext:
                 return vnf_sap_docker_name
 
     def check_mgmt_interface(self, intf_list):
-        SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"] if 'management' in p.get('type')]
+        SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"]
+                     if 'management' in p.get('type')]
         for intf_name in intf_list:
             if intf_name in SAPs_mgmt:
                 return True
 
+
 """
 Some (simple) placement algorithms
 """
@@ -861,6 +921,7 @@ class FirstDcPlacement(object):
     """
     Placement: Always use one and the same data center from the GK.dcs dict.
     """
+
     def place(self, nsd, vnfds, saps, dcs):
         for id, vnfd in vnfds.iteritems():
             vnfd["dc"] = list(dcs.itervalues())[0]
@@ -870,6 +931,7 @@ class RoundRobinDcPlacement(object):
     """
     Placement: Distribute VNFs across all available DCs in a round robin fashion.
     """
+
     def place(self, nsd, vnfds, saps, dcs):
         c = 0
         dcs_list = list(dcs.itervalues())
@@ -877,11 +939,13 @@ class RoundRobinDcPlacement(object):
             vnfd["dc"] = dcs_list[c % len(dcs_list)]
             c += 1  # inc. c to use next DC
 
+
 class RoundRobinDcPlacementWithSAPs(object):
     """
     Placement: Distribute VNFs across all available DCs in a round robin fashion,
     every SAP is instantiated on the same DC as the connected VNF.
     """
+
     def place(self, nsd, vnfds, saps, dcs):
 
         # place vnfs
@@ -893,13 +957,18 @@ class RoundRobinDcPlacementWithSAPs(object):
 
         # place SAPs
         vlinks = nsd.get("virtual_links", [])
-        eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
-        elan_fwd_links = [l for l in vlinks if  (l["connectivity_type"] == "E-LAN")]
+        eline_fwd_links = [l for l in vlinks if (
+            l["connectivity_type"] == "E-Line")]
+        elan_fwd_links = [l for l in vlinks if (
+            l["connectivity_type"] == "E-LAN")]
 
 
-        # SAPs on E-Line links are placed on the same DC as the VNF on the E-Line
+        # SAPs on E-Line links are placed on the same DC as the VNF on the
+        # E-Line
         for link in eline_fwd_links:
-            src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
-            dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
+            src_id, src_if_name, src_sap_id = parse_interface(
+                link["connection_points_reference"][0])
+            dst_id, dst_if_name, dst_sap_id = parse_interface(
+                link["connection_points_reference"][1])
 
             # check if there is a SAP in the link
             if src_sap_id in saps:
@@ -920,11 +989,10 @@ class RoundRobinDcPlacementWithSAPs(object):
                 # find SAP interfaces
                 intf_id, intf_name, intf_sap_id = parse_interface(intf)
                 if intf_sap_id in saps:
-                    dc = dcs_list[randint(0, dc_len-1)]
+                    dc = dcs_list[randint(0, dc_len - 1)]
                     saps[intf_sap_id]['dc'] = dc
 
 
-
 """
 Resource definitions and API endpoints
 """
@@ -950,7 +1018,8 @@ class Packages(fr.Resource):
             elif len(request.data) > 0:
                 son_file = request.data
             else:
-                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
+                return {"service_uuid": None, "size": 0, "sha1": None,
+                        "error": "upload failed. file not found."}, 500
             # generate a uuid to reference this package
             service_uuid = str(uuid.uuid4())
             file_hash = hashlib.sha1(str(son_file)).hexdigest()
@@ -969,11 +1038,14 @@ class Packages(fr.Resource):
             if AUTO_DELETE:
                 service_list = copy.copy(GK.services)
                 for service_uuid in service_list:
-                    instances_list = copy.copy(GK.services[service_uuid].instances)
+                    instances_list = copy.copy(
+                        GK.services[service_uuid].instances)
                     for instance_uuid in instances_list:
                         # valid service and instance UUID, stop service
-                        GK.services.get(service_uuid).stop_service(instance_uuid)
-                        LOG.info("service instance with uuid %r stopped." % instance_uuid)
+                        GK.services.get(service_uuid).stop_service(
+                            instance_uuid)
+                        LOG.info("service instance with uuid %r stopped." %
+                                 instance_uuid)
 
             # create a service object and register it
             s = Service(service_uuid, file_hash, upload_path)
@@ -983,13 +1055,15 @@ class Packages(fr.Resource):
             if AUTO_DEPLOY:
                 # ok, we have a service uuid, lets start the service
                 reset_subnets()
-                service_instance_uuid = GK.services.get(service_uuid).start_service()
+                GK.services.get(service_uuid).start_service()
 
             # generate the JSON result
-            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
-        except Exception as ex:
+            return {"service_uuid": service_uuid, "size": size,
+                    "sha1": file_hash, "error": None}, 201
+        except BaseException:
             LOG.exception("Service package upload failed:")
-            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
+            return {"service_uuid": None, "size": 0,
+                    "sha1": None, "error": "upload failed"}, 500
 
     def get(self):
         """
@@ -1014,12 +1088,15 @@ class Instantiations(fr.Resource):
         service_uuid = json_data.get("service_uuid")
 
         # lets be a bit fuzzy here to make testing easier
-        if (service_uuid is None or service_uuid=="latest") and len(GK.services) > 0:
-            # if we don't get a service uuid, we simple start the first service in the list
+        if (service_uuid is None or service_uuid ==
+                "latest") and len(GK.services) > 0:
+            # if we don't get a service uuid, we simple start the first service
+            # in the list
             service_uuid = list(GK.services.iterkeys())[0]
         if service_uuid in GK.services:
             # ok, we have a service uuid, lets start the service
-            service_instance_uuid = GK.services.get(service_uuid).start_service()
+            service_instance_uuid = GK.services.get(
+                service_uuid).start_service()
             return {"service_instance_uuid": service_instance_uuid}, 201
         return "Service not found", 404
 
@@ -1043,17 +1120,21 @@ class Instantiations(fr.Resource):
 
         # try to be fuzzy
         if service_uuid is None and len(GK.services) > 0:
-            #if we don't get a service uuid, we simply stop the last service in the list
+            # if we don't get a service uuid, we simply stop the last service
+            # in the list
             service_uuid = list(GK.services.iterkeys())[0]
-        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
-            instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]
+        if instance_uuid is None and len(
+                GK.services[service_uuid].instances) > 0:
+            instance_uuid = list(
+                GK.services[service_uuid].instances.iterkeys())[0]
 
         if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
             # valid service and instance UUID, stop service
             GK.services.get(service_uuid).stop_service(instance_uuid)
-            return "service instance with uuid %r stopped." % instance_uuid,200
+            return "service instance with uuid %r stopped." % instance_uuid, 200
         return "Service not found", 404
 
+
 class Exit(fr.Resource):
 
     def put(self):
@@ -1068,7 +1149,6 @@ def initialize_GK():
     GK = Gatekeeper()
 
 
-
 # create a single, global GK object
 GK = None
 initialize_GK()
@@ -1078,11 +1158,11 @@ app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
 api = fr.Api(app)
 # define endpoints
 api.add_resource(Packages, '/packages', '/api/v2/packages')
-api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
+api.add_resource(Instantiations, '/instantiations',
+                 '/api/v2/instantiations', '/api/v2/requests')
 api.add_resource(Exit, '/emulator/exit')
 
 
-
 def start_rest_api(host, port, datacenters=dict()):
     GK.dcs = datacenters
     GK.net = get_dc_network()
@@ -1104,7 +1184,7 @@ def load_yaml(path):
         try:
             r = yaml.load(f)
         except yaml.YAMLError as exc:
-            LOG.exception("YAML parse error")
+            LOG.exception("YAML parse error: %r" % str(exc))
             r = dict()
     return r
 
@@ -1144,6 +1224,7 @@ def parse_interface(interface_name):
 
     return vnf_id, vnf_interface, vnf_sap_docker_name
 
+
 def reset_subnets():
     # private subnet definitions for the generated interfaces
     # 10.10.xxx.0/24
@@ -1156,6 +1237,7 @@ def reset_subnets():
     global ELINE_SUBNETS
     ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
 
+
 if __name__ == '__main__':
     """
     Lets allow to run the API in standalone mode.
@@ -1163,4 +1245,3 @@ if __name__ == '__main__':
     GK_STANDALONE_MODE = True
     logging.getLogger("werkzeug").setLevel(logging.INFO)
     start_rest_api("0.0.0.0", 8000)
-
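
Note on the CFS hunk above: the re-wrapped lines in _calculate_cpu_cfs_values() do not change the computation. As a rough, standalone sketch of what that computation does (illustrative only, not part of this patch; names mirror the hunk):

    def calculate_cpu_cfs_values(cpu_time_percentage):
        # fixed CFS period of 1,000,000 microseconds, as in the patched code
        cpu_period = 1000000
        # fraction of CPU time granted to the container
        cpu_quota = cpu_period * cpu_time_percentage
        # Docker/CFS rejects very small quotas, so clamp to 1000 microseconds
        if cpu_quota < 1000:
            cpu_quota = 1000
        return int(cpu_period), int(cpu_quota)

    # e.g. 25% of one core: calculate_cpu_cfs_values(0.25) -> (1000000, 250000)
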
index dba0b3c..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 58969d1..82fa5bd 100755 (executable)
@@ -1,38 +1,35 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import requests
 
 
 # set this to localhost for now
 # this is correct for son-emu started outside of a container or as a container with net=host
-#TODO if prometheus sdk DB is started outside of emulator, place these globals in an external SDK config file?
+# TODO if prometheus sdk DB is started outside of emulator, place these
+# globals in an external SDK config file?
 prometheus_ip = 'localhost'
 # when sdk is started with docker-compose, we could use
 # prometheus_ip = 'prometheus'
@@ -49,8 +46,8 @@ def query_Prometheus(query):
         # logging.info('return:{0}'.format(ret))
         try:
             ret = ret['data']['result'][0]['value']
-        except:
+        except BaseException:
             ret = None
     else:
         ret = None
-    return ret
\ No newline at end of file
+    return ret
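
For context, query_Prometheus() above wraps the standard Prometheus HTTP query API; only the result parsing is touched by this hunk. A minimal sketch of such a query (illustrative; it assumes the default Prometheus port 9090, while the real port/path constants live elsewhere in this module):

    import requests

    def query_prometheus(query, ip='localhost', port=9090):
        # GET /api/v1/query returns JSON shaped like:
        # {"status": "success", "data": {"result": [{"value": [<ts>, "<val>"]}]}}
        url = 'http://{0}:{1}/api/v1/query'.format(ip, port)
        ret = requests.get(url, params={'query': query}).json()
        try:
            return ret['data']['result'][0]['value']
        except (KeyError, IndexError):
            return None

    # e.g. query_prometheus('sum(rate(container_cpu_usage_seconds_total[10s]))')
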
index dba0b3c..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index fdfc11b..5abaa28 100755 (executable)
@@ -1,35 +1,32 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from requests import get, put, delete
 from tabulate import tabulate
 import pprint
 import argparse
-import json
 from subprocess import Popen
 
 pp = pprint.PrettyPrinter(indent=4)
@@ -70,7 +67,8 @@ class RestApiClient():
 
     def list(self, args):
 
-        list = get('%s/restapi/compute/%s' % (args.get("endpoint"), args.get('datacenter'))).json()
+        list = get('%s/restapi/compute/%s' %
+                   (args.get("endpoint"), args.get('datacenter'))).json()
 
         table = []
         for c in list:
@@ -78,15 +76,17 @@ class RestApiClient():
             if len(c) > 1:
                 name = c[0]
                 status = c[1]
-                #eth0ip = status.get("docker_network", "-")
-                netw_list = [netw_dict['intf_name'] for netw_dict in status.get("network")]
-                dc_if_list = [netw_dict['dc_portname'] for netw_dict in status.get("network")]
+                # eth0ip = status.get("docker_network", "-")
+                netw_list = [netw_dict['intf_name']
+                             for netw_dict in status.get("network")]
+                dc_if_list = [netw_dict['dc_portname']
+                              for netw_dict in status.get("network")]
                 table.append([status.get("datacenter"),
                               name,
                               status.get("image"),
                               ','.join(netw_list),
                               ','.join(dc_if_list)])
-                #status.get("state").get("Status")]
+                # status.get("state").get("Status")]
 
         headers = ["Datacenter",
                    "Container",
@@ -110,8 +110,9 @@ class RestApiClient():
             Popen(['xterm', '-xrm', 'XTerm.vt100.allowTitleOps: false', '-T', vnf_name,
                    '-e', "docker exec -it mn.{0} /bin/bash".format(vnf_name)])
 
+
 parser = argparse.ArgumentParser(description="""son-emu-cli compute
-    
+
     Examples:
     - son-emu-cli compute start -d dc2 -n client -i sonatanfv/sonata-iperf3-vnf
     - son-emu-cli list
index 651c55c..f4f92c3 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 from requests import get
 from tabulate import tabulate
 import pprint
 from requests import get
 from tabulate import tabulate
 import pprint
@@ -32,6 +30,7 @@ import argparse
 
 pp = pprint.PrettyPrinter(indent=4)
 
+
 class RestApiClient():
 
     def __init__(self):
@@ -44,40 +43,41 @@ class RestApiClient():
         else:
             print("Command not implemented.")
 
-    def list(self,args):
+    def list(self, args):
         list = get('%s/restapi/datacenter' % args.get('endpoint')).json()
         table = []
         for d in list:
             # for each dc add a line to the output table
             if len(d) > 0:
                 table.append([d.get("label"),
-                           d.get("internalname"),
-                           d.get("switch"),
-                           d.get("n_running_containers"),
-                           len(d.get("metadata"))])
+                              d.get("internalname"),
+                              d.get("switch"),
+                              d.get("n_running_containers"),
+                              len(d.get("metadata"))])
         headers = ["Label",
-               "Internal Name",
-               "Switch",
-               "# Containers",
-               "# Metadata Items"]
-        print (tabulate(table, headers=headers, tablefmt="grid"))
-
-    def status(self,args):
-        list = get('%s/restapi/datacenter/%s' % ( args.get("endpoint"), args.get("datacenter"))).json()
+                   "Internal Name",
+                   "Switch",
+                   "# Containers",
+                   "# Metadata Items"]
+        print(tabulate(table, headers=headers, tablefmt="grid"))
+
+    def status(self, args):
+        list = get('%s/restapi/datacenter/%s' %
+                   (args.get("endpoint"), args.get("datacenter"))).json()
         table = []
         table.append([list.get('label'),
-                  list.get('internalname'),
-                  list.get('switch'),
-                  list.get('n_running_containers'),
-                  len(list.get('metadata'))])
+                      list.get('internalname'),
+                      list.get('switch'),
+                      list.get('n_running_containers'),
+                      len(list.get('metadata'))])
 
         headers = ["Label",
-               "Internal Name",
-               "Switch",
-               "# Containers",
-               "# Metadata Items"]
+                   "Internal Name",
+                   "Switch",
+                   "# Containers",
+                   "# Metadata Items"]
 
 
-        print (tabulate(table, headers=headers, tablefmt="grid"))
+        print(tabulate(table, headers=headers, tablefmt="grid"))
 
 
 parser = argparse.ArgumentParser(description='son-emu-cli datacenter')
 
 
 parser = argparse.ArgumentParser(description='son-emu-cli datacenter')
@@ -98,4 +98,3 @@ def main(argv):
     args = vars(parser.parse_args(argv))
     c = RestApiClient()
     c.execute_command(args)
-
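
The datacenter CLI hunks above only re-indent the tabulate() calls; the table layout is unchanged. For illustration, the call pattern used here (the sample row is made up):

    from tabulate import tabulate

    table = [["dc1", "dc1", "dc1.s1", 2, 0]]
    headers = ["Label", "Internal Name", "Switch",
               "# Containers", "# Metadata Items"]
    # renders an ASCII grid table, one row per data center
    print(tabulate(table, headers=headers, tablefmt="grid"))
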
index 8be1e7f..3787887 100755 (executable)
@@ -1,38 +1,36 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-from requests import get, put, delete
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from requests import get, put
 import pprint
 import argparse
 from emuvim.cli import prometheus
 
 pp = pprint.PrettyPrinter(indent=4)
 
+
 class RestApiClient():
 
     def __init__(self):
@@ -49,8 +47,8 @@ class RestApiClient():
 
         params = self._create_dict(
             vnf_name=self._parse_vnf_name(args.get("vnf_name")),
-            vnf_interface = self._parse_vnf_interface(args.get("vnf_name")),
-            metric = args.get("metric"))
+            vnf_interface=self._parse_vnf_interface(args.get("vnf_name")),
+            metric=args.get("metric"))
 
         url = "{0}/restapi/monitor/interface".format(args.get("endpoint"))
         response = put(url, params=params)
@@ -92,14 +90,12 @@ class RestApiClient():
         # This functions makes it more user-friendly to create the correct prometheus query
         # <uuid> is replaced by the correct uuid of the deployed vnf container
         vnf_name = self._parse_vnf_name(args.get("vnf_name"))
-        vnf_interface = self._parse_vnf_interface(args.get("vnf_name"))
-        dc_label = args.get("datacenter")
         query = args.get("query")
 
         vnf_status = get("%s/restapi/compute/%s/%s" %
-            (args.get("endpoint"),
-             args.get("datacenter"),
-             vnf_name)).json()
+                         (args.get("endpoint"),
+                          args.get("datacenter"),
+                             vnf_name)).json()
         uuid = vnf_status['id']
         query = query.replace('<uuid>', uuid)
 
@@ -113,7 +109,7 @@ class RestApiClient():
     def _parse_vnf_interface(self, vnf_name_str):
         try:
             vnf_interface = vnf_name_str.split(':')[1]
-        except:
+        except BaseException:
             vnf_interface = None
 
         return vnf_interface
@@ -121,10 +117,12 @@ class RestApiClient():
     def _create_dict(self, **kwargs):
         return kwargs
 
+
 parser = argparse.ArgumentParser(description='son-emu-cli monitor')
 parser.add_argument(
     "command",
-    choices=['setup_metric', 'stop_metric', 'setup_flow', 'stop_flow','prometheus'],
+    choices=['setup_metric', 'stop_metric',
+             'setup_flow', 'stop_flow', 'prometheus'],
     help="setup/stop a metric/flow to be monitored or query Prometheus")
 parser.add_argument(
     "--vnf_name", "-vnf", dest="vnf_name",
@@ -146,7 +144,8 @@ parser.add_argument(
     default="http://127.0.0.1:5001",
     help="REST API endpoint of son-emu (default:http://127.0.0.1:5001)")
 
+
 def main(argv):
     args = vars(parser.parse_args(argv))
     c = RestApiClient()
-    c.execute_command(args)
\ No newline at end of file
+    c.execute_command(args)
index 82fe99f..7566f44 100755 (executable)
@@ -1,31 +1,29 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from requests import get,put, delete
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from requests import put, delete
 import argparse
 
 
@@ -44,9 +42,10 @@ class RestApiClient():
     def add(self, args):
         params = self._create_dict(
             vnf_src_name=self._parse_vnf_name(args.get("source")),
-            vnf_dst_name = self._parse_vnf_name(args.get("destination")),
+            vnf_dst_name=self._parse_vnf_name(args.get("destination")),
             vnf_src_interface=self._parse_vnf_interface(args.get("source")),
-            vnf_dst_interface=self._parse_vnf_interface(args.get("destination")),
+            vnf_dst_interface=self._parse_vnf_interface(
+                args.get("destination")),
             weight=args.get("weight"),
             match=args.get("match"),
             bidirectional=args.get("bidirectional"),
@@ -59,10 +58,11 @@ class RestApiClient():
 
     def remove(self, args):
         params = self._create_dict(
-            vnf_src_name = self._parse_vnf_name(args.get("source")),
-            vnf_dst_name = self._parse_vnf_name(args.get("destination")),
+            vnf_src_name=self._parse_vnf_name(args.get("source")),
+            vnf_dst_name=self._parse_vnf_name(args.get("destination")),
             vnf_src_interface=self._parse_vnf_interface(args.get("source")),
-            vnf_dst_interface=self._parse_vnf_interface(args.get("destination")),
+            vnf_dst_interface=self._parse_vnf_interface(
+                args.get("destination")),
             weight=args.get("weight"),
             match=args.get("match"),
             bidirectional=args.get("bidirectional"),
@@ -80,7 +80,7 @@ class RestApiClient():
     def _parse_vnf_interface(self, vnf_name_str):
         try:
             vnf_interface = vnf_name_str.split(':')[1]
-        except:
+        except BaseException:
             vnf_interface = None
 
         return vnf_interface
@@ -89,11 +89,13 @@ class RestApiClient():
         return kwargs
 
     def _nice_print(self, text):
-        # some modules seem to return unicode strings where newlines, other special characters are escaped
+        # some modules seem to return unicode strings where newlines, other
+        # special characters are escaped
         text = str(text).replace('\\n', '\n')
         text = str(text).replace('\\"', '"')
         return text
 
+
 parser = argparse.ArgumentParser(description='son-emu-cli network')
 parser.add_argument(
     "command",
@@ -128,6 +130,7 @@ parser.add_argument(
     default="http://127.0.0.1:5001",
     help="REST API endpoint of son-emu (default:http://127.0.0.1:5001)")
 
+
 def main(argv):
     args = vars(parser.parse_args(argv))
     c = RestApiClient()
index 0223818..801c60b 100755 (executable)
@@ -1,48 +1,36 @@
 #!/usr/bin/python
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
- Simple CLI client to interact with a running emulator.
-
- The CLI offers different tools, e.g., compute, network, ...
- Each of these tools is implemented as an independent Python
- module.
-
- cli compute start dc1 my_name flavor_a
- cli network create dc1 11.0.0.0/24
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import sys
 from emuvim.cli.rest import compute as restcom
 from emuvim.cli.rest import datacenter as restdc
 from emuvim.cli.rest import monitor as restmon
 from emuvim.cli.rest import network as restnetw
 
+
 def help():
     print("Missing arguments.\n")
     print("Usage: son-emu-cli compute|datacenter|network|monitor <arguments>\n")
@@ -53,6 +41,7 @@ def help():
     print("\tson-emu-cli monitor --help")
     exit(0)
 
+
 def main():
     if len(sys.argv) < 2:
         help()
index 45ad698..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2017 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
\ No newline at end of file
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 395c0ce..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 395c0ce..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 21985cb..6ed87cb 100755 (executable)
@@ -1,38 +1,34 @@
-"""\r
-Copyright (c) 2015 SONATA-NFV\r
-ALL RIGHTS RESERVED.\r
-\r
-Licensed under the Apache License, Version 2.0 (the "License");\r
-you may not use this file except in compliance with the License.\r
-You may obtain a copy of the License at\r
-\r
-    http://www.apache.org/licenses/LICENSE-2.0\r
-\r
-Unless required by applicable law or agreed to in writing, software\r
-distributed under the License is distributed on an "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
-See the License for the specific language governing permissions and\r
-limitations under the License.\r
-\r
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]\r
-nor the names of its contributors may be used to endorse or promote\r
-products derived from this software without specific prior written\r
-permission.\r
-\r
-This work has been performed in the framework of the SONATA project,\r
-funded by the European Commission under Grant number 671517 through\r
-the Horizon 2020 and 5G-PPP programmes. The authors would like to\r
-acknowledge the contributions of their colleagues of the SONATA\r
-partner consortium (www.sonata-nfv.eu).\r
-"""\r
-\r
+# Copyright (c) 2015 SONATA-NFV and Paderborn University\r
+# ALL RIGHTS RESERVED.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+#    http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+#\r
+# Neither the name of the SONATA-NFV, Paderborn University\r
+# nor the names of its contributors may be used to endorse or promote\r
+# products derived from this software without specific prior written\r
+# permission.\r
+#\r
+# This work has been performed in the framework of the SONATA project,\r
+# funded by the European Commission under Grant number 671517 through\r
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to\r
+# acknowledge the contributions of their colleagues of the SONATA\r
+# partner consortium (www.sonata-nfv.eu).\r
 import logging\r
-import sys\r
-from mininet.node import  OVSSwitch\r
+from mininet.node import OVSSwitch\r
 import ast\r
 import time\r
-from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \\r
-    pushadd_to_gateway, push_to_gateway, delete_from_gateway\r
+from prometheus_client import Gauge, CollectorRegistry, \\r
+    pushadd_to_gateway, delete_from_gateway\r
 import threading\r
 from subprocess import Popen\r
 import os\r
@@ -52,6 +48,7 @@ CADVISOR_PORT = 8081
 \r
 COOKIE_MASK = 0xffffffff\r
 \r
+\r
 class DCNetworkMonitor():\r
     def __init__(self, net):\r
         self.net = net\r
@@ -71,8 +68,8 @@ class DCNetworkMonitor():
         self.prom_rx_byte_count = Gauge('sonemu_rx_count_bytes', 'Total number of bytes received',\r
                                         ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)\r
 \r
-        self.prom_metrics={'tx_packets':self.prom_tx_packet_count, 'rx_packets':self.prom_rx_packet_count,\r
-                           'tx_bytes':self.prom_tx_byte_count,'rx_bytes':self.prom_rx_byte_count}\r
+        self.prom_metrics = {'tx_packets': self.prom_tx_packet_count, 'rx_packets': self.prom_rx_packet_count,\r
+                             'tx_bytes': self.prom_tx_byte_count, 'rx_bytes': self.prom_rx_byte_count}\r
 \r
         # list of installed metrics to monitor\r
         # each entry can contain this data\r
@@ -98,17 +95,20 @@ class DCNetworkMonitor():
         self.monitor_thread = threading.Thread(target=self.get_network_metrics)\r
         self.monitor_thread.start()\r
 \r
-        self.monitor_flow_thread = threading.Thread(target=self.get_flow_metrics)\r
+        self.monitor_flow_thread = threading.Thread(\r
+            target=self.get_flow_metrics)\r
         self.monitor_flow_thread.start()\r
 \r
         # helper tools\r
-        # cAdvisor, Prometheus pushgateway are started as external container, to gather monitoring metric in son-emu\r
+        # cAdvisor, Prometheus pushgateway are started as external container,\r
+        # to gather monitoring metric in son-emu\r
         self.pushgateway_process = self.start_PushGateway()\r
         self.cadvisor_process = self.start_cAdvisor()\r
 \r
-\r
     # first set some parameters, before measurement can start\r
-    def setup_flow(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=0):\r
+\r
+    def setup_flow(self, vnf_name, vnf_interface=None,\r
+                   metric='tx_packets', cookie=0):\r
 \r
         flow_metric = {}\r
 \r
@@ -133,8 +133,10 @@ class DCNetworkMonitor():
                     break\r
 \r
         if not vnf_switch:\r
-            logging.exception("vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface))\r
-            return "vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface)\r
+            logging.exception("vnf switch of {0}:{1} not found!".format(\r
+                vnf_name, vnf_interface))\r
+            return "vnf switch of {0}:{1} not found!".format(\r
+                vnf_name, vnf_interface)\r
 \r
         try:\r
             # default port direction to monitor\r
@@ -144,7 +146,8 @@ class DCNetworkMonitor():
             next_node = self.net.getNodeByName(vnf_switch)\r
 \r
             if not isinstance(next_node, OVSSwitch):\r
-                logging.info("vnf: {0} is not connected to switch".format(vnf_name))\r
+                logging.info(\r
+                    "vnf: {0} is not connected to switch".format(vnf_name))\r
                 return\r
 \r
             flow_metric['previous_measurement'] = 0\r
@@ -158,8 +161,10 @@ class DCNetworkMonitor():
             self.flow_metrics.append(flow_metric)\r
             self.monitor_flow_lock.release()\r
 \r
-            logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))\r
-            return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)\r
+            logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(\r
+                vnf_name, vnf_interface, metric, cookie))\r
+            return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(\r
+                vnf_name, vnf_interface, metric, cookie)\r
 \r
         except Exception as ex:\r
             logging.exception("setup_metric error.")\r
@@ -187,17 +192,21 @@ class DCNetworkMonitor():
                     labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
                     set(float('nan'))\r
 \r
-                delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+                delete_from_gateway(\r
+                    self.pushgateway, job='sonemu-SDNcontroller')\r
 \r
                 self.monitor_flow_lock.release()\r
 \r
-                logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))\r
-                return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)\r
-\r
-        return 'Error stopping monitoring flow: {0} on {1}:{2}'.format(metric, vnf_name, vnf_interface)\r
+                logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(\r
+                    vnf_name, vnf_interface, metric, cookie))\r
+                return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(\r
+                    vnf_name, vnf_interface, metric, cookie)\r
 \r
+        return 'Error stopping monitoring flow: {0} on {1}:{2}'.format(\r
+            metric, vnf_name, vnf_interface)\r
 \r
     # first set some parameters, before measurement can start\r
+\r
     def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):\r
 \r
         network_metric = {}\r
@@ -221,8 +230,10 @@ class DCNetworkMonitor():
                     break\r
 \r
         if 'mon_port' not in network_metric:\r
-            logging.exception("vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface))\r
-            return "vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface)\r
+            logging.exception("vnf interface {0}:{1} not found!".format(\r
+                vnf_name, vnf_interface))\r
+            return "vnf interface {0}:{1} not found!".format(\r
+                vnf_name, vnf_interface)\r
 \r
         try:\r
             # default port direction to monitor\r
@@ -242,13 +253,13 @@ class DCNetworkMonitor():
             next_node = self.net.getNodeByName(vnf_switch)\r
 \r
             if not isinstance(next_node, OVSSwitch):\r
-                logging.info("vnf: {0} is not connected to switch".format(vnf_name))\r
+                logging.info(\r
+                    "vnf: {0} is not connected to switch".format(vnf_name))\r
                 return\r
 \r
             network_metric['previous_measurement'] = 0\r
             network_metric['previous_monitor_time'] = 0\r
 \r
-\r
             network_metric['switch_dpid'] = int(str(next_node.dpid), 16)\r
             network_metric['metric_key'] = metric\r
 \r
@@ -256,9 +267,10 @@ class DCNetworkMonitor():
             self.network_metrics.append(network_metric)\r
             self.monitor_lock.release()\r
 \r
-\r
-            logging.info('Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))\r
-            return 'Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)\r
+            logging.info('Started monitoring: {2} on {0}:{1}'.format(\r
+                vnf_name, vnf_interface, metric))\r
+            return 'Started monitoring: {2} on {0}:{1}'.format(\r
+                vnf_name, vnf_interface, metric)\r
 \r
         except Exception as ex:\r
             logging.exception("setup_metric error.")\r
@@ -282,7 +294,7 @@ class DCNetworkMonitor():
                 self.network_metrics.remove(metric_dict)\r
 \r
                 # set values to NaN, prometheus api currently does not support removal of metrics\r
-                #self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))\r
+                # self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))\r
                 self.prom_metrics[metric_dict['metric_key']]. \\r
                     labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=None). \\r
                     set(float('nan'))\r
@@ -291,21 +303,28 @@ class DCNetworkMonitor():
                 # 1 single monitor job for all metrics of the SDN controller\r
                 # we can only  remove from the pushgateway grouping keys(labels) which we have defined for the add_to_pushgateway\r
                 # we can not specify labels from the metrics to be removed\r
-                # if we need to remove the metrics seperatelty, we need to give them a separate grouping key, and probably a diffferent registry also\r
-                delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+                # if we need to remove the metrics seperatelty, we need to give\r
+                # them a separate grouping key, and probably a diffferent\r
+                # registry also\r
+                delete_from_gateway(\r
+                    self.pushgateway, job='sonemu-SDNcontroller')\r
 \r
                 self.monitor_lock.release()\r
 \r
-                logging.info('Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))\r
-                return 'Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)\r
+                logging.info('Stopped monitoring: {2} on {0}:{1}'.format(\r
+                    vnf_name, vnf_interface, metric))\r
+                return 'Stopped monitoring: {2} on {0}:{1}'.format(\r
+                    vnf_name, vnf_interface, metric)\r
 \r
             # delete everything from this vnf\r
             elif metric_dict['vnf_name'] == vnf_name and vnf_interface is None and metric is None:\r
                 self.monitor_lock.acquire()\r
                 self.network_metrics.remove(metric_dict)\r
-                logging.info('remove metric from monitor: vnf_name:{0} vnf_interface:{1} mon_port:{2}'.format(metric_dict['vnf_name'], metric_dict['vnf_interface'], metric_dict['mon_port']))\r
+                logging.info('remove metric from monitor: vnf_name:{0} vnf_interface:{1} mon_port:{2}'.format(\r
+                    metric_dict['vnf_name'], metric_dict['vnf_interface'], metric_dict['mon_port']))\r
 \r
-                delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+                delete_from_gateway(\r
+                    self.pushgateway, job='sonemu-SDNcontroller')\r
                 self.monitor_lock.release()\r
                 continue\r
 \r
@@ -313,10 +332,12 @@ class DCNetworkMonitor():
             logging.info('Stopped monitoring vnf: {0}'.format(vnf_name))\r
             return 'Stopped monitoring: {0}'.format(vnf_name)\r
         else:\r
-            return 'Error stopping monitoring metric: {0} on {1}:{2}'.format(metric, vnf_name, vnf_interface)\r
+            return 'Error stopping monitoring metric: {0} on {1}:{2}'.format(\r
+                metric, vnf_name, vnf_interface)\r
 \r
 \r
 # get all metrics defined in the list and export it to Prometheus\r
+\r
     def get_flow_metrics(self):\r
         while self.start_monitoring:\r
 \r
@@ -329,13 +350,13 @@ class DCNetworkMonitor():
                 data['cookie_mask'] = COOKIE_MASK\r
 \r
                 if 'tx' in flow_dict['metric_key']:\r
-                    data['match'] = {'in_port':flow_dict['mon_port']}\r
+                    data['match'] = {'in_port': flow_dict['mon_port']}\r
                 elif 'rx' in flow_dict['metric_key']:\r
                     data['out_port'] = flow_dict['mon_port']\r
 \r
-\r
                 # query Ryu\r
-                ret = self.net.ryu_REST('stats/flow', dpid=flow_dict['switch_dpid'], data=data)\r
+                ret = self.net.ryu_REST(\r
+                    'stats/flow', dpid=flow_dict['switch_dpid'], data=data)\r
                 if isinstance(ret, dict):\r
                     flow_stat_dict = ret\r
                 elif isinstance(ret, basestring):\r
@@ -347,12 +368,13 @@ class DCNetworkMonitor():
 \r
                 self.set_flow_metric(flow_dict, flow_stat_dict)\r
 \r
-\r
             try:\r
                 if len(self.flow_metrics) > 0:\r
-                    pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
-            except Exception, e:\r
-                logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))\r
+                    pushadd_to_gateway(\r
+                        self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
+            except Exception as e:\r
+                logging.warning(\r
+                    "Pushgateway not reachable: {0} {1}".format(Exception, e))\r
 \r
             self.monitor_flow_lock.release()\r
             time.sleep(1)\r
@@ -363,7 +385,8 @@ class DCNetworkMonitor():
             self.monitor_lock.acquire()\r
 \r
             # group metrics by dpid to optimize the rest api calls\r
-            dpid_list = [metric_dict['switch_dpid'] for metric_dict in self.network_metrics]\r
+            dpid_list = [metric_dict['switch_dpid']\r
+                         for metric_dict in self.network_metrics]\r
             dpid_set = set(dpid_list)\r
 \r
             for dpid in dpid_set:\r
@@ -378,28 +401,30 @@ class DCNetworkMonitor():
                     port_stat_dict = None\r
 \r
                 metric_list = [metric_dict for metric_dict in self.network_metrics\r
-                               if int(metric_dict['switch_dpid'])==int(dpid)]\r
+                               if int(metric_dict['switch_dpid']) == int(dpid)]\r
 \r
                 for metric_dict in metric_list:\r
                     self.set_network_metric(metric_dict, port_stat_dict)\r
 \r
             try:\r
                 if len(self.network_metrics) > 0:\r
-                    pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
-            except Exception, e:\r
-                logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))\r
+                    pushadd_to_gateway(\r
+                        self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
+            except Exception as e:\r
+                logging.warning(\r
+                    "Pushgateway not reachable: {0} {1}".format(Exception, e))\r
 \r
             self.monitor_lock.release()\r
             time.sleep(1)\r
 \r
-    # add metric to the list to export to Prometheus, parse the Ryu port-stats reply\r
+    # add metric to the list to export to Prometheus, parse the Ryu port-stats\r
+    # reply\r
     def set_network_metric(self, metric_dict, port_stat_dict):\r
         # vnf tx is the datacenter switch rx and vice-versa\r
         metric_key = self.switch_tx_rx(metric_dict['metric_key'])\r
         switch_dpid = metric_dict['switch_dpid']\r
         vnf_name = metric_dict['vnf_name']\r
         vnf_interface = metric_dict['vnf_interface']\r
-        previous_measurement = metric_dict['previous_measurement']\r
         previous_monitor_time = metric_dict['previous_monitor_time']\r
         mon_port = metric_dict['mon_port']\r
         for port_stat in port_stat_dict[str(switch_dpid)]:\r
@@ -407,7 +432,8 @@ class DCNetworkMonitor():
             if port_stat['port_no'] == 'LOCAL':\r
                 continue\r
             if int(port_stat['port_no']) == int(mon_port):\r
-                port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)\r
+                port_uptime = port_stat['duration_sec'] + \\r
+                    port_stat['duration_nsec'] * 10 ** (-9)\r
                 this_measurement = int(port_stat[metric_key])\r
 \r
                 # set prometheus metric\r
@@ -418,27 +444,26 @@ class DCNetworkMonitor():
                 # also the rate is calculated here, but not used for now\r
                 # (rate can be easily queried from prometheus also)\r
                 if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:\r
-                    metric_dict['previous_measurement'] = int(port_stat[metric_key])\r
+                    metric_dict['previous_measurement'] = int(\r
+                        port_stat[metric_key])\r
                     metric_dict['previous_monitor_time'] = port_uptime\r
                     # do first measurement\r
-                    #time.sleep(1)\r
-                    #self.monitor_lock.release()\r
+                    # time.sleep(1)\r
+                    # self.monitor_lock.release()\r
                     # rate cannot be calculated yet (need a first measurement)\r
-                    metric_rate = None\r
-\r
-                else:\r
-                    time_delta = (port_uptime - metric_dict['previous_monitor_time'])\r
-                    #metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)\r
-\r
                 metric_dict['previous_measurement'] = this_measurement\r
                 metric_dict['previous_monitor_time'] = port_uptime\r
                 return\r
 \r
-        logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))\r
-        logging.exception('monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))\r
-        logging.exception('monitored network_metrics:{0}'.format(self.network_metrics))\r
+        logging.exception('metric {0} not found on {1}:{2}'.format(\r
+            metric_key, vnf_name, vnf_interface))\r
+        logging.exception(\r
+            'monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))\r
+        logging.exception(\r
+            'monitored network_metrics:{0}'.format(self.network_metrics))\r
         logging.exception('port dict:{0}'.format(port_stat_dict))\r
-        return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)\r
+        return 'metric {0} not found on {1}:{2}'.format(\r
+            metric_key, vnf_name, vnf_interface)\r
 \r
     def set_flow_metric(self, metric_dict, flow_stat_dict):\r
         # vnf tx is the datacenter switch rx and vice-versa\r
@@ -446,8 +471,6 @@ class DCNetworkMonitor():
         switch_dpid = metric_dict['switch_dpid']\r
         vnf_name = metric_dict['vnf_name']\r
         vnf_interface = metric_dict['vnf_interface']\r
-        previous_measurement = metric_dict['previous_measurement']\r
-        previous_monitor_time = metric_dict['previous_monitor_time']\r
         cookie = metric_dict['cookie']\r
 \r
         counter = 0\r
@@ -458,21 +481,24 @@ class DCNetworkMonitor():
                 counter += flow_stat['packet_count']\r
 \r
         # flow_uptime disabled for now (can give error)\r
-        #flow_stat = flow_stat_dict[str(switch_dpid)][0]\r
-        #flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)\r
+        # flow_stat = flow_stat_dict[str(switch_dpid)][0]\r
+        # flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)\r
 \r
         self.prom_metrics[metric_dict['metric_key']]. \\r
             labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
             set(counter)\r
 \r
     def start_Prometheus(self, port=9090):\r
-        # prometheus.yml configuration file is located in the same directory as this file\r
+        # prometheus.yml configuration file is located in the same directory as\r
+        # this file\r
         cmd = ["docker",\r
                "run",\r
                "--rm",\r
                "-p", "{0}:9090".format(port),\r
-               "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(os.path.dirname(os.path.abspath(__file__))),\r
-               "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(os.path.dirname(os.path.abspath(__file__))),\r
+               "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(\r
+                   os.path.dirname(os.path.abspath(__file__))),\r
+               "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(\r
+                   os.path.dirname(os.path.abspath(__file__))),\r
                "--name", "prometheus",\r
                "prom/prometheus"\r
                ]\r
@@ -502,12 +528,12 @@ class DCNetworkMonitor():
                "--volume=/var/lib/docker/:/var/lib/docker:ro",\r
                "--publish={0}:8080".format(port),\r
                "--name=cadvisor",\r
-               "--label",'com.containernet=""',\r
+               "--label", 'com.containernet=""',\r
                "--detach=true",\r
                "google/cadvisor:latest",\r
-               #"--storage_duration=1m0s",\r
-               #"--allow_dynamic_housekeeping=true",\r
-               #"--housekeeping_interval=1s",\r
+               # "--storage_duration=1m0s",\r
+               # "--allow_dynamic_housekeeping=true",\r
+               # "--housekeeping_interval=1s",\r
                ]\r
         logging.info('Start cAdvisor container {0}'.format(cmd))\r
         return Popen(cmd)\r
@@ -518,7 +544,8 @@ class DCNetworkMonitor():
         self.monitor_thread.join()\r
         self.monitor_flow_thread.join()\r
 \r
-        # these containers are used for monitoring but are started now outside of son-emu\r
+        # these containers are used for monitoring but are started now outside\r
+        # of son-emu\r
 \r
         if self.pushgateway_process is not None:\r
             logging.info('stopping pushgateway container')\r
@@ -528,28 +555,28 @@ class DCNetworkMonitor():
             logging.info('stopping cadvisor container')\r
             self._stop_container('cadvisor')\r
 \r
-    def switch_tx_rx(self,metric=''):\r
+    def switch_tx_rx(self, metric=''):\r
         # when monitoring vnfs, the tx of the datacenter switch is actually the rx of the vnf\r
-        # so we need to change the metric name to be consistent with the vnf rx or tx\r
+        # so we need to change the metric name to be consistent with the vnf rx\r
+        # or tx\r
         if 'tx' in metric:\r
-            metric = metric.replace('tx','rx')\r
+            metric = metric.replace('tx', 'rx')\r
         elif 'rx' in metric:\r
-            metric = metric.replace('rx','tx')\r
+            metric = metric.replace('rx', 'tx')\r
 \r
         return metric\r
 \r
     def _stop_container(self, name):\r
 \r
-        #container = self.dockercli.containers.get(name)\r
-        #container.stop()\r
-        #container.remove(force=True)\r
+        # container = self.dockercli.containers.get(name)\r
+        # container.stop()\r
+        # container.remove(force=True)\r
 \r
         # the only robust way to stop these containers is via Popen, it seems\r
         time.sleep(1)\r
         cmd = ['docker', 'rm', '-f', name]\r
         Popen(cmd)\r
 \r
-\r
     def update_skewmon(self, vnf_name, resource_name, action):\r
 \r
         ret = ''\r
@@ -558,11 +585,11 @@ class DCNetworkMonitor():
         configfile = open(config_file_path, 'a+')\r
         try:\r
             config = json.load(configfile)\r
-        except:\r
-            #not a valid json file or empty\r
+        except BaseException:\r
+            # not a valid json file or empty\r
             config = {}\r
 \r
-        #initialize config file\r
+        # initialize config file\r
         if len(self.skewmon_metrics) == 0:\r
             config = {}\r
         json.dump(config, configfile)\r
@@ -576,14 +603,16 @@ class DCNetworkMonitor():
         if action == 'start':\r
             # add a new vnf to monitor\r
             config[key] = dict(VNF_NAME=vnf_name,\r
-                                VNF_ID=vnf_id,\r
-                                VNF_METRIC=resource_name)\r
-            ret = 'adding to skewness monitor: {0} {1} '.format(vnf_name, resource_name)\r
+                               VNF_ID=vnf_id,\r
+                               VNF_METRIC=resource_name)\r
+            ret = 'adding to skewness monitor: {0} {1} '.format(\r
+                vnf_name, resource_name)\r
             logging.info(ret)\r
         elif action == 'stop':\r
             # remove vnf to monitor\r
             config.pop(key)\r
-            ret = 'removing from skewness monitor: {0} {1} '.format(vnf_name, resource_name)\r
+            ret = 'removing from skewness monitor: {0} {1} '.format(\r
+                vnf_name, resource_name)\r
             logging.info(ret)\r
 \r
         self.skewmon_metrics = config\r
@@ -604,8 +633,8 @@ class DCNetworkMonitor():
             # start container if not running\r
             ret += 'starting skewness monitor'\r
             logging.info('starting skewness monitor')\r
-            volumes = {'/sys/fs/cgroup':{'bind':'/sys/fs/cgroup', 'mode':'ro'},\r
-                       '/tmp/skewmon.cfg':{'bind':'/config.txt', 'mode':'ro'}}\r
+            volumes = {'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'},\r
+                       '/tmp/skewmon.cfg': {'bind': '/config.txt', 'mode': 'ro'}}\r
             self.dockercli.containers.run('skewmon',\r
                                           detach=True,\r
                                           volumes=volumes,\r
@@ -616,7 +645,8 @@ class DCNetworkMonitor():
             started = False\r
             wait_time = 0\r
             while not started:\r
-                list1 = self.dockercli.containers.list(filters={'status': 'running', 'name': 'prometheus'})\r
+                list1 = self.dockercli.containers.list(\r
+                    filters={'status': 'running', 'name': 'prometheus'})\r
                 if len(list1) >= 1:\r
                     time.sleep(1)\r
                     started = True\r
@@ -634,7 +664,6 @@ class DCNetworkMonitor():
         :return:\r
         """\r
 \r
-\r
         if vnf_list is None:\r
             vnf_list = []\r
         if not isinstance(vnf_list, list):\r
@@ -644,8 +673,8 @@ class DCNetworkMonitor():
 \r
         return self.start_xterm(vnf_list)\r
 \r
-\r
     # start an xterm for the specfified vnfs\r
+\r
     def start_xterm(self, vnf_names):\r
         # start xterm for all vnfs\r
         for vnf_name in vnf_names:\r
@@ -660,13 +689,3 @@ class DCNetworkMonitor():
         if len(vnf_names) == 0:\r
             ret = 'vnf list is empty, no xterms started'\r
         return ret\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
index ea9fd1c..005e272 100755 (executable)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 
 import site
@@ -36,13 +34,13 @@ import os
 import json
 
 from mininet.net import Containernet
-from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
+from mininet.node import OVSSwitch, OVSKernelSwitch, Docker, RemoteController
 from mininet.cli import CLI
 from mininet.link import TCLink
 from mininet.clean import cleanup
 import networkx as nx
 from emuvim.dcemulator.monitoring import DCNetworkMonitor
-from emuvim.dcemulator.node import Datacenter, EmulatorCompute, EmulatorExtSAP
+from emuvim.dcemulator.node import Datacenter, EmulatorCompute
 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
 
 LOG = logging.getLogger("dcemulator.net")
@@ -56,6 +54,7 @@ DEFAULT_PRIORITY = 1000
 # default cookie number for new flow-rules
 DEFAULT_COOKIE = 10
 
+
 class DCNetwork(Containernet):
     """
     Wraps the original Mininet/Containernet class and provides
@@ -65,7 +64,10 @@ class DCNetwork(Containernet):
     """
 
     def __init__(self, controller=RemoteController, monitor=False,
-                 enable_learning=False, # learning switch behavior of the default ovs switches icw Ryu controller can be turned off/on, needed for E-LAN functionality
+                 enable_learning=False,
+                 # learning switch behavior of the default ovs switches icw Ryu
+                 # controller can be turned off/on, needed for E-LAN
+                 # functionality
                  dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                  dc_emulation_max_mem=512,  # emulation max mem in MB
                  **kwargs):
@@ -78,13 +80,13 @@ class DCNetwork(Containernet):
         # members
         self.dcs = {}
         self.ryu_process = None
-        #list of deployed nsds.E_Lines and E_LANs (uploaded from the dummy gatekeeper)
+        # list of deployed nsds.E_Lines and E_LANs (uploaded from the dummy
+        # gatekeeper)
         self.deployed_nsds = []
         self.deployed_elines = []
         self.deployed_elans = []
         self.installed_chains = []
 
-
         # always cleanup environment before we start the emulator
         self.killRyu()
         cleanup()
@@ -95,7 +97,7 @@ class DCNetwork(Containernet):
 
         # default switch configuration
         enable_ryu_learning = False
-        if enable_learning :
+        if enable_learning:
             self.failMode = 'standalone'
             enable_ryu_learning = True
         else:
@@ -138,7 +140,8 @@ class DCNetwork(Containernet):
         """
         if label in self.dcs:
             raise Exception("Data center label already exists: %s" % label)
         """
         if label in self.dcs:
             raise Exception("Data center label already exists: %s" % label)
-        dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
+        dc = Datacenter(label, metadata=metadata,
+                        resource_log_path=resource_log_path)
         dc.net = self  # set reference to network
         self.dcs[label] = dc
         dc.create()  # finally create the data center in our Mininet instance
@@ -154,31 +157,32 @@ class DCNetwork(Containernet):
         assert node2 is not None
 
         # ensure type of node1
-        if isinstance( node1, basestring ):
+        if isinstance(node1, basestring):
             if node1 in self.dcs:
                 node1 = self.dcs[node1].switch
-        if isinstance( node1, Datacenter ):
+        if isinstance(node1, Datacenter):
             node1 = node1.switch
         # ensure type of node2
-        if isinstance( node2, basestring ):
+        if isinstance(node2, basestring):
             if node2 in self.dcs:
                 node2 = self.dcs[node2].switch
-        if isinstance( node2, Datacenter ):
+        if isinstance(node2, Datacenter):
             node2 = node2.switch
         # try to give containers a default IP
-        if isinstance( node1, Docker ):
+        if isinstance(node1, Docker):
             if "params1" not in params:
                 params["params1"] = {}
             if "ip" not in params["params1"]:
                 params["params1"]["ip"] = self.getNextIp()
             if "params1" not in params:
                 params["params1"] = {}
             if "ip" not in params["params1"]:
                 params["params1"]["ip"] = self.getNextIp()
-        if isinstance( node2, Docker ):
+        if isinstance(node2, Docker):
             if "params2" not in params:
                 params["params2"] = {}
             if "ip" not in params["params2"]:
                 params["params2"]["ip"] = self.getNextIp()
         # ensure that we allow TCLinks between data centers
         # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
             if "params2" not in params:
                 params["params2"] = {}
             if "ip" not in params["params2"]:
                 params["params2"]["ip"] = self.getNextIp()
         # ensure that we allow TCLinks between data centers
         # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
-        # see Containernet issue: https://github.com/mpeuster/containernet/issues/3
+        # see Containernet issue:
+        # https://github.com/mpeuster/containernet/issues/3
         if "cls" not in params:
             params["cls"] = TCLink
 
         if "cls" not in params:
             params["cls"] = TCLink
 
@@ -197,7 +201,6 @@ class DCNetwork(Containernet):
                 node2_port_id = params["params2"]["id"]
         node2_port_name = link.intf2.name
 
                 node2_port_id = params["params2"]["id"]
         node2_port_name = link.intf2.name
 
-
         # add edge and assigned port number to graph in both directions between node1 and node2
         # port_id: id given in descriptor (if available, otherwise same as port)
         # port: portnumber assigned by Containernet
@@ -215,23 +218,24 @@ class DCNetwork(Containernet):
                 attr_number = None
             attr_dict[attr] = attr_number
 
-
         attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
                       'src_port_name': node1_port_name,
-                     'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
+                      'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
                       'dst_port_name': node2_port_name}
         attr_dict2.update(attr_dict)
-        self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)
+        self.DCNetwork_graph.add_edge(
+            node1.name, node2.name, attr_dict=attr_dict2)
 
         attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
                       'src_port_name': node2_port_name,
-                     'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
+                      'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
                       'dst_port_name': node1_port_name}
         attr_dict2.update(attr_dict)
-        self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
+        self.DCNetwork_graph.add_edge(
+            node2.name, node1.name, attr_dict=attr_dict2)
 
         LOG.debug("addLink: n1={0} intf1={1} -- n2={2} intf2={3}".format(
 
         LOG.debug("addLink: n1={0} intf1={1} -- n2={2} intf2={3}".format(
-            str(node1),node1_port_name, str(node2), node2_port_name))
+            str(node1), node1_port_name, str(node2), node2_port_name))
 
         return link
 
@@ -248,21 +252,24 @@ class DCNetwork(Containernet):
         # TODO we might decrease the loglevel to debug:
         try:
             self.DCNetwork_graph.remove_edge(node2.name, node1.name)
-        except:
-            LOG.warning("%s, %s not found in DCNetwork_graph." % ((node2.name, node1.name)))
+        except BaseException:
+            LOG.warning("%s, %s not found in DCNetwork_graph." %
+                        ((node2.name, node1.name)))
         try:
             self.DCNetwork_graph.remove_edge(node1.name, node2.name)
-        except:
-            LOG.warning("%s, %s not found in DCNetwork_graph." % ((node1.name, node2.name)))
+        except BaseException:
+            LOG.warning("%s, %s not found in DCNetwork_graph." %
+                        ((node1.name, node2.name)))
 
-    def addDocker( self, label, **params ):
+    def addDocker(self, label, **params):
         """
         Wrapper for addDocker method to use custom container class.
         """
         self.DCNetwork_graph.add_node(label, type=params.get('type', 'docker'))
         """
         Wrapper for addDocker method to use custom container class.
         """
         self.DCNetwork_graph.add_node(label, type=params.get('type', 'docker'))
-        return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
+        return Containernet.addDocker(
+            self, label, cls=EmulatorCompute, **params)
 
-    def removeDocker( self, label, **params):
+    def removeDocker(self, label, **params):
         """
         Wrapper for removeDocker method to update graph.
         """
         """
         Wrapper for removeDocker method to update graph.
         """
@@ -274,7 +281,7 @@ class DCNetwork(Containernet):
         Wrapper for addExtSAP method to store SAP  also in graph.
         """
         # make sure that 'type' is set
-        params['type'] = params.get('type','sap_ext')
+        params['type'] = params.get('type', 'sap_ext')
         self.DCNetwork_graph.add_node(sap_name, type=params['type'])
         return Containernet.addExtSAP(self, sap_name, sap_ip, **params)
 
@@ -285,22 +292,24 @@ class DCNetwork(Containernet):
         self.DCNetwork_graph.remove_node(sap_name)
         return Containernet.removeExtSAP(self, sap_name)
 
-    def addSwitch( self, name, add_to_graph=True, **params ):
+    def addSwitch(self, name, add_to_graph=True, **params):
         """
         Wrapper for addSwitch method to store switch also in graph.
         """
 
         # add this switch to the global topology overview
         if add_to_graph:
         """
         Wrapper for addSwitch method to store switch also in graph.
         """
 
         # add this switch to the global topology overview
         if add_to_graph:
-            self.DCNetwork_graph.add_node(name, type=params.get('type','switch'))
+            self.DCNetwork_graph.add_node(
+                name, type=params.get('type', 'switch'))
 
         # set the learning switch behavior
-        if 'failMode' in params :
+        if 'failMode' in params:
             failMode = params['failMode']
-        else :
+        else:
             failMode = self.failMode
 
-        s = Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)
+        s = Containernet.addSwitch(
+            self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)
 
         return s
 
@@ -331,7 +340,6 @@ class DCNetwork(Containernet):
         # stop Ryu controller
         self.killRyu()
 
-
     def CLI(self):
         CLI(self)
 
@@ -343,7 +351,6 @@ class DCNetwork(Containernet):
         :return:
         """
         src_sw = None
-        src_sw_inport_nr = 0
         src_sw_inport_name = None
 
         # get a vlan tag for this E-LAN
@@ -364,20 +371,20 @@ class DCNetwork(Containernet):
                 link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
                 for link in link_dict:
                     if (link_dict[link]['src_port_id'] == vnf_src_interface or
-                                link_dict[link]['src_port_name'] == vnf_src_interface):  # Fix: we might also get interface names, e.g, from a son-emu-cli call
+                            link_dict[link]['src_port_name'] == vnf_src_interface):  # Fix: we might also get interface names, e.g, from a son-emu-cli call
                         # found the right link and connected switch
                         src_sw = connected_sw
-                        src_sw_inport_nr = link_dict[link]['dst_port_nr']
                         src_sw_inport_name = link_dict[link]['dst_port_name']
                         break
 
             # set the tag on the dc switch interface
-            LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(vnf_src_name, vnf_src_interface,vlan))
+            LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(
+                vnf_src_name, vnf_src_interface, vlan))
             switch_node = self.getNodeByName(src_sw)
             self._set_vlan_tag(switch_node, src_sw_inport_name, vlan)
 
     def _addMonitorFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None,
-                       tag=None, **kwargs):
+                        tag=None, **kwargs):
         """
         Add a monitoring flow entry that adds a special flowentry/counter at the begin or end of a chain.
         So this monitoring flowrule exists on top of a previously defined chain rule and uses the same vlan tag/routing.
         """
         Add a monitoring flow entry that adds a special flowentry/counter at the begin or end of a chain.
         So this monitoring flowrule exists on top of a previously defined chain rule and uses the same vlan tag/routing.
@@ -400,7 +407,7 @@ class DCNetwork(Containernet):
         LOG.debug("call AddMonitorFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
                   vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
 
         LOG.debug("call AddMonitorFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
                   vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
 
-        #check if port is specified (vnf:port)
+        # check if port is specified (vnf:port)
         if vnf_src_interface is None:
             # take first interface by default
             connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
@@ -443,31 +450,34 @@ class DCNetwork(Containernet):
         try:
             # returns the first found shortest path
             # if all shortest paths are wanted, use: all_shortest_paths
-            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
-        except:
+            path = nx.shortest_path(
+                self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+        except BaseException:
             LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
                 vnf_src_name, vnf_dst_name, src_sw, dst_sw))
             LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
             LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
             for e, v in self.DCNetwork_graph.edges():
                 LOG.debug("%r" % self.DCNetwork_graph[e][v])
             LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
                 vnf_src_name, vnf_dst_name, src_sw, dst_sw))
             LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
             LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
             for e, v in self.DCNetwork_graph.edges():
                 LOG.debug("%r" % self.DCNetwork_graph[e][v])
-            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+            return "No path could be found between {0} and {1}".format(
+                vnf_src_name, vnf_dst_name)
 
-        LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+        LOG.info("Path between {0} and {1}: {2}".format(
+            vnf_src_name, vnf_dst_name, path))
 
         current_hop = src_sw
         switch_inport_nr = src_sw_inport_nr
 
         cmd = kwargs.get('cmd')
 
-        #iterate through the path to install the flow-entries
-        for i in range(0,len(path)):
+        # iterate through the path to install the flow-entries
+        for i in range(0, len(path)):
             current_node = self.getNodeByName(current_hop)
 
-            if path.index(current_hop) < len(path)-1:
-                next_hop = path[path.index(current_hop)+1]
+            if path.index(current_hop) < len(path) - 1:
+                next_hop = path[path.index(current_hop) + 1]
             else:
-                #last switch reached
+                # last switch reached
                 next_hop = vnf_dst_name
 
             next_node = self.getNodeByName(next_hop)
@@ -475,7 +485,7 @@ class DCNetwork(Containernet):
             if next_hop == vnf_dst_name:
                 switch_outport_nr = dst_sw_outport_nr
                 LOG.info("end node reached: {0}".format(vnf_dst_name))
-            elif not isinstance( next_node, OVSSwitch ):
+            elif not isinstance(next_node, OVSSwitch):
                 LOG.info("Next node: {0} is not a switch".format(next_hop))
                 return "Next node: {0} is not a switch".format(next_hop)
             else:
                 LOG.info("Next node: {0} is not a switch".format(next_hop))
                 return "Next node: {0} is not a switch".format(next_hop)
             else:
@@ -483,9 +493,8 @@ class DCNetwork(Containernet):
                 index_edge_out = 0
                 switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
 
-
-           # set of entry via ovs-ofctl
-            if isinstance( current_node, OVSSwitch ):
+            # set of entry via ovs-ofctl
+            if isinstance(current_node, OVSSwitch):
                 kwargs['vlan'] = tag
                 kwargs['path'] = path
                 kwargs['current_hop'] = current_hop
@@ -497,33 +506,38 @@ class DCNetwork(Containernet):
                 monitor_placement = kwargs.get('monitor_placement').strip()
                 # put monitor flow at the dst switch
                 insert_flow = False
-                if monitor_placement == 'tx' and path.index(current_hop) == 0:  # first node:
+                # first node:
+                if monitor_placement == 'tx' and path.index(current_hop) == 0:
                     insert_flow = True
                 # put monitoring flow at the src switch
-                elif monitor_placement == 'rx' and path.index(current_hop) == len(path) - 1:  # last node:
+                # last node:
+                elif monitor_placement == 'rx' and path.index(current_hop) == len(path) - 1:
                     insert_flow = True
                 elif monitor_placement not in ['rx', 'tx']:
-                    LOG.exception('invalid monitor command: {0}'.format(monitor_placement))
-
+                    LOG.exception(
+                        'invalid monitor command: {0}'.format(monitor_placement))
 
                 if self.controller == RemoteController and insert_flow:
-                    ## set flow entry via ryu rest api
-                    self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+                    # set flow entry via ryu rest api
+                    self._set_flow_entry_ryu_rest(
+                        current_node, switch_inport_nr, switch_outport_nr, **kwargs)
                     break
                 elif insert_flow:
-                    ## set flow entry via ovs-ofctl
-                    self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+                    # set flow entry via ovs-ofctl
+                    self._set_flow_entry_dpctl(
+                        current_node, switch_inport_nr, switch_outport_nr, **kwargs)
                     break
 
             # take first link between switches by default
-            if isinstance( next_node, OVSSwitch ):
+            if isinstance(next_node, OVSSwitch):
                 switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
                 current_hop = next_hop
 
-        return "path {2} between {0} and {1}".format(vnf_src_name, vnf_dst_name, cmd)
-
+        return "path {2} between {0} and {1}".format(
+            vnf_src_name, vnf_dst_name, cmd)
 
-    def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
+    def setChain(self, vnf_src_name, vnf_dst_name,
+                 vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
         """
         Chain 2 vnf interfaces together by installing the flowrules in the switches along their path.
         Currently the path is found using the default networkx shortest path function.
         """
         Chain 2 vnf interfaces together by installing the flowrules in the switches along their path.
         Currently the path is found using the default networkx shortest path function.
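A usage sketch (VNF and interface names are illustrative; bidirectional, cmd, priority and cookie are the kwargs evaluated in this method and in _chainAddFlow()):

    # install a bidirectional chain between two VNF interfaces
    net.setChain("vnf1", "vnf2",
                 vnf_src_interface="vnf1-eth0", vnf_dst_interface="vnf2-eth0",
                 bidirectional=True, cmd="add-flow",
                 priority=1000, cookie=10)
    # calling it again with cmd="del-flows" removes the chain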
@@ -549,17 +563,19 @@ class DCNetwork(Containernet):
 
             # check if chain already exists
             found_chains = [chain_dict for chain_dict in self.installed_chains if
-             (chain_dict['vnf_src_name'] == vnf_src_name and chain_dict['vnf_src_interface'] == vnf_src_interface
-             and chain_dict['vnf_dst_name'] == vnf_dst_name and chain_dict['vnf_dst_interface'] == vnf_dst_interface)]
+                            (chain_dict['vnf_src_name'] == vnf_src_name and
+                             chain_dict['vnf_src_interface'] == vnf_src_interface and
+                             chain_dict['vnf_dst_name'] == vnf_dst_name and
+                             chain_dict['vnf_dst_interface'] == vnf_dst_interface)]
 
             if len(found_chains) > 0:
                 # this chain exists, so need an extra monitoring flow
                 # assume only 1 chain per vnf/interface pair
                 LOG.debug('*** installing monitoring chain on top of pre-defined chain from {0}:{1} -> {2}:{3}'.
-                            format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
+                          format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
                 tag = found_chains[0]['tag']
                 ret = self._addMonitorFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface,
-                                     tag=tag, table_id=0, **kwargs)
+                                           tag=tag, table_id=0, **kwargs)
                 return ret
             else:
                 # no chain existing (or E-LAN) -> install normal chain
@@ -567,22 +583,24 @@ class DCNetwork(Containernet):
                             format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
                 pass
 
-
         cmd = kwargs.get('cmd', 'add-flow')
         if cmd == 'add-flow' or cmd == 'del-flows':
-            ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
+            ret = self._chainAddFlow(
+                vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
             if kwargs.get('bidirectional'):
                 if kwargs.get('path') is not None:
                     kwargs['path'] = list(reversed(kwargs.get('path')))
-                ret = ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
+                ret = ret + '\n' + \
+                    self._chainAddFlow(
+                        vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
 
         else:
             ret = "Command unknown"
 
         return ret
 
-
-    def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
+    def _chainAddFlow(self, vnf_src_name, vnf_dst_name,
+                      vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
 
         src_sw = None
         src_sw_inport_nr = 0
@@ -594,7 +612,7 @@ class DCNetwork(Containernet):
         LOG.debug("call chainAddFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
                   vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
 
         LOG.debug("call chainAddFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
                   vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
 
-        #check if port is specified (vnf:port)
+        # check if port is specified (vnf:port)
         if vnf_src_interface is None:
             # take first interface by default
             connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
@@ -636,17 +654,20 @@ class DCNetwork(Containernet):
             try:
                 # returns the first found shortest path
                 # if all shortest paths are wanted, use: all_shortest_paths
-                path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
-            except:
+                path = nx.shortest_path(
+                    self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+            except BaseException:
                 LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
                     vnf_src_name, vnf_dst_name, src_sw, dst_sw))
                 LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
                 LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
                 for e, v in self.DCNetwork_graph.edges():
                     LOG.debug("%r" % self.DCNetwork_graph[e][v])
                 LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
                     vnf_src_name, vnf_dst_name, src_sw, dst_sw))
                 LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
                 LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
                 for e, v in self.DCNetwork_graph.edges():
                     LOG.debug("%r" % self.DCNetwork_graph[e][v])
-                return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+                return "No path could be found between {0} and {1}".format(
+                    vnf_src_name, vnf_dst_name)
 
-        LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+        LOG.info("Path between {0} and {1}: {2}".format(
+            vnf_src_name, vnf_dst_name, path))
 
         current_hop = src_sw
         switch_inport_nr = src_sw_inport_nr
@@ -671,8 +692,8 @@ class DCNetwork(Containernet):
             chain_dict['tag'] = vlan
             self.installed_chains.append(chain_dict)
 
-        #iterate through the path to install the flow-entries
-        for i in range(0,len(path)):
+        # iterate through the path to install the flow-entries
+        for i in range(0, len(path)):
             current_node = self.getNodeByName(current_hop)
 
             if i < len(path) - 1:
@@ -686,7 +707,7 @@ class DCNetwork(Containernet):
             if next_hop == vnf_dst_name:
                 switch_outport_nr = dst_sw_outport_nr
                 LOG.info("end node reached: {0}".format(vnf_dst_name))
-            elif not isinstance( next_node, OVSSwitch ):
+            elif not isinstance(next_node, OVSSwitch):
                 LOG.info("Next node: {0} is not a switch".format(next_hop))
                 return "Next node: {0} is not a switch".format(next_hop)
             else:
                 LOG.info("Next node: {0} is not a switch".format(next_hop))
                 return "Next node: {0} is not a switch".format(next_hop)
             else:
@@ -694,9 +715,8 @@ class DCNetwork(Containernet):
                 index_edge_out = 0
                 switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
 
-
-           # set OpenFlow entry
-            if isinstance( current_node, OVSSwitch ):
+            # set OpenFlow entry
+            if isinstance(current_node, OVSSwitch):
                 kwargs['vlan'] = vlan
                 kwargs['path'] = path
                 kwargs['current_hop'] = current_hop
@@ -705,28 +725,32 @@ class DCNetwork(Containernet):
                 kwargs['pathindex'] = i
 
                 if self.controller == RemoteController:
-                    ## set flow entry via ryu rest api
-                    self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+                    # set flow entry via ryu rest api
+                    self._set_flow_entry_ryu_rest(
+                        current_node, switch_inport_nr, switch_outport_nr, **kwargs)
                 else:
-                    ## set flow entry via ovs-ofctl
-                    self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+                    # set flow entry via ovs-ofctl
+                    self._set_flow_entry_dpctl(
+                        current_node, switch_inport_nr, switch_outport_nr, **kwargs)
 
             # take first link between switches by default
-            if isinstance( next_node, OVSSwitch ):
+            if isinstance(next_node, OVSSwitch):
                 switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
                 current_hop = next_hop
 
         flow_options = {
-            'priority':kwargs.get('priority', DEFAULT_PRIORITY),
-            'cookie':kwargs.get('cookie', DEFAULT_COOKIE),
-            'vlan':kwargs['vlan'],
-            'path':kwargs['path'],
-            'match_input':kwargs.get('match')
+            'priority': kwargs.get('priority', DEFAULT_PRIORITY),
+            'cookie': kwargs.get('cookie', DEFAULT_COOKIE),
+            'vlan': kwargs['vlan'],
+            'path': kwargs['path'],
+            'match_input': kwargs.get('match')
         }
         flow_options_str = json.dumps(flow_options, indent=1)
-        return "success: {2} between {0} and {1} with options: {3}".format(vnf_src_name, vnf_dst_name, cmd, flow_options_str)
+        return "success: {2} between {0} and {1} with options: {3}".format(
+            vnf_src_name, vnf_dst_name, cmd, flow_options_str)
 
-    def _set_flow_entry_ryu_rest(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
+    def _set_flow_entry_ryu_rest(
+            self, node, switch_inport_nr, switch_outport_nr, **kwargs):
         match = 'in_port=%s' % switch_inport_nr
 
         cookie = kwargs.get('cookie')
@@ -764,7 +788,7 @@ class DCNetwork(Containernet):
         # http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#add-a-flow-entry
         if cmd == 'add-flow':
             prefix = 'stats/flowentry/add'
-            if vlan != None:
+            if vlan is not None:
                 if index == 0:  # first node
                     # set vlan tag in ovs instance (to isolate E-LANs)
                     if not skip_vlan_tag:
@@ -773,8 +797,12 @@ class DCNetwork(Containernet):
                     # set vlan push action if more than 1 switch in the path
                     if len(path) > 1:
                         action = {}
-                        action['type'] = 'PUSH_VLAN'  # Push a new VLAN tag if a input frame is non-VLAN-tagged
-                        action['ethertype'] = 33024   # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
+                        # Push a new VLAN tag if a input frame is
+                        # non-VLAN-tagged
+                        action['type'] = 'PUSH_VLAN'
+                        # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged
+                        # frame
+                        action['ethertype'] = 33024
                         flow['actions'].append(action)
                         action = {}
                         action['type'] = 'SET_FIELD'
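To make the tagging logic concrete, a sketch of the kind of flow entry this branch builds and POSTs to Ryu's ofctl_rest endpoint stats/flowentry/add (dpid, ports and tag are illustrative; the SET_FIELD value assumes Ryu's vlan_vid convention with the OFPVID_PRESENT bit, i.e. 4096 + tag):

    flow = {
        "dpid": 1,                    # datapath id of the current switch
        "priority": 1000,             # DEFAULT_PRIORITY
        "cookie": 10,                 # DEFAULT_COOKIE
        "match": {"in_port": 1},
        "actions": [
            {"type": "PUSH_VLAN", "ethertype": 33024},                  # 0x8100, IEEE 802.1Q
            {"type": "SET_FIELD", "field": "vlan_vid", "value": 4196},  # tag 100 + 4096
            {"type": "OUTPUT", "port": 2},
        ],
    }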
@@ -809,7 +837,8 @@ class DCNetwork(Containernet):
 
             if cookie:
                 # TODO: add cookie_mask as argument
-                flow['cookie_mask'] = int('0xffffffffffffffff', 16)  # need full mask to match complete cookie
+                # need full mask to match complete cookie
+                flow['cookie_mask'] = int('0xffffffffffffffff', 16)
 
             action = {}
             action['type'] = 'OUTPUT'
@@ -820,10 +849,12 @@ class DCNetwork(Containernet):
         self.ryu_REST(prefix, data=flow)
 
     def _set_vlan_tag(self, node, switch_port, tag):
-        node.vsctl('set', 'port {0} tag={1}'.format(switch_port,tag))
-        LOG.debug("set vlan in switch: {0} in_port: {1} vlan tag: {2}".format(node.name, switch_port, tag))
+        node.vsctl('set', 'port {0} tag={1}'.format(switch_port, tag))
+        LOG.debug("set vlan in switch: {0} in_port: {1} vlan tag: {2}".format(
+            node.name, switch_port, tag))
 
-    def _set_flow_entry_dpctl(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
+    def _set_flow_entry_dpctl(
+            self, node, switch_inport_nr, switch_outport_nr, **kwargs):
 
         match = 'in_port=%s' % switch_inport_nr
 
@@ -842,9 +873,10 @@ class DCNetwork(Containernet):
             match = s.join([match, match_input])
         if cmd == 'add-flow':
             action = 'action=%s' % switch_outport_nr
-            if vlan != None:
-                if index == 0: # first node
-                    action = ('action=mod_vlan_vid:%s' % vlan) + (',output=%s' % switch_outport_nr)
+            if vlan is not None:
+                if index == 0:  # first node
+                    action = ('action=mod_vlan_vid:%s' % vlan) + \
+                        (',output=%s' % switch_outport_nr)
                     match = '-O OpenFlow13 ' + match
                 elif index == len(path) - 1:  # last node
                     match += ',dl_vlan=%s' % vlan
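Roughly, for the first switch in the path this branch produces strings like the following (ports and tag are illustrative), which node.dpctl() then hands to ovs-ofctl on that switch:

    cmd = 'add-flow'
    match = '-O OpenFlow13 ' + 'in_port=1'
    action = 'action=mod_vlan_vid:100' + ',output=2'
    # on the last switch of the path the match instead carries the tag,
    # e.g. 'in_port=3,dl_vlan=100'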
@@ -859,15 +891,16 @@ class DCNetwork(Containernet):
 
         node.dpctl(cmd, ofcmd)
         LOG.info("{3} in switch: {0} in_port: {1} out_port: {2}".format(node.name, switch_inport_nr,
-                                                                                 switch_outport_nr, cmd))
+                                                                        switch_outport_nr, cmd))
 
     # start Ryu Openflow controller as Remote Controller for the DCNetwork
     def startRyu(self, learning_switch=True):
         # start Ryu controller with rest-API
         python_install_path = site.getsitepackages()[0]
         # ryu default learning switch
-        #ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
-        #custom learning switch that installs a default NORMAL action in the ovs switches
+        # ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
+        # custom learning switch that installs a default NORMAL action in the
+        # ovs switches
         dir_path = os.path.dirname(os.path.realpath(__file__))
         ryu_path = dir_path + '/son_emu_simple_switch_13.py'
         ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
@@ -878,12 +911,14 @@ class DCNetwork(Containernet):
         ryu_cmd = 'ryu-manager'
         FNULL = open("/tmp/ryu.log", 'w')
         if learning_switch:
-            self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+            self.ryu_process = Popen(
+                [ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
             LOG.debug('starting ryu-controller with {0}'.format(ryu_path))
             LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
         else:
             # no learning switch, but with rest api
-            self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+            self.ryu_process = Popen(
+                [ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
             LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
         time.sleep(1)
 
@@ -910,7 +945,6 @@ class DCNetwork(Containernet):
         else:
             req = self.RyuSession.get(url)
 
-
         # do extra logging if status code is not 200 (OK)
         if req.status_code is not requests.codes.ok:
             logging.info(
@@ -918,9 +952,10 @@ class DCNetwork(Containernet):
                                                                                      req.encoding, req.text,
                                                                                      req.headers, req.history))
             LOG.info('url: {0}'.format(str(url)))
-            if data: LOG.info('POST: {0}'.format(str(data)))
-            LOG.info('status: {0} reason: {1}'.format(req.status_code, req.reason))
-
+            if data:
+                LOG.info('POST: {0}'.format(str(data)))
+            LOG.info('status: {0} reason: {1}'.format(
+                req.status_code, req.reason))
 
         if 'json' in req.headers['content-type']:
             ret = req.json()
@@ -929,9 +964,9 @@ class DCNetwork(Containernet):
         ret = req.text.rstrip()
         return ret
 
-
     # need to respect that some match fields must be integers
     # http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#description-of-match-and-actions
+
     def _parse_match(self, match):
         matches = match.split(',')
         dict = {}
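For clarity, a sketch of the conversion this helper performs (values are illustrative): the match string is split on ',' and each key=value pair is converted with int(value, 0) where possible, so decimal and hex numbers become integers while other values stay strings:

    net._parse_match('in_port=2,dl_vlan=0x64,dl_src=00:00:00:00:00:01')
    # -> {'in_port': 2, 'dl_vlan': 100, 'dl_src': '00:00:00:00:00:01'}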
@@ -940,13 +975,14 @@ class DCNetwork(Containernet):
             if len(match) == 2:
                 try:
                     m2 = int(match[1], 0)
-                except:
+                except BaseException:
                     m2 = match[1]
 
-                dict.update({match[0]:m2})
+                dict.update({match[0]: m2})
         return dict
 
-    def find_connected_dc_interface(self, vnf_src_name, vnf_src_interface=None):
+    def find_connected_dc_interface(
+            self, vnf_src_name, vnf_src_interface=None):
 
         if vnf_src_interface is None:
             # take first interface by default
@@ -958,9 +994,8 @@ class DCNetwork(Containernet):
             link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
             for link in link_dict:
                 if (link_dict[link]['src_port_id'] == vnf_src_interface or
-                        link_dict[link]['src_port_name'] == vnf_src_interface):  # Fix: we might also get interface names, e.g, from a son-emu-cli call
+                        link_dict[link]['src_port_name'] == vnf_src_interface):
+                    # Fix: we might also get interface names, e.g, from a son-emu-cli call
                     # found the right link and connected switch
-                    src_sw = connected_sw
-                    src_sw_inport_nr = link_dict[link]['dst_port_nr']
                     src_sw_inport_name = link_dict[link]['dst_port_name']
                     return src_sw_inport_name
src/emuvim/dcemulator/node.py
index 77a71a0..5a9c048 100755 (executable)
@@ -1,31 +1,29 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from mininet.node import Docker, OVSBridge
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from mininet.node import Docker
 from mininet.link import Link
 from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable
 import logging
@@ -38,6 +36,7 @@ LOG.setLevel(logging.DEBUG)
 DCDPID_BASE = 1000  # start of switch dpid's used for data center switches
 EXTSAPDPID_BASE = 2000  # start of switch dpid's used for external SAP switches
 
+
 class EmulatorCompute(Docker):
     """
     Emulator specific compute node class.
@@ -51,7 +50,8 @@ class EmulatorCompute(Docker):
             self, name, dimage, **kwargs):
         self.datacenter = kwargs.get("datacenter")  # pointer to current DC
         self.flavor_name = kwargs.get("flavor_name")
-        LOG.debug("Starting compute instance %r in data center %r" % (name, str(self.datacenter)))
+        LOG.debug("Starting compute instance %r in data center %r" %
+                  (name, str(self.datacenter)))
         # call original Docker.__init__
         Docker.__init__(self, name, dimage, **kwargs)
 
@@ -65,9 +65,11 @@ class EmulatorCompute(Docker):
         for i in self.intfList():
             vnf_name = self.name
             vnf_interface = str(i)
-            dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
+            dc_port_name = self.datacenter.net.find_connected_dc_interface(
+                vnf_name, vnf_interface)
             # format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
-            intf_dict = {'intf_name': str(i), 'ip': "{0}/{1}".format(i.IP(), i.prefixLen), 'netmask': i.prefixLen, 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+            intf_dict = {'intf_name': str(i), 'ip': "{0}/{1}".format(i.IP(), i.prefixLen), 'netmask': i.prefixLen,
+                         'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
             networkStatusList.append(intf_dict)
 
         return networkStatusList
@@ -91,7 +93,8 @@ class EmulatorCompute(Docker):
         status["state"] = self.dcli.inspect_container(self.dc)["State"]
         status["id"] = self.dcli.inspect_container(self.dc)["Id"]
         status["short_id"] = self.dcli.inspect_container(self.dc)["Id"][:12]
         status["state"] = self.dcli.inspect_container(self.dc)["State"]
         status["id"] = self.dcli.inspect_container(self.dc)["Id"]
         status["short_id"] = self.dcli.inspect_container(self.dc)["Id"][:12]
-        status["hostname"] = self.dcli.inspect_container(self.dc)["Config"]['Hostname']
+        status["hostname"] = self.dcli.inspect_container(self.dc)[
+            "Config"]['Hostname']
         status["datacenter"] = (None if self.datacenter is None
                                 else self.datacenter.label)
 
         status["datacenter"] = (None if self.datacenter is None
                                 else self.datacenter.label)
 
@@ -113,14 +116,16 @@ class EmulatorExtSAP(object):
         self.net = self.datacenter.net
         self.name = sap_name
 
-        LOG.debug("Starting ext SAP instance %r in data center %r" % (sap_name, str(self.datacenter)))
+        LOG.debug("Starting ext SAP instance %r in data center %r" %
+                  (sap_name, str(self.datacenter)))
 
         # create SAP as separate OVS switch with an assigned ip address
         self.ip = str(sap_net[1]) + '/' + str(sap_net.prefixlen)
         self.subnet = sap_net
         # allow connection to the external internet through the host
         params = dict(NAT=True)
-        self.switch = self.net.addExtSAP(sap_name, self.ip, dpid=hex(self._get_next_extSAP_dpid())[2:], **params)
+        self.switch = self.net.addExtSAP(sap_name, self.ip, dpid=hex(
+            self._get_next_extSAP_dpid())[2:], **params)
         self.switch.start()
 
     def _get_next_extSAP_dpid(self):
@@ -140,9 +145,11 @@ class EmulatorExtSAP(object):
             vnf_interface = str(i)
             if vnf_interface == 'lo':
                 continue
-            dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
+            dc_port_name = self.datacenter.net.find_connected_dc_interface(
+                vnf_name, vnf_interface)
             # format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
-            intf_dict = {'intf_name': str(i), 'ip': self.ip, 'netmask': i.prefixLen, 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+            intf_dict = {'intf_name': str(i), 'ip': self.ip, 'netmask': i.prefixLen, 'mac': i.MAC(
+            ), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
             networkStatusList.append(intf_dict)
 
         return networkStatusList
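For reference, each entry appended by the reformatted loop above is a plain dict; a purely illustrative entry for an external SAP could look as follows (field names taken from the intf_dict built above, all values invented):

    {'intf_name': 'sap1-eth0', 'ip': '10.10.1.1/24', 'netmask': 24,
     'mac': '00:00:00:00:00:01', 'up': True, 'status': 'OK',
     'dc_portname': 'dc1.s1-eth2'}
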
@@ -154,6 +161,7 @@ class EmulatorExtSAP(object):
             "network": self.getNetworkStatus()
         }
 
             "network": self.getNetworkStatus()
         }
 
+
 class Datacenter(object):
     """
     Represents a logical data center to which compute resources
@@ -174,7 +182,8 @@ class Datacenter(object):
         self.label = label
         # dict to store arbitrary metadata (e.g. latitude and longitude)
         self.metadata = metadata
-        # path to which resource information should be logged (e.g. for experiments). None = no logging
+        # path to which resource information should be logged (e.g. for
+        # experiments). None = no logging
         self.resource_log_path = resource_log_path
         # first prototype assumes one "bigswitch" per DC
         self.switch = None
@@ -208,7 +217,8 @@ class Datacenter(object):
     def start(self):
         pass
 
-    def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny", properties=dict(), **params):
+    def startCompute(self, name, image=None, command=None, network=None,
+                     flavor_name="tiny", properties=dict(), **params):
         """
         Create a new container as compute resource and connect it to this
         data center.
         """
         Create a new container as compute resource and connect it to this
         data center.
@@ -230,7 +240,8 @@ class Datacenter(object):
         if network is None:
             network = {}  # {"ip": "10.0.0.254/8"}
         if isinstance(network, dict):
-            network = [network]  # if we have only one network, put it in a list
+            # if we have only one network, put it in a list
+            network = [network]
         if isinstance(network, list):
             if len(network) < 1:
                 network.append({})
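Together with the reformatted startCompute() signature above, this normalization means callers may pass either a single network dict or a list of them; a minimal usage sketch (names, image and addresses are examples only, not part of this patch):

    # one default interface
    dc.startCompute("vnf1", image="ubuntu:trusty")
    # a single dict is wrapped into a one-element list internally
    dc.startCompute("vnf2", image="ubuntu:trusty",
                    network={"id": "intf1", "ip": "10.0.0.2/24"})
    # several interfaces: pass a list of dicts
    dc.startCompute("vnf3", image="ubuntu:trusty", flavor_name="small",
                    network=[{"id": "intf1", "ip": "10.0.0.3/24"},
                             {"id": "intf2", "ip": "10.0.1.3/24"}])
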
@@ -250,19 +261,19 @@ class Datacenter(object):
             dcmd=command,
             datacenter=self,
             flavor_name=flavor_name,
-            environment = env,
+            environment=env,
             **params
         )
 
-
-
         # apply resource limits to container if a resource model is defined
         if self._resource_model is not None:
             try:
                 self._resource_model.allocate(d)
-                self._resource_model.write_allocation_log(d, self.resource_log_path)
+                self._resource_model.write_allocation_log(
+                    d, self.resource_log_path)
             except NotEnoughResourcesAvailable as ex:
-                LOG.warning("Allocation of container %r was blocked by resource model." % name)
+                LOG.warning(
+                    "Allocation of container %r was blocked by resource model." % name)
                 LOG.info(ex.message)
                 # ensure that we remove the container
                 self.net.removeDocker(name)
@@ -272,11 +283,14 @@ class Datacenter(object):
         # if no --net option is given, network = [{}], so 1 empty dict in the list
         # this results in 1 default interface with a default ip address
         for nw in network:
-            # clean up network configuration (e.g. RTNETLINK does not allow ':' in intf names
+            # clean up network configuration (e.g. RTNETLINK does not allow ':'
+            # in intf names
             if nw.get("id") is not None:
                 nw["id"] = self._clean_ifname(nw["id"])
             if nw.get("id") is not None:
                 nw["id"] = self._clean_ifname(nw["id"])
-            # TODO we cannot use TCLink here (see: https://github.com/mpeuster/containernet/issues/3)
-            self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
+            # TODO we cannot use TCLink here (see:
+            # https://github.com/mpeuster/containernet/issues/3)
+            self.net.addLink(d, self.switch, params1=nw,
+                             cls=Link, intfName1=nw.get('id'))
         # do bookkeeping
         self.containers[name] = d
 
@@ -289,7 +303,8 @@ class Datacenter(object):
         assert name is not None
         if name not in self.containers:
             raise Exception("Container with name %s not found." % name)
-        LOG.debug("Stopping compute instance %r in data center %r" % (name, str(self)))
+        LOG.debug("Stopping compute instance %r in data center %r" %
+                  (name, str(self)))
 
         #  stop the monitored metrics
         if self.net.monitor_agent is not None:
@@ -298,7 +313,8 @@ class Datacenter(object):
         # call resource model and free resources
         if self._resource_model is not None:
             self._resource_model.free(self.containers[name])
-            self._resource_model.write_free_log(self.containers[name], self.resource_log_path)
+            self._resource_model.write_free_log(
+                self.containers[name], self.resource_log_path)
 
         # remove links
         self.net.removeLink(
@@ -318,7 +334,7 @@ class Datacenter(object):
 
     def removeExternalSAP(self, sap_name):
         sap_switch = self.extSAPs[sap_name].switch
-        #sap_switch = self.net.getNodeByName(sap_name)
+        # sap_switch = self.net.getNodeByName(sap_name)
         # remove link of SAP to the DC switch
         self.net.removeLink(link=None, node1=sap_switch, node2=self.switch)
         self.net.removeExtSAP(sap_name)
@@ -350,8 +366,8 @@ class Datacenter(object):
             "switch": self.switch.name,
             "n_running_containers": len(self.containers),
             "metadata": self.metadata,
             "switch": self.switch.name,
             "n_running_containers": len(self.containers),
             "metadata": self.metadata,
-            "vnf_list" : container_list,
-            "ext SAP list" : ext_saplist
+            "vnf_list": container_list,
+            "ext SAP list": ext_saplist
         }
 
     def assignResourceModel(self, rm):
@@ -361,7 +377,8 @@ class Datacenter(object):
         :return:
         """
         if self._resource_model is not None:
-            raise Exception("There is already an resource model assigned to this DC.")
+            raise Exception(
+                "There is already an resource model assigned to this DC.")
         self._resource_model = rm
         self.net.rm_registrar.register(self, rm)
         LOG.info("Assigned RM: %r to DC: %r" % (rm, self))
@@ -381,4 +398,3 @@ class Datacenter(object):
         name = name.replace(".", "-")
         name = name.replace("_", "-")
         return name
         name = name.replace(".", "-")
         name = name.replace("_", "-")
         return name
-
index 869eb1d..de100f7 100755 (executable)
@@ -1,34 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Base classes needed for resource models support.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 LOG = logging.getLogger("resourcemodel")
 LOG.setLevel(logging.DEBUG)
@@ -55,7 +49,8 @@ class ResourceModelRegistrar(object):
         :return: None
         """
         if dc in self._resource_models:
-            raise Exception("There is already an resource model assigned to this DC.")
+            raise Exception(
+                "There is already an resource model assigned to this DC.")
         self._resource_models[dc] = rm
         rm.registrar = self
         rm.dcs.append(dc)
@@ -75,7 +70,8 @@ class ResourceModelRegistrar(object):
         Total number of data centers that are connected to a resource model
         :return:
         """
-        return sum([len(rm.dcs) for rm in list(self._resource_models.itervalues())])
+        return sum([len(rm.dcs)
+                    for rm in list(self._resource_models.itervalues())])
 
 
 class ResourceFlavor(object):
@@ -83,6 +79,7 @@ class ResourceFlavor(object):
     Simple class that represents resource flavors (c.f. OpenStack).
     Can contain arbitrary metrics.
     """
+
     def __init__(self, name, metrics):
         self.name = name
         self._metrics = metrics
@@ -114,15 +111,15 @@ class BaseResourceModel(object):
         initialize some default flavours (naming/sizes inspired by OpenStack)
         """
         self.addFlavour(ResourceFlavor(
-            "tiny",  {"compute": 0.5, "memory": 32, "disk": 1}))
+            "tiny", {"compute": 0.5, "memory": 32, "disk": 1}))
         self.addFlavour(ResourceFlavor(
-            "small",  {"compute": 1.0, "memory": 128, "disk": 20}))
+            "small", {"compute": 1.0, "memory": 128, "disk": 20}))
         self.addFlavour(ResourceFlavor(
-            "medium",  {"compute": 4.0, "memory": 256, "disk": 40}))
+            "medium", {"compute": 4.0, "memory": 256, "disk": 40}))
         self.addFlavour(ResourceFlavor(
-            "large",  {"compute": 8.0, "memory": 512, "disk": 80}))
+            "large", {"compute": 8.0, "memory": 512, "disk": 80}))
         self.addFlavour(ResourceFlavor(
-            "xlarge",  {"compute": 16.0, "memory": 1024, "disk": 160}))
+            "xlarge", {"compute": 16.0, "memory": 1024, "disk": 160}))
 
     def addFlavour(self, fl):
         """
 
     def addFlavour(self, fl):
         """
@@ -139,7 +136,8 @@ class BaseResourceModel(object):
         This method has to be overwritten by a real resource model.
         :param d: Container object
         """
-        LOG.warning("Allocating in BaseResourceModel: %r with flavor: %r" % (d.name, d.flavor_name))
+        LOG.warning("Allocating in BaseResourceModel: %r with flavor: %r" % (
+            d.name, d.flavor_name))
         self._allocated_compute_instances[d.name] = d.flavor_name
 
     def free(self, d):
index 395c0ce..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index b812aad..01231fd 100755 (executable)
@@ -1,33 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Playground for resource models created by University of Paderborn.
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import time
 import json
 import logging
@@ -93,7 +88,8 @@ class UpbSimpleCloudDcRM(BaseResourceModel):
         fl_cu = self._get_flavor(d).get("compute")
         # check for over provisioning
         if self.dc_alloc_cu + fl_cu > self.dc_max_cu and self.raise_no_cpu_resources_left:
         fl_cu = self._get_flavor(d).get("compute")
         # check for over provisioning
         if self.dc_alloc_cu + fl_cu > self.dc_max_cu and self.raise_no_cpu_resources_left:
-            raise NotEnoughResourcesAvailable("Not enough compute resources left.")
+            raise NotEnoughResourcesAvailable(
+                "Not enough compute resources left.")
         self.dc_alloc_cu += fl_cu
 
     def _allocate_mem(self, d):
@@ -105,7 +101,8 @@ class UpbSimpleCloudDcRM(BaseResourceModel):
         fl_mu = self._get_flavor(d).get("memory")
         # check for over provisioning
         if self.dc_alloc_mu + fl_mu > self.dc_max_mu and self.raise_no_mem_resources_left:
         fl_mu = self._get_flavor(d).get("memory")
         # check for over provisioning
         if self.dc_alloc_mu + fl_mu > self.dc_max_mu and self.raise_no_mem_resources_left:
-            raise NotEnoughResourcesAvailable("Not enough memory resources left.")
+            raise NotEnoughResourcesAvailable(
+                "Not enough memory resources left.")
         self.dc_alloc_mu += fl_mu
 
     def free(self, d):
@@ -162,12 +159,14 @@ class UpbSimpleCloudDcRM(BaseResourceModel):
         # calculate cpu time fraction for container with given flavor
         cpu_time_percentage = self.single_cu * number_cu
         # calculate input values for CFS scheduler bandwidth limitation
-        cpu_period, cpu_quota = self._calculate_cpu_cfs_values(cpu_time_percentage)
+        cpu_period, cpu_quota = self._calculate_cpu_cfs_values(
+            cpu_time_percentage)
         # apply limits to container if changed
         if d.resources['cpu_period'] != cpu_period or d.resources['cpu_quota'] != cpu_quota:
             LOG.debug("Setting CPU limit for %r: cpu_quota = cpu_period * limit = %f * %f = %f (op_factor=%f)" % (
                       d.name, cpu_period, cpu_time_percentage, cpu_quota, self.cpu_op_factor))
-            d.updateCpuLimit(cpu_period=int(cpu_period), cpu_quota=int(cpu_quota))
+            d.updateCpuLimit(cpu_period=int(cpu_period),
+                             cpu_quota=int(cpu_quota))
 
     def _compute_single_cu(self):
         """
 
     def _compute_single_cu(self):
         """
@@ -177,7 +176,8 @@ class UpbSimpleCloudDcRM(BaseResourceModel):
         # get cpu time fraction for entire emulation
         e_cpu = self.registrar.e_cpu
         # calculate
-        return float(e_cpu) / sum([rm.dc_max_cu for rm in list(self.registrar.resource_models)])
+        return float(
+            e_cpu) / sum([rm.dc_max_cu for rm in list(self.registrar.resource_models)])
 
     def _calculate_cpu_cfs_values(self, cpu_time_percentage):
         """
 
     def _calculate_cpu_cfs_values(self, cpu_time_percentage):
         """
@@ -188,8 +188,10 @@ class UpbSimpleCloudDcRM(BaseResourceModel):
         # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
         # Attention minimum cpu_quota is 1ms (micro)
         cpu_period = CPU_PERIOD  # lets consider a fixed period of 1000000 microseconds for now
-        cpu_quota = cpu_period * cpu_time_percentage  # calculate the fraction of cpu time for this container
-        # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
+        # calculate the fraction of cpu time for this container
+        cpu_quota = cpu_period * cpu_time_percentage
+        # ATTENTION >= 1000 to avoid a invalid argument system error ... no
+        # idea why
         if cpu_quota < 1000:
             cpu_quota = 1000
             LOG.warning("Increased CPU quota to avoid system error.")
         if cpu_quota < 1000:
             cpu_quota = 1000
             LOG.warning("Increased CPU quota to avoid system error.")
@@ -205,14 +207,15 @@ class UpbSimpleCloudDcRM(BaseResourceModel):
         # get memory amount for entire emulation
         e_mem = self.registrar.e_mem
         # calculate amount of memory for a single mu
-        self.single_mu = float(e_mem) / sum([rm.dc_max_mu for rm in list(self.registrar.resource_models)])
+        self.single_mu = float(
+            e_mem) / sum([rm.dc_max_mu for rm in list(self.registrar.resource_models)])
         # calculate mem for given flavor
         mem_limit = self.single_mu * number_mu
         mem_limit = self._calculate_mem_limit_value(mem_limit)
         # apply to container if changed
         if d.resources['mem_limit'] != mem_limit:
             LOG.debug("Setting MEM limit for %r: mem_limit = %f MB (op_factor=%f)" %
-                      (d.name, mem_limit/1024/1024, self.mem_op_factor))
+                      (d.name, mem_limit / 1024 / 1024, self.mem_op_factor))
             d.updateMemoryLimit(mem_limit=mem_limit)
 
     def _calculate_mem_limit_value(self, mem_limit):
@@ -226,7 +229,7 @@ class UpbSimpleCloudDcRM(BaseResourceModel):
             mem_limit = 4
             LOG.warning("Increased MEM limit because it was less than 4.0 MB.")
         # to byte!
-        return int(mem_limit*1024*1024)
+        return int(mem_limit * 1024 * 1024)
 
     def get_state_dict(self):
         """
 
     def get_state_dict(self):
         """
@@ -281,14 +284,14 @@ class UpbSimpleCloudDcRM(BaseResourceModel):
         if path is None:
             return
         # we have a path: write out RM info
-        l = dict()
-        l["t"] = time.time()
-        l["container_state"] = d.getStatus()
-        l["action"] = action
-        l["rm_state"] = self.get_state_dict()
+        logd = dict()
+        logd["t"] = time.time()
+        logd["container_state"] = d.getStatus()
+        logd["action"] = action
+        logd["rm_state"] = self.get_state_dict()
         # append to logfile
         with open(path, "a") as f:
-            f.write("%s\n" % json.dumps(l))
+            f.write("%s\n" % json.dumps(logd))
 
 
 class UpbOverprovisioningCloudDcRM(UpbSimpleCloudDcRM):
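The l-to-logd rename in the log-writing hunk above presumably avoids flake8's E741 (ambiguous variable name 'l'); the log format itself is unchanged. One appended line in resource_log_path still looks roughly like this (keys from the code above, values and nested contents illustrative):

    {"t": 1526397300.12, "container_state": {...}, "action": "allocate", "rm_state": {...}}
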
@@ -299,6 +302,7 @@ class UpbOverprovisioningCloudDcRM(UpbSimpleCloudDcRM):
     containers whenever a data-center is over provisioned.
     """
     # TODO add parts for memory
+
     def __init__(self, *args, **kvargs):
         super(UpbOverprovisioningCloudDcRM, self).__init__(*args, **kvargs)
         self.raise_no_cpu_resources_left = False
@@ -312,15 +316,18 @@ class UpbOverprovisioningCloudDcRM(UpbSimpleCloudDcRM):
         # get cpu time fraction for entire emulation
         e_cpu = self.registrar.e_cpu
         # calculate over provisioning scale factor
-        self.cpu_op_factor = float(self.dc_max_cu) / (max(self.dc_max_cu, self.dc_alloc_cu))
+        self.cpu_op_factor = float(self.dc_max_cu) / \
+            (max(self.dc_max_cu, self.dc_alloc_cu))
         # calculate
-        return float(e_cpu) / sum([rm.dc_max_cu for rm in list(self.registrar.resource_models)]) * self.cpu_op_factor
+        return float(e_cpu) / sum([rm.dc_max_cu for rm in list(
+            self.registrar.resource_models)]) * self.cpu_op_factor
 
 
 class UpbDummyRM(UpbSimpleCloudDcRM):
     """
     No limits. But log allocations.
     """
+
     def __init__(self, *args, **kvargs):
         super(UpbDummyRM, self).__init__(*args, **kvargs)
         self.raise_no_cpu_resources_left = False
@@ -328,4 +335,3 @@ class UpbDummyRM(UpbSimpleCloudDcRM):
     def _apply_limits(self):
         # do nothing here
         pass
-
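For the over-provisioning variant whose calculation is rewrapped above, the factor simply scales every container's share down once a DC is overbooked instead of raising NotEnoughResourcesAvailable; with assumed numbers:

    # assumed: dc_max_cu = 4, but started flavors sum up to dc_alloc_cu = 8
    cpu_op_factor = 4.0 / max(4, 8)   # = 0.5
    # every container then receives half of its nominal CPU share
    # (raise_no_cpu_resources_left is False in this model)
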
index 53d1a2e..2adaf80 100755 (executable)
@@ -21,7 +21,8 @@ from ryu.ofproto import ofproto_v1_3
 from ryu.lib.packet import packet
 from ryu.lib.packet import ethernet
 from ryu.lib.packet import ether_types
-from ryu.topology.event import EventSwitchEnter, EventSwitchLeave, EventSwitchReconnected
+from ryu.topology.event import EventSwitchEnter, EventSwitchReconnected
+
 
 class SimpleSwitch13(app_manager.RyuApp):
     OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
@@ -44,12 +45,13 @@ class SimpleSwitch13(app_manager.RyuApp):
         # truncated packet data. In that case, we cannot output packets
         # correctly.  The bug has been fixed in OVS v2.1.0.
         match = parser.OFPMatch()
-        #actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
+        # actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
         #                                  ofproto.OFPCML_NO_BUFFER)]
         actions = [parser.OFPActionOutput(ofproto.OFPCML_NO_BUFFER)]
         self.add_flow(datapath, 0, match, actions)
 
-    def add_flow(self, datapath, priority, match, actions, buffer_id=None, table_id=0):
+    def add_flow(self, datapath, priority, match,
+                 actions, buffer_id=None, table_id=0):
         ofproto = datapath.ofproto
         parser = datapath.ofproto_parser
 
@@ -77,7 +79,6 @@ class SimpleSwitch13(app_manager.RyuApp):
         actions = [ofp_parser.OFPActionOutput(ofproto_v1_3.OFPP_NORMAL)]
         self.add_flow(datapath, 0, None, actions, table_id=0)
 
-
     @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
     def _packet_in_handler(self, ev):
         # If you hit this you might want to increase
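The body of add_flow() is not part of the hunks above; in Ryu's stock simple_switch_13 the arguments whose signature was wrapped are typically assembled into an OFPFlowMod roughly as sketched below (the table_id handling is an assumption added here to match the new keyword argument):

        inst = [parser.OFPInstructionActions(
            ofproto.OFPIT_APPLY_ACTIONS, actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst, table_id=table_id)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst,
                                    table_id=table_id)
        datapath.send_msg(mod)  # install the flow entry on the switch
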
index 9574d17..6ae0ae2 100644 (file)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 from mininet.log import setLogLevel
 from emuvim.dcemulator.net import DCNetwork
index e30ff19..7070cda 100644 (file)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 from mininet.log import setLogLevel
 from emuvim.dcemulator.net import DCNetwork
index 4f44ddd..164ecb4 100644 (file)
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import logging
 import time
 import signal
@@ -47,11 +45,12 @@ logging.getLogger('api.openstack.heat.parser').setLevel(logging.DEBUG)
 logging.getLogger('api.openstack.glance').setLevel(logging.DEBUG)
 logging.getLogger('api.openstack.helper').setLevel(logging.DEBUG)
 
+
 class DaemonTopology(object):
     """
     Topology with two datacenters:
 
-        dc1 <-- 50ms --> dc2 
+        dc1 <-- 50ms --> dc2
     """
 
     def __init__(self):
     """
 
     def __init__(self):
@@ -105,7 +104,7 @@ class DaemonTopology(object):
 
 
 def main():
-    t = DaemonTopology()
+    DaemonTopology()
 
 
 if __name__ == '__main__':
index 7e60065..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 0d994ca..bf0f4c0 100755 (executable)
@@ -1,35 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Helper module that implements helpers for test implementations.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import unittest
 import os
 import subprocess
@@ -39,6 +32,7 @@ from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
 from mininet.clean import cleanup
 from mininet.node import Controller
 
+
 class SimpleTestTopology(unittest.TestCase):
     """
         Helper class to do basic test setups.
@@ -72,7 +66,7 @@ class SimpleTestTopology(unittest.TestCase):
         # add some switches
         # start from s1 because ovs does not like to have dpid = 0
         # and switch name-number is being used by mininet to set the dpid
-        for i in range(1, nswitches+1):
+        for i in range(1, nswitches + 1):
             self.s.append(self.net.addSwitch('s%d' % i))
         # if specified, chain all switches
         if autolinkswitches:
@@ -92,14 +86,15 @@ class SimpleTestTopology(unittest.TestCase):
             self.h.append(self.net.addHost('h%d' % i))
         # add some dockers
         for i in range(0, ndockers):
-            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))
+            self.d.append(self.net.addDocker('d%d' %
+                                             i, dimage="ubuntu:trusty"))
 
     def startApi(self):
         self.api.start()
 
     def stopApi(self):
         self.api.stop()
-        
+
     def startNet(self):
         self.net.start()
 
@@ -119,7 +114,8 @@ class SimpleTestTopology(unittest.TestCase):
         """
         List the containers managed by containernet
         """
         """
         List the containers managed by containernet
         """
-        return self.getDockerCli().containers(filters={"label": "com.containernet"})
+        return self.getDockerCli().containers(
+            filters={"label": "com.containernet"})
 
     @staticmethod
     def setUp():
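The wrapped listContainers() call above filters on the com.containernet label that Containernet attaches to the containers it manages; roughly the same query can be issued directly with docker-py's low-level client (client construction is an assumption, getDockerCli() itself is not shown in this hunk):

    import docker

    cli = docker.APIClient(base_url='unix://var/run/docker.sock')  # assumed socket path
    emu_containers = cli.containers(filters={"label": "com.containernet"})
    print(len(emu_containers))
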
index c951665..3909e1a 100755 (executable)
@@ -1,35 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Helper module that implements helpers for test implementations.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import unittest
 import os
 import subprocess
@@ -40,6 +33,7 @@ from emuvim.api.openstack.openstack_api_endpoint import OpenstackApiEndpoint
 from mininet.clean import cleanup
 from mininet.node import Controller
 
+
 class ApiBaseOpenStack(unittest.TestCase):
     """
         Helper class to do basic test setups.
@@ -70,18 +64,19 @@ class ApiBaseOpenStack(unittest.TestCase):
         """
         self.net = DCNetwork(controller=controller, **kwargs)
         for i in range(0, ndatacenter):
         """
         self.net = DCNetwork(controller=controller, **kwargs)
         for i in range(0, ndatacenter):
-            self.api.append(OpenstackApiEndpoint("0.0.0.0", 15000+i))
+            self.api.append(OpenstackApiEndpoint("0.0.0.0", 15000 + i))
 
         # add some switches
         # start from s1 because ovs does not like to have dpid = 0
         # and switch name-number is being used by mininet to set the dpid
-        for i in range(1, nswitches+1):
+        for i in range(1, nswitches + 1):
             self.s.append(self.net.addSwitch('s%d' % i))
         # if specified, chain all switches
         if autolinkswitches:
             for i in range(0, len(self.s) - 1):
                 self.net.addLink(self.s[i], self.s[i + 1])
-            self.net.addLink(self.s[2], self.s[0]) # link switches s1, s2 and s3
+            # link switches s1, s2 and s3
+            self.net.addLink(self.s[2], self.s[0])
 
         # add some data centers
         for i in range(0, ndatacenter):
@@ -89,7 +84,8 @@ class ApiBaseOpenStack(unittest.TestCase):
                 self.net.addDatacenter(
                     'dc%d' % i,
                     metadata={"unittest_dc": i}))
-        self.net.addLink(self.dc[0].switch, self.s[0])  # link switches dc0.s1 with s1
+        # link switches dc0.s1 with s1
+        self.net.addLink(self.dc[0].switch, self.s[0])
         # connect data centers to the endpoint
         for i in range(0, ndatacenter):
             self.api[i].connect_datacenter(self.dc[i])
@@ -99,7 +95,8 @@ class ApiBaseOpenStack(unittest.TestCase):
             self.h.append(self.net.addHost('h%d' % i))
         # add some dockers
         for i in range(0, ndockers):
-            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))
+            self.d.append(self.net.addDocker('d%d' %
+                                             i, dimage="ubuntu:trusty"))
 
     def startApi(self):
         for i in self.api:
@@ -129,25 +126,26 @@ class ApiBaseOpenStack(unittest.TestCase):
         """
         List the containers managed by containernet
         """
         """
         List the containers managed by containernet
         """
-        return self.getDockerCli().containers(filters={"label": "com.containernet"})
+        return self.getDockerCli().containers(
+            filters={"label": "com.containernet"})
 
     @staticmethod
     def setUp():
         pass
 
-
     def tearDown(self):
         time.sleep(2)
         print('->>>>>>> tear everything down ->>>>>>>>>>>>>>>')
-        self.stopApi() # stop all flask threads
-        self.stopNet() # stop some mininet and containernet stuff
+        self.stopApi()  # stop all flask threads
+        self.stopNet()  # stop some mininet and containernet stuff
         cleanup()
         # make sure that all pending docker containers are killed
-        with open(os.devnull, 'w') as devnull: # kill a possibly running docker process that blocks the open ports
+        # kill a possibly running docker process that blocks the open ports
+        with open(os.devnull, 'w') as devnull:
             subprocess.call("kill $(netstat -npl | grep '15000' | grep -o -e'[0-9]\+/docker' | grep -o -e '[0-9]\+')",
             subprocess.call("kill $(netstat -npl | grep '15000' | grep -o -e'[0-9]\+/docker' | grep -o -e '[0-9]\+')",
-                stdout=devnull,
-                stderr=devnull,
-                shell=True)
+                            stdout=devnull,
+                            stderr=devnull,
+                            shell=True)
 
         with open(os.devnull, 'w') as devnull:
             subprocess.call(
@@ -156,8 +154,3 @@ class ApiBaseOpenStack(unittest.TestCase):
                 stderr=devnull,
                 shell=True)
         time.sleep(2)
-
-
-
-
-
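The tearDown() hunks above mostly re-wrap long calls into the hanging-indent continuation style that flake8 accepts. A minimal, self-contained sketch of that wrapping pattern, with an illustrative command that is not taken from the patch:

    import os
    import subprocess

    def silenced_call(cmd="echo cleanup"):
        # discard stdout/stderr, as the teardown above does with os.devnull
        with open(os.devnull, 'w') as devnull:
            subprocess.call(cmd,
                            stdout=devnull,  # continuation lines aligned
                            stderr=devnull,  # under the first argument
                            shell=True)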
index 397e2a1..6221765 100755 (executable)
@@ -1,35 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Helper module that implements helpers for test implementations.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import unittest
 import os
 import subprocess
@@ -38,6 +31,7 @@ from emuvim.dcemulator.net import DCNetwork
 from mininet.clean import cleanup
 from mininet.node import Controller
 
+
 class SimpleTestTopology(unittest.TestCase):
     """
         Helper class to do basic test setups.
@@ -70,7 +64,7 @@ class SimpleTestTopology(unittest.TestCase):
         # add some switches
         # start from s1 because ovs does not like to have dpid = 0
         # and switch name-number is being used by mininet to set the dpid
-        for i in range(1, nswitches+1):
+        for i in range(1, nswitches + 1):
             self.s.append(self.net.addSwitch('s%d' % i))
         # if specified, chain all switches
         if autolinkswitches:
@@ -87,7 +81,8 @@ class SimpleTestTopology(unittest.TestCase):
             self.h.append(self.net.addHost('h%d' % i))
         # add some dockers
         for i in range(0, ndockers):
-            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))
+            self.d.append(self.net.addDocker('d%d' %
+                                             i, dimage="ubuntu:trusty"))
 
     def startNet(self):
         self.net.start()
@@ -108,7 +103,8 @@ class SimpleTestTopology(unittest.TestCase):
         """
         List the containers managed by containernet
         """
-        return self.getDockerCli().containers(filters={"label": "com.containernet"})
+        return self.getDockerCli().containers(
+            filters={"label": "com.containernet"})
 
     @staticmethod
     def setUp():
@@ -123,4 +119,4 @@ class SimpleTestTopology(unittest.TestCase):
                 "sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
                 stdout=devnull,
                 stderr=devnull,
-                shell=True)
\ No newline at end of file
+                shell=True)
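As a usage note, a short sketch of how the SimpleTestTopology helper reformatted above is typically driven; the test name is hypothetical, the calls mirror the tests further below, and running it requires a containernet/mininet environment:

    import unittest

    from emuvim.test.base import SimpleTestTopology


    class MinimalTopologyTest(SimpleTestTopology):

        def testSingleDatacenterHosts(self):
            # one data center, two plain hosts, no extra switches or dockers
            self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
            self.net.addLink(self.dc[0], self.h[0])
            self.net.addLink(self.dc[0], self.h[1])
            self.startNet()
            self.assertTrue(len(self.net.hosts) == 2)
            self.stopNet()


    if __name__ == '__main__':
        unittest.main()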
index 7e60065..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 7e60065..d888119 100755 (executable)
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
index 94db55f..fc06a69 100755 (executable)
@@ -1,47 +1,36 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Test suite to automatically test emulator functionalities.
-Directly interacts with the emulator through the Mininet-like
-Python API.
-
-Does not test API endpoints. This is done in separated test suites.
-"""
-
-import time
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import unittest
 from emuvim.dcemulator.node import EmulatorCompute
 from emuvim.test.base import SimpleTestTopology
 from mininet.node import RemoteController
 
 
-#@unittest.skip("disabled topology tests for development")
-class testEmulatorTopology( SimpleTestTopology ):
+# @unittest.skip("disabled topology tests for development")
+class testEmulatorTopology(SimpleTestTopology):
     """
     Tests to check the topology API of the emulator.
     """
@@ -68,7 +57,7 @@ class testEmulatorTopology( SimpleTestTopology ):
         # stop Mininet network
         self.stopNet()
 
-    #@unittest.skip("disabled to test if CI fails because this is the first test.")
+    # @unittest.skip("disabled to test if CI fails because this is the first test.")
     def testMultipleDatacenterDirect(self):
         """
         Create a two data centers and interconnect them.
@@ -115,7 +104,8 @@ class testEmulatorTopology( SimpleTestTopology ):
         # stop Mininet network
         self.stopNet()
 
-class testEmulatorNetworking( SimpleTestTopology ):
+
+class testEmulatorNetworking(SimpleTestTopology):
 
     def testSDNChainingSingleService_withLearning(self):
         """
@@ -137,8 +127,10 @@ class testEmulatorNetworking( SimpleTestTopology ):
         self.startNet()
 
         # add compute resources
-        vnf1 = self.dc[0].startCompute("vnf1", network=[{'id':'intf1', 'ip':'10.0.10.1/24'}])
-        vnf2 = self.dc[1].startCompute("vnf2", network=[{'id':'intf2', 'ip':'10.0.10.2/24'}])
+        vnf1 = self.dc[0].startCompute(
+            "vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
+        vnf2 = self.dc[1].startCompute(
+            "vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
         # check number of running nodes
         self.assertTrue(len(self.getContainernetContainers()) == 2)
         self.assertTrue(len(self.net.hosts) == 2)
@@ -162,7 +154,8 @@ class testEmulatorNetworking( SimpleTestTopology ):
         # should be connected because learning = True
         self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
         # setup links
-        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='add-flow')
+        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
+                          bidirectional=True, cmd='add-flow')
         # should still be connected
         self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
         # stop Mininet network
@@ -188,8 +181,10 @@ class testEmulatorNetworking( SimpleTestTopology ):
         self.startNet()
 
         # add compute resources
-        vnf1 = self.dc[0].startCompute("vnf1", network=[{'id':'intf1', 'ip':'10.0.10.1/24'}])
-        vnf2 = self.dc[1].startCompute("vnf2", network=[{'id':'intf2', 'ip':'10.0.10.2/24'}])
+        vnf1 = self.dc[0].startCompute(
+            "vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
+        vnf2 = self.dc[1].startCompute(
+            "vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
         # check number of running nodes
         self.assertTrue(len(self.getContainernetContainers()) == 2)
         self.assertTrue(len(self.net.hosts) == 2)
@@ -213,7 +208,8 @@ class testEmulatorNetworking( SimpleTestTopology ):
         # should be not not yet connected
         self.assertTrue(self.net.ping([vnf1, vnf2]) > 0.0)
         # setup links
-        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='add-flow')
+        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
+                          bidirectional=True, cmd='add-flow')
         # check connectivity by using ping
         self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
         # stop Mininet network
@@ -239,19 +235,24 @@ class testEmulatorNetworking( SimpleTestTopology ):
         # start Mininet network
         self.startNet()
 
-        ## First Service
+        # First Service
         # add compute resources
-        vnf1 = self.dc[0].startCompute("vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
-        vnf2 = self.dc[1].startCompute("vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
+        vnf1 = self.dc[0].startCompute(
+            "vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
+        vnf2 = self.dc[1].startCompute(
+            "vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
         # setup links
-        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='add-flow', cookie=1)
+        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
+                          bidirectional=True, cmd='add-flow', cookie=1)
         # check connectivity by using ping
         self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
 
-        ## Second Service
+        # Second Service
         # add compute resources
-        vnf11 = self.dc[0].startCompute("vnf11", network=[{'id': 'intf1', 'ip': '10.0.20.1/24'}])
-        vnf22 = self.dc[1].startCompute("vnf22", network=[{'id': 'intf2', 'ip': '10.0.20.2/24'}])
+        vnf11 = self.dc[0].startCompute(
+            "vnf11", network=[{'id': 'intf1', 'ip': '10.0.20.1/24'}])
+        vnf22 = self.dc[1].startCompute(
+            "vnf22", network=[{'id': 'intf2', 'ip': '10.0.20.2/24'}])
 
         # check number of running nodes
         self.assertTrue(len(self.getContainernetContainers()) == 4)
@@ -259,7 +260,8 @@ class testEmulatorNetworking( SimpleTestTopology ):
         self.assertTrue(len(self.net.switches) == 5)
 
         # setup links
-        self.net.setChain('vnf11', 'vnf22', 'intf1', 'intf2', bidirectional=True, cmd='add-flow', cookie=2)
+        self.net.setChain('vnf11', 'vnf22', 'intf1', 'intf2',
+                          bidirectional=True, cmd='add-flow', cookie=2)
         # check connectivity by using ping
         self.assertTrue(self.net.ping([vnf11, vnf22]) <= 0.0)
         # check first service cannot ping second service
@@ -267,18 +269,21 @@ class testEmulatorNetworking( SimpleTestTopology ):
         self.assertTrue(self.net.ping([vnf2, vnf11]) > 0.0)
 
         # delete the first service chain
-        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='del-flows', cookie=1)
+        self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
+                          bidirectional=True, cmd='del-flows', cookie=1)
         # check connectivity of first service is down
         self.assertTrue(self.net.ping([vnf1, vnf2]) > 0.0)
-        #time.sleep(100)
+        # time.sleep(100)
         # check connectivity of second service is still up
         self.assertTrue(self.net.ping([vnf11, vnf22]) <= 0.0)
 
         # stop Mininet network
         self.stopNet()
 
-#@unittest.skip("disabled compute tests for development")
-class testEmulatorCompute( SimpleTestTopology ):
+# @unittest.skip("disabled compute tests for development")
+
+
+class testEmulatorCompute(SimpleTestTopology):
     """
     Tests to check the emulator's API to add and remove
     compute resources at runtime.
@@ -304,7 +309,8 @@ class testEmulatorCompute( SimpleTestTopology ):
         self.assertTrue(len(self.net.switches) == 1)
         # check compute list result
         self.assertTrue(len(self.dc[0].listCompute()) == 1)
-        self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+        self.assertTrue(isinstance(
+            self.dc[0].listCompute()[0], EmulatorCompute))
         self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
         # check connectivity by using ping
         self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
@@ -362,7 +368,8 @@ class testEmulatorCompute( SimpleTestTopology ):
         self.assertTrue(len(self.net.switches) == 1)
         # check compute list result
         self.assertTrue(len(self.dc[0].listCompute()) == 1)
-        self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+        self.assertTrue(isinstance(
+            self.dc[0].listCompute()[0], EmulatorCompute))
         self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
         # check connectivity by using ping
         self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
@@ -407,7 +414,7 @@ class testEmulatorCompute( SimpleTestTopology ):
         Test multiple, interleaved add and remove operations and ensure
         that always all expected compute instances are reachable.
         """
-                # create network
+        # create network
         self.createNet(
             nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
             autolinkswitches=True)
@@ -455,5 +462,6 @@ class testEmulatorCompute( SimpleTestTopology ):
         # stop Mininet network
         self.stopNet()
 
+
 if __name__ == '__main__':
     unittest.main()
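The networking tests above all exercise the same chaining call, now wrapped for flake8. A sketch of that pattern, assuming net is a started emuvim DCNetwork and the VNF and interface names already exist:

    def chain_and_unchain(net):
        # install a bidirectional flow between the two VNF interfaces,
        # tagged with cookie=1 so it can be removed selectively later
        net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
                     bidirectional=True, cmd='add-flow', cookie=1)
        # ... traffic between vnf1 and vnf2 is possible here ...
        # remove exactly this chain again via its cookie
        net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
                     bidirectional=True, cmd='del-flows', cookie=1)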
index 3e9037a..a9273fb 100755 (executable)
@@ -1,41 +1,33 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Test suite to automatically test emulator REST API endpoints.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import os
 import unittest
 import requests
 import simplejson as json
 import yaml
-import time
 
 from emuvim.test.api_base_openstack import ApiBaseOpenStack
 
@@ -47,7 +39,8 @@ class testRestApi(ApiBaseOpenStack):
 
     def setUp(self):
         # create network
-        self.createNet(nswitches=3, ndatacenter=2, nhosts=2, ndockers=0, autolinkswitches=True)
+        self.createNet(nswitches=3, ndatacenter=2, nhosts=2,
+                       ndockers=0, autolinkswitches=True)
 
         # setup links
         self.net.addLink(self.dc[0], self.h[0])
@@ -66,7 +59,8 @@ class testRestApi(ApiBaseOpenStack):
         print(" ")
 
         headers = {'Content-type': 'application/json'}
-        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_template_create_stack.yml")).read()
+        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(
+            __file__), "templates/test_heatapi_template_create_stack.yml")).read()
         url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
         requests.post(url, data=json.dumps(yaml.load(test_heatapi_template_create_stack)),
                       headers=headers)
@@ -76,11 +70,16 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:18774/"
         listapiversionnovaresponse = requests.get(url, headers=headers)
         self.assertEqual(listapiversionnovaresponse.status_code, 200)
-        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["id"], "v2.1")
-        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["status"], "CURRENT")
-        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["version"], "2.38")
-        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["min_version"], "2.1")
-        self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["updated"], "2013-07-23T11:33:21Z")
+        self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+                         "versions"][0]["id"], "v2.1")
+        self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+                         "versions"][0]["status"], "CURRENT")
+        self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+                         "versions"][0]["version"], "2.38")
+        self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+                         "versions"][0]["min_version"], "2.1")
+        self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+                         "versions"][0]["updated"], "2013-07-23T11:33:21Z")
         print(" ")
 
         print('->>>>>>> test Nova Version Show ->>>>>>>>>>>>>>>')
@@ -88,11 +87,16 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:18774/v2.1/id_bla"
         listapiversion21novaresponse = requests.get(url, headers=headers)
         self.assertEqual(listapiversion21novaresponse.status_code, 200)
-        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["id"], "v2.1")
-        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["status"], "CURRENT")
-        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["version"], "2.38")
-        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["min_version"], "2.1")
-        self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["updated"], "2013-07-23T11:33:21Z")
+        self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+                         "version"]["id"], "v2.1")
+        self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+                         "version"]["status"], "CURRENT")
+        self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+                         "version"]["version"], "2.38")
+        self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+                         "version"]["min_version"], "2.1")
+        self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+                         "version"]["updated"], "2013-07-23T11:33:21Z")
         print(" ")
 
         print('->>>>>>> test Nova Version List Server APIs ->>>>>>>>>>>>>>>')
@@ -100,12 +104,14 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
         listserverapisnovaresponse = requests.get(url, headers=headers)
         self.assertEqual(listserverapisnovaresponse.status_code, 200)
-        self.assertNotEqual(json.loads(listserverapisnovaresponse.content)["servers"][0]["name"], "")
+        self.assertNotEqual(json.loads(listserverapisnovaresponse.content)[
+                            "servers"][0]["name"], "")
         print(" ")
 
         print('->>>>>>> test Nova Delete Server APIs ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (json.loads(listserverapisnovaresponse.content)["servers"][0]["id"])
+        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (
+            json.loads(listserverapisnovaresponse.content)["servers"][0]["id"])
         deleteserverapisnovaresponse = requests.delete(url, headers=headers)
         self.assertEqual(deleteserverapisnovaresponse.status_code, 204)
         print(" ")
@@ -117,13 +123,13 @@ class testRestApi(ApiBaseOpenStack):
         self.assertEqual(deleteserverapisnovaresponse.status_code, 404)
         print(" ")
 
-
         print('->>>>>>> testNovaVersionListServerAPIs_withPortInformation ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18774/v2.1/id_bla/servers/andPorts"
         listserverapisnovaresponse = requests.get(url, headers=headers)
         self.assertEqual(listserverapisnovaresponse.status_code, 200)
-        self.assertNotEqual(json.loads(listserverapisnovaresponse.content)["servers"][0]["name"], "")
+        self.assertNotEqual(json.loads(listserverapisnovaresponse.content)[
+                            "servers"][0]["name"], "")
         print(" ")
 
         print('->>>>>>> test Nova List Flavors ->>>>>>>>>>>>>>>')
@@ -131,9 +137,12 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:18774/v2.1/id_bla/flavors"
         listflavorsresponse = requests.get(url, headers=headers)
         self.assertEqual(listflavorsresponse.status_code, 200)
-        self.assertIn(json.loads(listflavorsresponse.content)["flavors"][0]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
-        self.assertIn(json.loads(listflavorsresponse.content)["flavors"][1]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
-        self.assertIn(json.loads(listflavorsresponse.content)["flavors"][2]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+        self.assertIn(json.loads(listflavorsresponse.content)["flavors"][0]["name"], [
+                      "m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+        self.assertIn(json.loads(listflavorsresponse.content)["flavors"][1]["name"], [
+                      "m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+        self.assertIn(json.loads(listflavorsresponse.content)["flavors"][2]["name"], [
+                      "m1.nano", "m1.tiny", "m1.micro", "m1.small"])
         print(" ")
 
         print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')
@@ -143,8 +152,10 @@ class testRestApi(ApiBaseOpenStack):
                                            data='{"flavor":{"name": "testFlavor", "vcpus": "test_vcpus", "ram": 1024, "disk": 10}}',
                                            headers=headers)
         self.assertEqual(addflavorsresponse.status_code, 200)
-        self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["id"])
-        self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["links"][0]['href'])
+        self.assertIsNotNone(json.loads(
+            addflavorsresponse.content)["flavor"]["id"])
+        self.assertIsNotNone(json.loads(addflavorsresponse.content)[
+                             "flavor"]["links"][0]['href'])
         print(" ")
 
         print('->>>>>>> test Nova List Flavors Detail ->>>>>>>>>>>>>>>')
@@ -152,9 +163,12 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/detail"
         listflavorsdetailresponse = requests.get(url, headers=headers)
         self.assertEqual(listflavorsdetailresponse.status_code, 200)
-        self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
-        self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][1]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
-        self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][2]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+        self.assertIn(json.loads(listflavorsdetailresponse.content)[
+                      "flavors"][0]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+        self.assertIn(json.loads(listflavorsdetailresponse.content)[
+                      "flavors"][1]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+        self.assertIn(json.loads(listflavorsdetailresponse.content)[
+                      "flavors"][2]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
         print(" ")
 
         print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')
@@ -164,17 +178,21 @@ class testRestApi(ApiBaseOpenStack):
                                            data='{"flavor":{"name": "testFlavor", "vcpus": "test_vcpus", "ram": 1024, "disk": 10}}',
                                            headers=headers)
         self.assertEqual(addflavorsresponse.status_code, 200)
-        self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["id"])
-        self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["links"][0]['href'])
+        self.assertIsNotNone(json.loads(
+            addflavorsresponse.content)["flavor"]["id"])
+        self.assertIsNotNone(json.loads(addflavorsresponse.content)[
+                             "flavor"]["links"][0]['href'])
         print(" ")
 
         print('->>>>>>> test Nova List Flavor By Id ->>>>>>>>>>>>>>>')
 
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/%s" % (json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"])
+        url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/%s" % (
+            json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"])
         listflavorsbyidresponse = requests.get(url, headers=headers)
         self.assertEqual(listflavorsbyidresponse.status_code, 200)
-        self.assertEqual(json.loads(listflavorsbyidresponse.content)["flavor"]["id"], json.loads(listflavorsdetailresponse.content)["flavors"][0]["id"])
+        self.assertEqual(json.loads(listflavorsbyidresponse.content)[
+                         "flavor"]["id"], json.loads(listflavorsdetailresponse.content)["flavors"][0]["id"])
         print(" ")
 
         print('->>>>>>> test Nova List Images ->>>>>>>>>>>>>>>')
@@ -183,10 +201,6 @@ class testRestApi(ApiBaseOpenStack):
         listimagesresponse = requests.get(url, headers=headers)
         self.assertEqual(listimagesresponse.status_code, 200)
         print(listimagesresponse.content)
-        # deactivated: highly depends on the environment in which the tests are executed. one cannot make such an assumption.
-        #self.assertIn(json.loads(listimagesresponse.content)["images"][0]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
-        #self.assertIn(json.loads(listimagesresponse.content)["images"][1]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
-        #self.assertIn(json.loads(listimagesresponse.content)["images"][2]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
         print(" ")
 
         print('->>>>>>> test Nova List Images Details ->>>>>>>>>>>>>>>')
@@ -194,19 +208,18 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:18774/v2.1/id_bla/images/detail"
         listimagesdetailsresponse = requests.get(url, headers=headers)
         self.assertEqual(listimagesdetailsresponse.status_code, 200)
-        # deactivated: highly depends on the environment in which the tests are executed. one cannot make such an assumption.
-        #self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][0]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
-        #self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][1]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
-        #self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][2]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
-        self.assertEqual(json.loads(listimagesdetailsresponse.content)["images"][0]["metadata"]["architecture"],"x86_64")
+        self.assertEqual(json.loads(listimagesdetailsresponse.content)[
+                         "images"][0]["metadata"]["architecture"], "x86_64")
         print(" ")
 
         print('->>>>>>> test Nova List Image By Id ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:18774/v2.1/id_bla/images/%s" % (json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
+        url = "http://0.0.0.0:18774/v2.1/id_bla/images/%s" % (
+            json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
         listimagebyidresponse = requests.get(url, headers=headers)
         self.assertEqual(listimagebyidresponse.status_code, 200)
-        self.assertEqual(json.loads(listimagebyidresponse.content)["image"]["id"],json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
+        self.assertEqual(json.loads(listimagebyidresponse.content)[
+                         "image"]["id"], json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
         print(" ")
 
         print('->>>>>>> test Nova List Image By Non-Existend Id ->>>>>>>>>>>>>>>')
@@ -216,7 +229,7 @@ class testRestApi(ApiBaseOpenStack):
         self.assertEqual(listimagebynonexistingidresponse.status_code, 404)
         print(" ")
 
-        #find ubuntu id
+        # find ubuntu id
         for image in json.loads(listimagesresponse.content)["images"]:
             if image["name"] == "ubuntu:trusty":
                 ubuntu_image_id = image["id"]
@@ -224,16 +237,19 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>> test Nova Create Server Instance ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
-        data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
+        data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (
+            json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
         createserverinstance = requests.post(url, data=data, headers=headers)
         self.assertEqual(createserverinstance.status_code, 200)
-        self.assertEqual(json.loads(createserverinstance.content)["server"]["image"]["id"], ubuntu_image_id)
+        self.assertEqual(json.loads(createserverinstance.content)[
+                         "server"]["image"]["id"], ubuntu_image_id)
         print(" ")
 
         print('->>>>>>> test Nova Create Server Instance With Already Existing Name ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
-        data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
+        data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (
+            json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
         createserverinstance = requests.post(url, data=data, headers=headers)
         self.assertEqual(createserverinstance.status_code, 409)
         print(" ")
@@ -243,21 +259,25 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:18774/v2.1/id_bla/servers/detail"
         listserverapisdetailedresponse = requests.get(url, headers=headers)
         self.assertEqual(listserverapisdetailedresponse.status_code, 200)
-        self.assertEqual(json.loads(listserverapisdetailedresponse.content)["servers"][0]["status"], "ACTIVE")
+        self.assertEqual(json.loads(listserverapisdetailedresponse.content)[
+                         "servers"][0]["status"], "ACTIVE")
         print(" ")
 
         print('->>>>>>> test Nova Show Server Details ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (json.loads(listserverapisdetailedresponse.content)["servers"][0]["id"])
+        url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (
+            json.loads(listserverapisdetailedresponse.content)["servers"][0]["id"])
         listserverdetailsresponse = requests.get(url, headers=headers)
         self.assertEqual(listserverdetailsresponse.status_code, 200)
-        self.assertEqual(json.loads(listserverdetailsresponse.content)["server"]["flavor"]["links"][0]["rel"], "bookmark")
+        self.assertEqual(json.loads(listserverdetailsresponse.content)[
+                         "server"]["flavor"]["links"][0]["rel"], "bookmark")
         print(" ")
 
         print('->>>>>>> test Nova Show Non-Existing Server Details ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18774/v2.1/id_bla/servers/non_existing_server_id"
-        listnonexistingserverdetailsresponse = requests.get(url, headers=headers)
+        listnonexistingserverdetailsresponse = requests.get(
+            url, headers=headers)
         self.assertEqual(listnonexistingserverdetailsresponse.status_code, 404)
         print(" ")
 
@@ -267,9 +287,11 @@ class testRestApi(ApiBaseOpenStack):
         print(" ")
 
         headers = {'Content-type': 'application/json'}
-        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_template_create_stack.yml")).read()
+        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(
+            __file__), "templates/test_heatapi_template_create_stack.yml")).read()
         url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
-        requests.post(url, data=json.dumps(yaml.load(test_heatapi_template_create_stack)), headers=headers)
+        requests.post(url, data=json.dumps(
+            yaml.load(test_heatapi_template_create_stack)), headers=headers)
         # test_heatapi_keystone_get_token = open("test_heatapi_keystone_get_token.json").read()
 
         print('->>>>>>> test Neutron List Versions ->>>>>>>>>>>>>>>')
@@ -277,7 +299,8 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/"
         listapiversionstackresponse = requests.get(url, headers=headers)
         self.assertEqual(listapiversionstackresponse.status_code, 200)
-        self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"][0]["id"], "v2.0")
+        self.assertEqual(json.loads(listapiversionstackresponse.content)[
+                         "versions"][0]["id"], "v2.0")
         print(" ")
 
         print('->>>>>>> test Neutron Show API v2.0 ->>>>>>>>>>>>>>>')
@@ -285,9 +308,12 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0"
         listapiversionv20response = requests.get(url, headers=headers)
         self.assertEqual(listapiversionv20response.status_code, 200)
-        self.assertEqual(json.loads(listapiversionv20response.content)["resources"][0]["name"], "subnet")
-        self.assertEqual(json.loads(listapiversionv20response.content)["resources"][1]["name"], "network")
-        self.assertEqual(json.loads(listapiversionv20response.content)["resources"][2]["name"], "ports")
+        self.assertEqual(json.loads(listapiversionv20response.content)[
+                         "resources"][0]["name"], "subnet")
+        self.assertEqual(json.loads(listapiversionv20response.content)[
+                         "resources"][1]["name"], "network")
+        self.assertEqual(json.loads(listapiversionv20response.content)[
+                         "resources"][2]["name"], "ports")
         print(" ")
 
         print('->>>>>>> test Neutron List Networks ->>>>>>>>>>>>>>>')
@@ -295,50 +321,62 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0/networks"
         listnetworksesponse1 = requests.get(url, headers=headers)
         self.assertEqual(listnetworksesponse1.status_code, 200)
-        self.assertEqual(json.loads(listnetworksesponse1.content)["networks"][0]["status"], "ACTIVE")
-        listNetworksId = json.loads(listnetworksesponse1.content)["networks"][0]["id"]
-        listNetworksName = json.loads(listnetworksesponse1.content)["networks"][0]["name"]
-        listNetworksId2 = json.loads(listnetworksesponse1.content)["networks"][1]["id"]
+        self.assertEqual(json.loads(listnetworksesponse1.content)[
+                         "networks"][0]["status"], "ACTIVE")
+        listNetworksId = json.loads(listnetworksesponse1.content)[
+            "networks"][0]["id"]
+        listNetworksName = json.loads(listnetworksesponse1.content)[
+            "networks"][0]["name"]
+        listNetworksId2 = json.loads(listnetworksesponse1.content)[
+            "networks"][1]["id"]
         print(" ")
 
         print('->>>>>>> test Neutron List Non-Existing Networks ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/networks?name=non_existent_network_name"
-        listnetworksesponse2 = requests.get(url,headers=headers)
+        listnetworksesponse2 = requests.get(url, headers=headers)
         self.assertEqual(listnetworksesponse2.status_code, 404)
         print(" ")
 
         print('->>>>>>> test Neutron List Networks By Name ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/networks?name=" + listNetworksName #tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+        # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+        url = "http://0.0.0.0:19696/v2.0/networks?name=" + listNetworksName
         listnetworksesponse3 = requests.get(url, headers=headers)
         self.assertEqual(listnetworksesponse3.status_code, 200)
-        self.assertEqual(json.loads(listnetworksesponse3.content)["networks"][0]["name"], listNetworksName)
+        self.assertEqual(json.loads(listnetworksesponse3.content)[
+                         "networks"][0]["name"], listNetworksName)
         print(" ")
 
         print('->>>>>>> test Neutron List Networks By Id ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId  # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+        # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+        url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId
         listnetworksesponse4 = requests.get(url, headers=headers)
         self.assertEqual(listnetworksesponse4.status_code, 200)
-        self.assertEqual(json.loads(listnetworksesponse4.content)["networks"][0]["id"], listNetworksId)
+        self.assertEqual(json.loads(listnetworksesponse4.content)[
+                         "networks"][0]["id"], listNetworksId)
         print(" ")
 
         print('->>>>>>> test Neutron List Networks By Multiple Ids ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId + "&id="+ listNetworksId2 # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+        url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId + "&id=" + \
+            listNetworksId2  # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
         listnetworksesponse5 = requests.get(url, headers=headers)
         self.assertEqual(listnetworksesponse5.status_code, 200)
-        self.assertEqual(json.loads(listnetworksesponse5.content)["networks"][0]["id"], listNetworksId)
-        self.assertEqual(json.loads(listnetworksesponse5.content)["networks"][1]["id"], listNetworksId2)
+        self.assertEqual(json.loads(listnetworksesponse5.content)[
+                         "networks"][0]["id"], listNetworksId)
+        self.assertEqual(json.loads(listnetworksesponse5.content)[
+                         "networks"][1]["id"], listNetworksId2)
         print(" ")
 
         print('->>>>>>> test Neutron Show Network ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/networks/"+listNetworksId
+        url = "http://0.0.0.0:19696/v2.0/networks/" + listNetworksId
         shownetworksesponse = requests.get(url, headers=headers)
         self.assertEqual(shownetworksesponse.status_code, 200)
-        self.assertEqual(json.loads(shownetworksesponse.content)["network"]["status"], "ACTIVE")
+        self.assertEqual(json.loads(shownetworksesponse.content)[
+                         "network"]["status"], "ACTIVE")
         print(" ")
 
         print('->>>>>>> test Neutron Show Network Non-ExistendNetwork ->>>>>>>>>>>>>>>')
@@ -351,31 +389,39 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>> test Neutron Create Network ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/networks"
-        createnetworkresponse = requests.post(url, data='{"network": {"name": "sample_network","admin_state_up": true}}', headers=headers)
+        createnetworkresponse = requests.post(
+            url, data='{"network": {"name": "sample_network","admin_state_up": true}}', headers=headers)
         self.assertEqual(createnetworkresponse.status_code, 201)
-        self.assertEqual(json.loads(createnetworkresponse.content)["network"]["status"], "ACTIVE")
+        self.assertEqual(json.loads(createnetworkresponse.content)[
+                         "network"]["status"], "ACTIVE")
         print(" ")
 
         print('->>>>>>> test Neutron Create Network With Existing Name ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/networks"
-        createnetworkresponsefailure = requests.post(url,data='{"network": {"name": "sample_network","admin_state_up": true}}',headers=headers)
+        createnetworkresponsefailure = requests.post(
+            url, data='{"network": {"name": "sample_network","admin_state_up": true}}', headers=headers)
         self.assertEqual(createnetworkresponsefailure.status_code, 400)
         print(" ")
 
         print('->>>>>>> test Neutron Update Network ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/networks/%s" % (json.loads(createnetworkresponse.content)["network"]["id"])
-        updatenetworkresponse = requests.put(url, data='{"network": {"status": "ACTIVE", "admin_state_up":true, "tenant_id":"abcd123", "name": "sample_network_new_name", "shared":false}}' , headers=headers)
+        url = "http://0.0.0.0:19696/v2.0/networks/%s" % (
+            json.loads(createnetworkresponse.content)["network"]["id"])
+        updatenetworkresponse = requests.put(
+            url, data='{"network": {"status": "ACTIVE", "admin_state_up":true, "tenant_id":"abcd123", "name": "sample_network_new_name", "shared":false}}', headers=headers)
         self.assertEqual(updatenetworkresponse.status_code, 200)
-        self.assertEqual(json.loads(updatenetworkresponse.content)["network"]["name"], "sample_network_new_name")
-        self.assertEqual(json.loads(updatenetworkresponse.content)["network"]["tenant_id"], "abcd123")
+        self.assertEqual(json.loads(updatenetworkresponse.content)[
+                         "network"]["name"], "sample_network_new_name")
+        self.assertEqual(json.loads(updatenetworkresponse.content)[
+                         "network"]["tenant_id"], "abcd123")
         print(" ")
 
         print('->>>>>>> test Neutron Update Non-Existing Network ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/networks/non-existing-name123"
-        updatenetworkresponse = requests.put(url, data='{"network": {"name": "sample_network_new_name"}}', headers=headers)
+        updatenetworkresponse = requests.put(
+            url, data='{"network": {"name": "sample_network_new_name"}}', headers=headers)
         self.assertEqual(updatenetworkresponse.status_code, 404)
         print(" ")
 
@@ -383,19 +429,23 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/subnets"
         listsubnetsresponse = requests.get(url, headers=headers)
-        listSubnetName = json.loads(listsubnetsresponse.content)["subnets"][0]["name"]
-        listSubnetId = json.loads(listsubnetsresponse.content)["subnets"][0]["id"]
-        listSubnetId2 = json.loads(listsubnetsresponse.content)["subnets"][1]["id"]
+        listSubnetName = json.loads(listsubnetsresponse.content)[
+            "subnets"][0]["name"]
+        listSubnetId = json.loads(listsubnetsresponse.content)[
+            "subnets"][0]["id"]
+        listSubnetId2 = json.loads(listsubnetsresponse.content)[
+            "subnets"][1]["id"]
         self.assertEqual(listsubnetsresponse.status_code, 200)
         self.assertNotIn('None', listSubnetName)
         print(" ")
 
         print('->>>>>>> test Neutron List Subnets By Name ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/subnets?name="+listSubnetName
+        url = "http://0.0.0.0:19696/v2.0/subnets?name=" + listSubnetName
         listsubnetByNameresponse = requests.get(url, headers=headers)
         self.assertEqual(listsubnetByNameresponse.status_code, 200)
-        self.assertNotIn('None', json.loads(listsubnetByNameresponse.content)["subnets"][0]["name"])
+        self.assertNotIn('None', json.loads(
+            listsubnetByNameresponse.content)["subnets"][0]["name"])
         print(" ")
 
         print('->>>>>>> test Neutron List Subnets By Id ->>>>>>>>>>>>>>>')
@@ -403,25 +453,28 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0/subnets?id=" + listSubnetId
         listsubnetsbyidresponse = requests.get(url, headers=headers)
         self.assertEqual(listsubnetsbyidresponse.status_code, 200)
-        self.assertNotIn("None", json.loads(listsubnetsbyidresponse.content)["subnets"][0]["name"])
+        self.assertNotIn("None", json.loads(
+            listsubnetsbyidresponse.content)["subnets"][0]["name"])
         print(" ")
 
         print('->>>>>>> test Neutron List Subnets By Multiple Id ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/subnets?id=" + listSubnetId +"&id="+listSubnetId2
+        url = "http://0.0.0.0:19696/v2.0/subnets?id=" + \
+            listSubnetId + "&id=" + listSubnetId2
         listsubnetsbymultipleidsresponse = requests.get(url, headers=headers)
         self.assertEqual(listsubnetsbymultipleidsresponse.status_code, 200)
-        self.assertNotIn("None", json.loads(listsubnetsbymultipleidsresponse.content)["subnets"][0]["name"])
+        self.assertNotIn("None", json.loads(
+            listsubnetsbymultipleidsresponse.content)["subnets"][0]["name"])
         print(" ")
 
-
-
         print('->>>>>>> test Neutron Show Subnet->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(listsubnetsresponse.content)["subnets"][0]["id"])
+        url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (
+            json.loads(listsubnetsresponse.content)["subnets"][0]["id"])
         showsubnetsresponse = requests.get(url, headers=headers)
         self.assertEqual(showsubnetsresponse.status_code, 200)
-        self.assertNotIn("None", json.loads(showsubnetsresponse.content)["subnet"]["name"])
+        self.assertNotIn("None", json.loads(
+            showsubnetsresponse.content)["subnet"]["name"])
         print(" ")
 
         print('->>>>>>> test Neutron Show Non-Existing Subnet->>>>>>>>>>>>>>>')
@@ -431,38 +484,46 @@ class testRestApi(ApiBaseOpenStack):
         self.assertEqual(showsubnetsresponse.status_code, 404)
         print(" ")
 
-
         print('->>>>>>> test Neutron Create Subnet ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/subnets"
-        createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
-        createsubnetresponse = requests.post(url, data=createsubnetdata, headers=headers)
+        createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (
+            json.loads(createnetworkresponse.content)["network"]["id"])
+        createsubnetresponse = requests.post(
+            url, data=createsubnetdata, headers=headers)
         self.assertEqual(createsubnetresponse.status_code, 201)
-        self.assertEqual(json.loads(createsubnetresponse.content)["subnet"]["name"], "new_subnet")
+        self.assertEqual(json.loads(createsubnetresponse.content)[
+                         "subnet"]["name"], "new_subnet")
         print(" ")
 
         print('->>>>>>> test Neutron Create Second Subnet ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/subnets"
-        createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
-        createsubnetfailureresponse = requests.post(url, data=createsubnetdata, headers=headers)
+        createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (
+            json.loads(createnetworkresponse.content)["network"]["id"])
+        createsubnetfailureresponse = requests.post(
+            url, data=createsubnetdata, headers=headers)
         self.assertEqual(createsubnetfailureresponse.status_code, 409)
         print(" ")
 
         print('->>>>>>> test Neutron Update Subnet ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(createsubnetresponse.content)["subnet"]["id"])
+        url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (
+            json.loads(createsubnetresponse.content)["subnet"]["id"])
         updatesubnetdata = '{"subnet": {"name": "new_subnet_new_name", "network_id":"some_id", "tenant_id":"new_tenant_id", "allocation_pools":"change_me", "gateway_ip":"192.168.1.120", "ip_version":4, "cidr":"10.0.0.1/24", "id":"some_new_id", "enable_dhcp":true} }'
-        updatesubnetresponse = requests.put(url, data=updatesubnetdata, headers=headers)
+        updatesubnetresponse = requests.put(
+            url, data=updatesubnetdata, headers=headers)
         self.assertEqual(updatesubnetresponse.status_code, 200)
-        self.assertEqual(json.loads(updatesubnetresponse.content)["subnet"]["name"], "new_subnet_new_name")
+        self.assertEqual(json.loads(updatesubnetresponse.content)[
+                         "subnet"]["name"], "new_subnet_new_name")
         print(" ")
 
         print('->>>>>>> test Neutron Update Non-Existing Subnet ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/subnets/non-existing-subnet-12345"
         updatenonexistingsubnetdata = '{"subnet": {"name": "new_subnet_new_name"} }'
-        updatenonexistingsubnetresponse = requests.put(url, data=updatenonexistingsubnetdata, headers=headers)
+        updatenonexistingsubnetresponse = requests.put(
+            url, data=updatenonexistingsubnetdata, headers=headers)
         self.assertEqual(updatenonexistingsubnetresponse.status_code, 404)
         print(" ")
 
@@ -471,8 +532,10 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0/ports"
         listportsesponse = requests.get(url, headers=headers)
         self.assertEqual(listportsesponse.status_code, 200)
-        self.assertEqual(json.loads(listportsesponse.content)["ports"][0]["status"], "ACTIVE")
-        listPortsName = json.loads(listportsesponse.content)["ports"][0]["name"]
+        self.assertEqual(json.loads(listportsesponse.content)
+                         ["ports"][0]["status"], "ACTIVE")
+        listPortsName = json.loads(listportsesponse.content)[
+            "ports"][0]["name"]
         listPortsId1 = json.loads(listportsesponse.content)["ports"][0]["id"]
         listPortsId2 = json.loads(listportsesponse.content)["ports"][1]["id"]
         print(" ")
@@ -482,7 +545,8 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0/ports?name=" + listPortsName
         listportsbynameesponse = requests.get(url, headers=headers)
         self.assertEqual(listportsbynameesponse.status_code, 200)
-        self.assertEqual(json.loads(listportsbynameesponse.content)["ports"][0]["name"], listPortsName)
+        self.assertEqual(json.loads(listportsbynameesponse.content)[
+                         "ports"][0]["name"], listPortsName)
         print(" ")
 
         print('->>>>>>> test Neutron List Ports By Id ->>>>>>>>>>>>>>>')
@@ -490,15 +554,18 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0/ports?id=" + listPortsId1
         listportsbyidesponse = requests.get(url, headers=headers)
         self.assertEqual(listportsbyidesponse.status_code, 200)
-        self.assertEqual(json.loads(listportsbyidesponse.content)["ports"][0]["id"], listPortsId1)
+        self.assertEqual(json.loads(listportsbyidesponse.content)[
+                         "ports"][0]["id"], listPortsId1)
         print(" ")
 
         print('->>>>>>> test Neutron List Ports By Multiple Ids ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/ports?id=" + listPortsId1 +"&id="+listPortsId2
+        url = "http://0.0.0.0:19696/v2.0/ports?id=" + \
+            listPortsId1 + "&id=" + listPortsId2
         listportsbymultipleidsesponse = requests.get(url, headers=headers)
         self.assertEqual(listportsbymultipleidsesponse.status_code, 200)
-        self.assertEqual(json.loads(listportsbymultipleidsesponse.content)["ports"][0]["id"], listPortsId1)
+        self.assertEqual(json.loads(listportsbymultipleidsesponse.content)[
+                         "ports"][0]["id"], listPortsId1)
         print(" ")
 
         print('->>>>>>> test Neutron List Non-Existing Ports ->>>>>>>>>>>>>>>')
@@ -510,10 +577,12 @@ class testRestApi(ApiBaseOpenStack):
 
         print('->>>>>>> test Neutron Show Port ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(listportsesponse.content)["ports"][0]["id"])
+        url = "http://0.0.0.0:19696/v2.0/ports/%s" % (
+            json.loads(listportsesponse.content)["ports"][0]["id"])
         showportresponse = requests.get(url, headers=headers)
         self.assertEqual(showportresponse.status_code, 200)
-        self.assertEqual(json.loads(showportresponse.content)["port"]["status"], "ACTIVE")
+        self.assertEqual(json.loads(showportresponse.content)
+                         ["port"]["status"], "ACTIVE")
         print(" ")
 
         print('->>>>>>> test Neutron Show Non-Existing Port ->>>>>>>>>>>>>>>')
@@ -527,63 +596,76 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/ports"
         createnonexistingportdata = '{"port": {"name": "new_port", "network_id": "non-existing-id"} }'
-        createnonexistingnetworkportresponse = requests.post(url, data=createnonexistingportdata, headers=headers)
+        createnonexistingnetworkportresponse = requests.post(
+            url, data=createnonexistingportdata, headers=headers)
         self.assertEqual(createnonexistingnetworkportresponse.status_code, 404)
         print(" ")
 
         print('->>>>>>> test Neutron Create Port ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/ports"
-        createportdata = '{"port": {"name": "new_port", "network_id": "%s", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","id":"new_id1234", "mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
-        createportresponse = requests.post(url, data=createportdata, headers=headers)
+        createportdata = '{"port": {"name": "new_port", "network_id": "%s", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","id":"new_id1234", "mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123"} }' % (json.loads(createnetworkresponse.content)[
+                                                                                                                                                                                                                                                                                                    "network"]["id"])
+        createportresponse = requests.post(
+            url, data=createportdata, headers=headers)
         self.assertEqual(createportresponse.status_code, 201)
-        print (createportresponse.content)
-        self.assertEqual(json.loads(createportresponse.content)["port"]["name"], "new_port")
+        print(createportresponse.content)
+        self.assertEqual(json.loads(createportresponse.content)[
+                         "port"]["name"], "new_port")
         print(" ")
 
         print('->>>>>>> test Neutron Create Port With Existing Name ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/ports"
-        createportwithexistingnamedata = '{"port": {"name": "new_port", "network_id": "%s"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
-        createportwithexistingnameresponse = requests.post(url, data=createportwithexistingnamedata, headers=headers)
+        createportwithexistingnamedata = '{"port": {"name": "new_port", "network_id": "%s"} }' % (
+            json.loads(createnetworkresponse.content)["network"]["id"])
+        createportwithexistingnameresponse = requests.post(
+            url, data=createportwithexistingnamedata, headers=headers)
         self.assertEqual(createportwithexistingnameresponse.status_code, 500)
         print(" ")
 
         print('->>>>>>> test Neutron Create Port Without Name ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/ports"
-        createportdatawithoutname = '{"port": {"network_id": "%s"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
-        createportwithoutnameresponse = requests.post(url, data=createportdatawithoutname, headers=headers)
+        createportdatawithoutname = '{"port": {"network_id": "%s"} }' % (
+            json.loads(createnetworkresponse.content)["network"]["id"])
+        createportwithoutnameresponse = requests.post(
+            url, data=createportdatawithoutname, headers=headers)
         self.assertEqual(createportwithoutnameresponse.status_code, 201)
-        self.assertIn("port:cp", json.loads(createportwithoutnameresponse.content)["port"]["name"])
+        self.assertIn("port:cp", json.loads(
+            createportwithoutnameresponse.content)["port"]["name"])
         print(" ")
 
         print('->>>>>>> test Neutron Update Port ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         print(json.loads(createportresponse.content)["port"]["name"])
-        url = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(createportresponse.content)["port"]["name"])
+        url = "http://0.0.0.0:19696/v2.0/ports/%s" % (
+            json.loads(createportresponse.content)["port"]["name"])
         updateportdata = '{"port": {"name": "new_port_new_name", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123", "network_id":"network_id123"} }'
-        updateportresponse = requests.put(url, data=updateportdata, headers=headers)
+        updateportresponse = requests.put(
+            url, data=updateportdata, headers=headers)
         self.assertEqual(updateportresponse.status_code, 200)
-        self.assertEqual(json.loads(updateportresponse.content)["port"]["name"], "new_port_new_name")
+        self.assertEqual(json.loads(updateportresponse.content)[
+                         "port"]["name"], "new_port_new_name")
         print(" ")
 
         print('->>>>>>> test Neutron Update Non-Existing Port ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/ports/non-existing-port-ip"
         updatenonexistingportdata = '{"port": {"name": "new_port_new_name"} }'
-        updatenonexistingportresponse = requests.put(url, data=updatenonexistingportdata, headers=headers)
+        updatenonexistingportresponse = requests.put(
+            url, data=updatenonexistingportdata, headers=headers)
         self.assertEqual(updatenonexistingportresponse.status_code, 404)
         print(" ")
 
         print('->>>>>>> test Neutron Delete Port ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        righturl = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(createportresponse.content)["port"]["id"])
+        righturl = "http://0.0.0.0:19696/v2.0/ports/%s" % (
+            json.loads(createportresponse.content)["port"]["id"])
         deleterightportresponse = requests.delete(righturl, headers=headers)
         self.assertEqual(deleterightportresponse.status_code, 204)
         print(" ")
 
-
         print('->>>>>>> test Neutron Delete Non-Existing Port ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         wrongurl = "http://0.0.0.0:19696/v2.0/ports/unknownid"
@@ -594,7 +676,8 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>> test Neutron Delete Subnet ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         wrongurl = "http://0.0.0.0:19696/v2.0/subnets/unknownid"
-        righturl = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(updatesubnetresponse.content)["subnet"]["id"])
+        righturl = "http://0.0.0.0:19696/v2.0/subnets/%s" % (
+            json.loads(updatesubnetresponse.content)["subnet"]["id"])
         deletewrongsubnetresponse = requests.delete(wrongurl, headers=headers)
         deleterightsubnetresponse = requests.delete(righturl, headers=headers)
         self.assertEqual(deletewrongsubnetresponse.status_code, 404)
@@ -603,7 +686,8 @@ class testRestApi(ApiBaseOpenStack):
 
         print('->>>>>>> test Neutron Delete Network ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        righturl = "http://0.0.0.0:19696/v2.0/networks/%s" % (json.loads(createnetworkresponse.content)["network"]["id"])
+        righturl = "http://0.0.0.0:19696/v2.0/networks/%s" % (
+            json.loads(createnetworkresponse.content)["network"]["id"])
         deleterightnetworkresponse = requests.delete(righturl, headers=headers)
         self.assertEqual(deleterightnetworkresponse.status_code, 204)
         print(" ")
@@ -621,14 +705,16 @@ class testRestApi(ApiBaseOpenStack):
         print(" ")
 
         headers = {'Content-type': 'application/json'}
-        test_heatapi_keystone_get_token = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_keystone_get_token.yml")).read()
+        test_heatapi_keystone_get_token = open(os.path.join(os.path.dirname(
+            __file__), "templates/test_heatapi_keystone_get_token.yml")).read()
 
         print('->>>>>>> test Keystone List Versions ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:15000/"
         listapiversionstackresponse = requests.get(url, headers=headers)
         self.assertEqual(listapiversionstackresponse.status_code, 200)
-        self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"]["values"][0]["id"], "v2.0")
+        self.assertEqual(json.loads(listapiversionstackresponse.content)[
+                         "versions"]["values"][0]["id"], "v2.0")
         print(" ")
 
         print('->>>>>>> test Keystone Show ApiV2 ->>>>>>>>>>>>>>>')
@@ -636,15 +722,18 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:15000/v2.0"
         showapiversionstackresponse = requests.get(url, headers=headers)
         self.assertEqual(showapiversionstackresponse.status_code, 200)
-        self.assertEqual(json.loads(showapiversionstackresponse.content)["version"]["id"], "v2.0")
+        self.assertEqual(json.loads(showapiversionstackresponse.content)[
+                         "version"]["id"], "v2.0")
         print(" ")
 
         print('->>>>>>> test Keystone Get Token ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:15000/v2.0/tokens"
-        gettokenstackresponse = requests.post(url, data=json.dumps(yaml.load(test_heatapi_keystone_get_token)), headers=headers)
+        gettokenstackresponse = requests.post(url, data=json.dumps(
+            yaml.load(test_heatapi_keystone_get_token)), headers=headers)
         self.assertEqual(gettokenstackresponse.status_code, 200)
-        self.assertEqual(json.loads(gettokenstackresponse.content)["access"]["user"]["name"], "tenantName")
+        self.assertEqual(json.loads(gettokenstackresponse.content)[
+                         "access"]["user"]["name"], "tenantName")
         print(" ")
 
     def testHeatDummy(self):
@@ -653,78 +742,91 @@ class testRestApi(ApiBaseOpenStack):
         print(" ")
 
         headers = {'Content-type': 'application/json'}
-        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_template_create_stack.yml")).read()
-        test_heatapi_template_update_stack = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_template_update_stack.yml")).read()
+        test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(
+            __file__), "templates/test_heatapi_template_create_stack.yml")).read()
+        test_heatapi_template_update_stack = open(os.path.join(os.path.dirname(
+            __file__), "templates/test_heatapi_template_update_stack.yml")).read()
 
         print('->>>>>>> test Heat List API Versions Stack ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18004/"
         listapiversionstackresponse = requests.get(url, headers=headers)
         self.assertEqual(listapiversionstackresponse.status_code, 200)
-        self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"][0]["id"], "v1.0")
+        self.assertEqual(json.loads(listapiversionstackresponse.content)[
+                         "versions"][0]["id"], "v1.0")
         print(" ")
 
         print('->>>>>>> test Create Stack ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
-        createstackresponse = requests.post(url, data=json.dumps(yaml.load(test_heatapi_template_create_stack)), headers=headers)
+        createstackresponse = requests.post(url, data=json.dumps(
+            yaml.load(test_heatapi_template_create_stack)), headers=headers)
         self.assertEqual(createstackresponse.status_code, 201)
-        self.assertNotEqual(json.loads(createstackresponse.content)["stack"]["id"], "")
+        self.assertNotEqual(json.loads(
+            createstackresponse.content)["stack"]["id"], "")
         print(" ")
 
         print('->>>>>>> test Create Stack With Existing Name ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
-        createstackwithexistingnameresponse = requests.post(url, data='{"stack_name" : "s1"}', headers=headers)
+        createstackwithexistingnameresponse = requests.post(
+            url, data='{"stack_name" : "s1"}', headers=headers)
         self.assertEqual(createstackwithexistingnameresponse.status_code, 409)
         print(" ")
 
         print('->>>>>>> test Create Stack With Unsupported Version ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
-        createstackwitheunsupportedversionresponse = requests.post(url, data='{"stack_name" : "stackname123", "template" : {"heat_template_version": "2015-04-29"}}', headers=headers)
-        self.assertEqual(createstackwitheunsupportedversionresponse.status_code, 400)
+        createstackwitheunsupportedversionresponse = requests.post(
+            url, data='{"stack_name" : "stackname123", "template" : {"heat_template_version": "2015-04-29"}}', headers=headers)
+        self.assertEqual(
+            createstackwitheunsupportedversionresponse.status_code, 400)
         print(" ")
 
-
         print('->>>>>>> test List Stack ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
         liststackresponse = requests.get(url, headers=headers)
         self.assertEqual(liststackresponse.status_code, 200)
-        self.assertEqual(json.loads(liststackresponse.content)["stacks"][0]["stack_status"], "CREATE_COMPLETE")
+        self.assertEqual(json.loads(liststackresponse.content)[
+                         "stacks"][0]["stack_status"], "CREATE_COMPLETE")
         print(" ")
 
-
         print('->>>>>>> test Show Stack ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s"% json.loads(createstackresponse.content)['stack']['id']
+        url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s" % json.loads(
+            createstackresponse.content)['stack']['id']
         liststackdetailsresponse = requests.get(url, headers=headers)
         self.assertEqual(liststackdetailsresponse.status_code, 200)
-        self.assertEqual(json.loads(liststackdetailsresponse.content)["stack"]["stack_status"], "CREATE_COMPLETE")
+        self.assertEqual(json.loads(liststackdetailsresponse.content)[
+                         "stack"]["stack_status"], "CREATE_COMPLETE")
         print(" ")
 
         print('->>>>>>> test Show Non-Exisitng Stack ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/non_exisitng_id123"
-        listnonexistingstackdetailsresponse = requests.get(url, headers=headers)
+        listnonexistingstackdetailsresponse = requests.get(
+            url, headers=headers)
         self.assertEqual(listnonexistingstackdetailsresponse.status_code, 404)
         print(" ")
 
         print('->>>>>>> test Update Stack ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/%s"% json.loads(createstackresponse.content)['stack']['id']
+        url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/%s" % json.loads(
+            createstackresponse.content)['stack']['id']
         updatestackresponse = requests.put(url, data=json.dumps(yaml.load(test_heatapi_template_update_stack)),
-                                            headers=headers)
+                                           headers=headers)
         self.assertEqual(updatestackresponse.status_code, 202)
         liststackdetailsresponse = requests.get(url, headers=headers)
-        self.assertEqual(json.loads(liststackdetailsresponse.content)["stack"]["stack_status"], "UPDATE_COMPLETE")
+        self.assertEqual(json.loads(liststackdetailsresponse.content)[
+                         "stack"]["stack_status"], "UPDATE_COMPLETE")
         print(" ")
 
         print('->>>>>>> test Update Non-Existing Stack ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/non_existing_id_1234"
-        updatenonexistingstackresponse = requests.put(url, data={"non": "sense"}, headers=headers)
+        updatenonexistingstackresponse = requests.put(
+            url, data={"non": "sense"}, headers=headers)
         self.assertEqual(updatenonexistingstackresponse.status_code, 404)
         print(" ")
 
@@ -753,19 +855,24 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>> Create ports p1 - p4 ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         # Get network id
-        network_resp = requests.get("http://0.0.0.0:19696/v2.0/networks?name=default", headers=headers)
+        network_resp = requests.get(
+            "http://0.0.0.0:19696/v2.0/networks?name=default", headers=headers)
         self.assertEqual(network_resp.status_code, 200)
         network_id = json.loads(network_resp.content)["networks"][0]["id"]
 
         url = "http://0.0.0.0:19696/v2.0/ports"
         port_request = '{"port": {"name": "%s", "network_id": "%s"}}'
-        p1_resp = requests.post(url, data=port_request % ("p1", network_id), headers=headers)
+        p1_resp = requests.post(url, data=port_request %
+                                ("p1", network_id), headers=headers)
         self.assertEqual(p1_resp.status_code, 201)
-        p2_resp = requests.post(url, data=port_request % ("p2", network_id), headers=headers)
+        p2_resp = requests.post(url, data=port_request %
+                                ("p2", network_id), headers=headers)
         self.assertEqual(p2_resp.status_code, 201)
-        p3_resp = requests.post(url, data=port_request % ("p3", network_id), headers=headers)
+        p3_resp = requests.post(url, data=port_request %
+                                ("p3", network_id), headers=headers)
         self.assertEqual(p3_resp.status_code, 201)
-        p4_resp = requests.post(url, data=port_request % ("p4", network_id), headers=headers)
+        p4_resp = requests.post(url, data=port_request %
+                                ("p4", network_id), headers=headers)
         self.assertEqual(p4_resp.status_code, 201)
 
         p1_id = json.loads(p1_resp.content)["port"]["id"]
@@ -776,11 +883,14 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>> test Neutron SFC Port Pair Create ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs"
-        pp1_resp = requests.post(url, data='{"port_pair": {"name": "pp1", "ingress": "%s", "egress": "%s"}}' % (p1_id, p2_id), headers=headers)
+        pp1_resp = requests.post(url, data='{"port_pair": {"name": "pp1", "ingress": "%s", "egress": "%s"}}' % (
+            p1_id, p2_id), headers=headers)
         self.assertEqual(pp1_resp.status_code, 201)
-        pp2_resp = requests.post(url, data='{"port_pair": {"name": "pp2", "ingress": "%s", "egress": "%s"}}' % (p3_id, p4_id), headers=headers)
+        pp2_resp = requests.post(url, data='{"port_pair": {"name": "pp2", "ingress": "%s", "egress": "%s"}}' % (
+            p3_id, p4_id), headers=headers)
         self.assertEqual(pp2_resp.status_code, 201)
-        pp3_resp = requests.post(url, data='{"port_pair": {"name": "pp3", "ingress": "%s", "egress": "%s"}}' % (p3_id, p4_id), headers=headers)
+        pp3_resp = requests.post(url, data='{"port_pair": {"name": "pp3", "ingress": "%s", "egress": "%s"}}' % (
+            p3_id, p4_id), headers=headers)
         self.assertEqual(pp3_resp.status_code, 201)
 
         pp1_id = json.loads(pp1_resp.content)["port_pair"]["id"]
@@ -790,9 +900,11 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>> test Neutron SFC Port Pair Update ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s" % pp3_id
-        pp3_update_resp = requests.put(url, data='{"port_pair": {"description": "port_pair_update"}}', headers=headers)
+        pp3_update_resp = requests.put(
+            url, data='{"port_pair": {"description": "port_pair_update"}}', headers=headers)
         self.assertEqual(pp3_update_resp.status_code, 200)
-        self.assertEqual(json.loads(pp3_update_resp.content)["port_pair"]["description"], "port_pair_update")
+        self.assertEqual(json.loads(pp3_update_resp.content)[
+                         "port_pair"]["description"], "port_pair_update")
 
         print('->>>>>>> test Neutron SFC Port Pair Delete ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
@@ -805,24 +917,29 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs"
         pp_list_resp = requests.get(url, headers=headers)
         self.assertEqual(pp_list_resp.status_code, 200)
-        self.assertEqual(len(json.loads(pp_list_resp.content)["port_pairs"]), 2)  # only pp1 and pp2 should be left
+        # only pp1 and pp2 should be left
+        self.assertEqual(
+            len(json.loads(pp_list_resp.content)["port_pairs"]), 2)
 
         print('->>>>>>> test Neutron SFC Port Pair Show ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s" % pp2_id
         pp2_show_resp = requests.get(url, headers=headers)
         self.assertEqual(pp2_show_resp.status_code, 200)
-        self.assertEqual(json.loads(pp2_show_resp.content)["port_pair"]["name"], "pp2")
-
+        self.assertEqual(json.loads(pp2_show_resp.content)
+                         ["port_pair"]["name"], "pp2")
 
         print('->>>>>>> test Neutron SFC Port Pair Group Create ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups"
-        ppg1_resp = requests.post(url, data='{"port_pair_group": {"name": "ppg1", "port_pairs": ["%s"]}}' % (pp1_id), headers=headers)
+        ppg1_resp = requests.post(
+            url, data='{"port_pair_group": {"name": "ppg1", "port_pairs": ["%s"]}}' % (pp1_id), headers=headers)
         self.assertEqual(ppg1_resp.status_code, 201)
-        ppg2_resp = requests.post(url, data='{"port_pair_group": {"name": "ppg2", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
+        ppg2_resp = requests.post(
+            url, data='{"port_pair_group": {"name": "ppg2", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
         self.assertEqual(ppg2_resp.status_code, 201)
-        ppg3_resp = requests.post(url, data='{"port_pair_group": {"name": "ppg3", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
+        ppg3_resp = requests.post(
+            url, data='{"port_pair_group": {"name": "ppg3", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
         self.assertEqual(ppg3_resp.status_code, 201)
 
         ppg1_id = json.loads(ppg1_resp.content)["port_pair_group"]["id"]
@@ -832,9 +949,11 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>> test Neutron SFC Port Pair Group Update ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s" % ppg3_id
-        ppg3_update_resp = requests.put(url, data='{"port_pair_group": {"description": "port_pair_group_update"}}', headers=headers)
+        ppg3_update_resp = requests.put(
+            url, data='{"port_pair_group": {"description": "port_pair_group_update"}}', headers=headers)
         self.assertEqual(ppg3_update_resp.status_code, 200)
-        self.assertEqual(json.loads(ppg3_update_resp.content)["port_pair_group"]["description"], "port_pair_group_update")
+        self.assertEqual(json.loads(ppg3_update_resp.content)[
+                         "port_pair_group"]["description"], "port_pair_group_update")
 
         print('->>>>>>> test Neutron SFC Port Pair Group Delete ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
@@ -847,21 +966,26 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups"
         ppg_list_resp = requests.get(url, headers=headers)
         self.assertEqual(ppg_list_resp.status_code, 200)
-        self.assertEqual(len(json.loads(ppg_list_resp.content)["port_pair_groups"]), 2)  # only ppg1 and ppg2 should be left
+        # only ppg1 and ppg2 should be left
+        self.assertEqual(
+            len(json.loads(ppg_list_resp.content)["port_pair_groups"]), 2)
 
         print('->>>>>>> test Neutron SFC Port Pair Group Show ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s" % ppg2_id
         ppg2_show_resp = requests.get(url, headers=headers)
         self.assertEqual(ppg2_show_resp.status_code, 200)
-        self.assertEqual(json.loads(ppg2_show_resp.content)["port_pair_group"]["name"], "ppg2")
+        self.assertEqual(json.loads(ppg2_show_resp.content)[
+                         "port_pair_group"]["name"], "ppg2")
 
         print('->>>>>>> test Neutron SFC Flow Classifier Create ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers"
-        fc1_resp = requests.post(url, data='{"flow_classifier": {"name": "fc1", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
+        fc1_resp = requests.post(
+            url, data='{"flow_classifier": {"name": "fc1", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
         self.assertEqual(fc1_resp.status_code, 201)
-        fc2_resp = requests.post(url, data='{"flow_classifier": {"name": "fc2", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
+        fc2_resp = requests.post(
+            url, data='{"flow_classifier": {"name": "fc2", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
         self.assertEqual(fc2_resp.status_code, 201)
 
         fc1_id = json.loads(fc1_resp.content)["flow_classifier"]["id"]
@@ -870,9 +994,11 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>> test Neutron SFC Flow Classifier Update ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s" % fc2_id
-        fc2_update_resp = requests.put(url, data='{"flow_classifier": {"description": "flow_classifier_update"}}', headers=headers)
+        fc2_update_resp = requests.put(
+            url, data='{"flow_classifier": {"description": "flow_classifier_update"}}', headers=headers)
         self.assertEqual(fc2_update_resp.status_code, 200)
-        self.assertEqual(json.loads(fc2_update_resp.content)["flow_classifier"]["description"], "flow_classifier_update")
+        self.assertEqual(json.loads(fc2_update_resp.content)[
+                         "flow_classifier"]["description"], "flow_classifier_update")
 
         print('->>>>>>> test Neutron SFC Flow Classifier Delete ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
@@ -885,22 +1011,25 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers"
         fc_list_resp = requests.get(url, headers=headers)
         self.assertEqual(fc_list_resp.status_code, 200)
-        self.assertEqual(len(json.loads(fc_list_resp.content)["flow_classifiers"]), 1)  # only fc1
+        self.assertEqual(len(json.loads(fc_list_resp.content)
+                             ["flow_classifiers"]), 1)  # only fc1
 
         print('->>>>>>> test Neutron SFC Flow Classifier Show ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s" % fc1_id
         fc1_show_resp = requests.get(url, headers=headers)
         self.assertEqual(fc1_show_resp.status_code, 200)
-        self.assertEqual(json.loads(fc1_show_resp.content)["flow_classifier"]["name"], "fc1")
-
+        self.assertEqual(json.loads(fc1_show_resp.content)[
+                         "flow_classifier"]["name"], "fc1")
 
         print('->>>>>>> test Neutron SFC Port Chain Create ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/port_chains"
-        pc1_resp = requests.post(url, data='{"port_chain": {"name": "pc1", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (ppg1_id, fc1_id), headers=headers)
+        pc1_resp = requests.post(url, data='{"port_chain": {"name": "pc1", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (
+            ppg1_id, fc1_id), headers=headers)
         self.assertEqual(pc1_resp.status_code, 201)
-        pc2_resp = requests.post(url, data='{"port_chain": {"name": "pc2", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (ppg1_id, fc1_id), headers=headers)
+        pc2_resp = requests.post(url, data='{"port_chain": {"name": "pc2", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (
+            ppg1_id, fc1_id), headers=headers)
         self.assertEqual(pc2_resp.status_code, 201)
 
         pc1_id = json.loads(pc1_resp.content)["port_chain"]["id"]
@@ -909,9 +1038,11 @@ class testRestApi(ApiBaseOpenStack):
         print('->>>>>>> test Neutron SFC Port Chain Update ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/port_chains/%s" % pc2_id
-        pc2_update_resp = requests.put(url, data='{"port_chain": {"description": "port_chain_update"}}', headers=headers)
+        pc2_update_resp = requests.put(
+            url, data='{"port_chain": {"description": "port_chain_update"}}', headers=headers)
         self.assertEqual(pc2_update_resp.status_code, 200)
-        self.assertEqual(json.loads(pc2_update_resp.content)["port_chain"]["description"], "port_chain_update")
+        self.assertEqual(json.loads(pc2_update_resp.content)[
+                         "port_chain"]["description"], "port_chain_update")
 
         print('->>>>>>> test Neutron SFC Port Chain Delete ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
@@ -924,14 +1055,17 @@ class testRestApi(ApiBaseOpenStack):
         url = "http://0.0.0.0:19696/v2.0/sfc/port_chains"
         pc_list_resp = requests.get(url, headers=headers)
         self.assertEqual(pc_list_resp.status_code, 200)
-        self.assertEqual(len(json.loads(pc_list_resp.content)["port_chains"]), 1)  # only pc1
+        self.assertEqual(len(json.loads(pc_list_resp.content)
+                             ["port_chains"]), 1)  # only pc1
 
         print('->>>>>>> test Neutron SFC Port Chain Show ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         url = "http://0.0.0.0:19696/v2.0/sfc/port_chains/%s" % pc1_id
         pc1_show_resp = requests.get(url, headers=headers)
         self.assertEqual(pc1_show_resp.status_code, 200)
-        self.assertEqual(json.loads(pc1_show_resp.content)["port_chain"]["name"], "pc1")
+        self.assertEqual(json.loads(pc1_show_resp.content)
+                         ["port_chain"]["name"], "pc1")
+
 
 if __name__ == '__main__':
     unittest.main()
index 4a21fee..c0cf0c4 100755 (executable)
@@ -1,31 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import time
 import os
 import unittest
@@ -34,7 +31,6 @@ from emuvim.dcemulator.resourcemodel import BaseResourceModel, ResourceFlavor, N
 from emuvim.dcemulator.resourcemodel.upb.simple import UpbSimpleCloudDcRM, UpbOverprovisioningCloudDcRM, UpbDummyRM
 
 
-
 class testResourceModel(SimpleTestTopology):
     """
     Test the general resource model API and functionality.
@@ -98,15 +94,15 @@ def createDummyContainerObject(name, flavor):
         def __init__(self):
             # take defaukt values from son-emu
             self.resources = dict(
-            cpu_period = -1,
-            cpu_quota = -1,
-            mem_limit = -1,
-            memswap_limit = -1
+                cpu_period=-1,
+                cpu_quota=-1,
+                mem_limit=-1,
+                memswap_limit=-1
             )
-            #self.cpu_period = self.resources['cpu_period']
-            #self.cpu_quota = self.resources['cpu_quota']
-            #self.mem_limit = self.resources['mem_limit']
-            #self.memswap_limit = self.resources['memswap_limit']
+            # self.cpu_period = self.resources['cpu_period']
+            # self.cpu_quota = self.resources['cpu_quota']
+            # self.mem_limit = self.resources['mem_limit']
+            # self.memswap_limit = self.resources['memswap_limit']
 
         def updateCpuLimit(self, cpu_period, cpu_quota):
             self.resources['cpu_period'] = cpu_period
@@ -121,8 +117,6 @@ def createDummyContainerObject(name, flavor):
     return d
 
 
-
-
 class testUpbSimpleCloudDcRM(SimpleTestTopology):
     """
     Test the UpbSimpleCloudDc resource model.
@@ -139,35 +133,55 @@ class testUpbSimpleCloudDcRM(SimpleTestTopology):
         E_MEM = 512
         MAX_MU = 2048
         # create dummy resource model environment
-        reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+        reg = ResourceModelRegistrar(
+            dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
         rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
         reg.register("test_dc", rm)
 
         c1 = createDummyContainerObject("c1", flavor="tiny")
         rm.allocate(c1)  # calculate allocation
-        self.assertEqual(float(c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 0.5)   # validate compute result
-        self.assertEqual(float(c1.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 32)   # validate memory result
+        # validate compute result
+        self.assertEqual(float(
+            c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 0.5)
+        # validate memory result
+        self.assertEqual(
+            float(c1.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 32)
 
         c2 = createDummyContainerObject("c2", flavor="small")
         rm.allocate(c2)  # calculate allocation
-        self.assertEqual(float(c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1)   # validate compute result
-        self.assertEqual(float(c2.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)   # validate memory result
+        # validate compute result
+        self.assertEqual(float(
+            c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1)
+        # validate memory result
+        self.assertEqual(
+            float(c2.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
 
         c3 = createDummyContainerObject("c3", flavor="medium")
         rm.allocate(c3)  # calculate allocation
-        self.assertEqual(float(c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 4)   # validate compute result
-        self.assertEqual(float(c3.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 256)   # validate memory result
+        # validate compute result
+        self.assertEqual(float(
+            c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 4)
+        # validate memory result
+        self.assertEqual(
+            float(c3.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 256)
 
         c4 = createDummyContainerObject("c4", flavor="large")
         rm.allocate(c4)  # calculate allocation
-        self.assertEqual(float(c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * 8)   # validate compute result
-        self.assertEqual(float(c4.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 512)   # validate memory result
+        # validate compute result
+        self.assertEqual(float(
+            c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * 8)
+        # validate memory result
+        self.assertEqual(
+            float(c4.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 512)
 
         c5 = createDummyContainerObject("c5", flavor="xlarge")
         rm.allocate(c5)  # calculate allocation
-        self.assertEqual(float(c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * 16)   # validate compute result
-        self.assertEqual(float(c5.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 1024)   # validate memory result
-
+        # validate compute result
+        self.assertEqual(float(
+            c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * 16)
+        # validate memory result
+        self.assertEqual(
+            float(c5.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 1024)
 
     def testAllocationCpuLimit(self):
         """
@@ -180,7 +194,8 @@ class testUpbSimpleCloudDcRM(SimpleTestTopology):
         E_MEM = 512
         MAX_MU = 4096
         # create dummy resource model environment
-        reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+        reg = ResourceModelRegistrar(
+            dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
         rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
         reg.register("test_dc", rm)
 
@@ -211,7 +226,8 @@ class testUpbSimpleCloudDcRM(SimpleTestTopology):
         E_MEM = 512
         MAX_MU = 2048
         # create dummy resource model environment
-        reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+        reg = ResourceModelRegistrar(
+            dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
         rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
         reg.register("test_dc", rm)
 
@@ -234,11 +250,9 @@ class testUpbSimpleCloudDcRM(SimpleTestTopology):
         Test the free procedure.
         :return:
         """
-        # config
-        E_CPU = 1.0
-        MAX_CU = 100
         # create dummy resource model environment
-        reg = ResourceModelRegistrar(dc_emulation_max_cpu=1.0, dc_emulation_max_mem=512)
+        reg = ResourceModelRegistrar(
+            dc_emulation_max_cpu=1.0, dc_emulation_max_mem=512)
         rm = UpbSimpleCloudDcRM(max_cu=100, max_mu=100)
         reg.register("test_dc", rm)
         c1 = createDummyContainerObject("c6", flavor="tiny")
@@ -279,8 +293,10 @@ class testUpbSimpleCloudDcRM(SimpleTestTopology):
         self.assertTrue(len(r._allocated_compute_instances) == 1)
 
         # check if there is a real limitation set for containers cgroup
-        # deactivated for now, seems not to work in docker-in-docker setup used in CI
-        self.assertEqual(float(tc1.resources['cpu_quota'])/tc1.resources['cpu_period'], 0.005)
+        # deactivated for now, seems not to work in docker-in-docker setup used
+        # in CI
+        self.assertEqual(
+            float(tc1.resources['cpu_quota']) / tc1.resources['cpu_period'], 0.005)
 
         # check if free was called during stopCompute
         self.dc[0].stopCompute("tc1")
@@ -307,39 +323,50 @@ class testUpbOverprovisioningCloudDcRM(SimpleTestTopology):
         E_MEM = 512
         MAX_MU = 2048
         # create dummy resource model environment
-        reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+        reg = ResourceModelRegistrar(
+            dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
         rm = UpbOverprovisioningCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
         reg.register("test_dc", rm)
 
         c1 = createDummyContainerObject("c1", flavor="small")
         rm.allocate(c1)  # calculate allocation
-        self.assertAlmostEqual(float(c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
-        self.assertAlmostEqual(float(c1.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
+        self.assertAlmostEqual(float(
+            c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
+        self.assertAlmostEqual(
+            float(c1.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
         self.assertAlmostEqual(rm.cpu_op_factor, 1.0)
 
         c2 = createDummyContainerObject("c2", flavor="small")
         rm.allocate(c2)  # calculate allocation
-        self.assertAlmostEqual(float(c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
-        self.assertAlmostEqual(float(c2.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
+        self.assertAlmostEqual(float(
+            c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
+        self.assertAlmostEqual(
+            float(c2.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
         self.assertAlmostEqual(rm.cpu_op_factor, 1.0)
 
         c3 = createDummyContainerObject("c3", flavor="small")
         rm.allocate(c3)  # calculate allocation
-        self.assertAlmostEqual(float(c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
-        self.assertAlmostEqual(float(c3.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
+        self.assertAlmostEqual(float(
+            c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
+        self.assertAlmostEqual(
+            float(c3.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
         self.assertAlmostEqual(rm.cpu_op_factor, 1.0)
 
         # from this container onwards, we should go to over provisioning mode:
         c4 = createDummyContainerObject("c4", flavor="small")
         rm.allocate(c4)  # calculate allocation
-        self.assertAlmostEqual(float(c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 4), places=5)
-        self.assertAlmostEqual(float(c4.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128, places=5)
+        self.assertAlmostEqual(float(
+            c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 4), places=5)
+        self.assertAlmostEqual(float(
+            c4.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128, places=5)
         self.assertAlmostEqual(rm.cpu_op_factor, 0.75)
 
         c5 = createDummyContainerObject("c5", flavor="small")
         rm.allocate(c5)  # calculate allocation
-        self.assertAlmostEqual(float(c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 5), places=5)
-        self.assertAlmostEqual(float(c5.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
+        self.assertAlmostEqual(float(
+            c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 5), places=5)
+        self.assertAlmostEqual(
+            float(c5.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
         self.assertAlmostEqual(rm.cpu_op_factor, 0.6)
 
 
@@ -359,7 +386,8 @@ class testUpbDummyRM(SimpleTestTopology):
         E_MEM = 512
         MAX_MU = 2048
         # create dummy resource model environment
-        reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+        reg = ResourceModelRegistrar(
+            dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
         rm = UpbDummyRM(max_cu=MAX_CU, max_mu=MAX_MU)
         reg.register("test_dc", rm)
 
@@ -370,4 +398,3 @@ class testUpbDummyRM(SimpleTestTopology):
         c2 = createDummyContainerObject("c2", flavor="small")
         rm.allocate(c2)  # calculate allocation
         self.assertEqual(len(rm._allocated_compute_instances), 2)
-
index 4ea8008..ef157e5 100755 (executable)
@@ -1,36 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Test suite to automatically test emulator REST API endpoints.
-"""
-
-import time
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import unittest
 from emuvim.test.api_base import SimpleTestTopology
 import subprocess
@@ -60,13 +52,16 @@ class testRestApi(SimpleTestTopology):
 
         print('->>>>>>> vim-emu compute start -d datacenter0 -n vnf1 ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        subprocess.call("vim-emu compute start -d datacenter0 -n vnf1", shell=True)
+        subprocess.call(
+            "vim-emu compute start -d datacenter0 -n vnf1", shell=True)
         print('->>>>>>> vim-emu compute start -d datacenter0 -n vnf2 ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        subprocess.call("vim-emu compute start -d datacenter0 -n vnf2", shell=True)
+        subprocess.call(
+            "vim-emu compute start -d datacenter0 -n vnf2", shell=True)
         print('->>>>>>> vim-emu compute start -d datacenter0 -n vnf3 ->>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        subprocess.call("vim-emu compute start -d datacenter1 -n vnf3", shell=True)
+        subprocess.call(
+            "vim-emu compute start -d datacenter1 -n vnf3", shell=True)
         subprocess.call("vim-emu compute list", shell=True)
         print('->>>>>>> checking running nodes, compute list, and connectivity >>>>>>>>>>')
 
@@ -78,33 +73,42 @@ class testRestApi(SimpleTestTopology):
         # check compute list result
         self.assertTrue(len(self.dc[0].listCompute()) == 2)
         self.assertTrue(len(self.dc[1].listCompute()) == 1)
-        self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
-        self.assertTrue(isinstance(self.dc[0].listCompute()[1], EmulatorCompute))
-        self.assertTrue(isinstance(self.dc[1].listCompute()[0], EmulatorCompute))
+        self.assertTrue(isinstance(
+            self.dc[0].listCompute()[0], EmulatorCompute))
+        self.assertTrue(isinstance(
+            self.dc[0].listCompute()[1], EmulatorCompute))
+        self.assertTrue(isinstance(
+            self.dc[1].listCompute()[0], EmulatorCompute))
         self.assertTrue(self.dc[0].listCompute()[1].name == "vnf1")
         self.assertTrue(self.dc[0].listCompute()[0].name == "vnf2")
         self.assertTrue(self.dc[1].listCompute()[0].name == "vnf3")
 
         # check connectivity by using ping
-        self.assertTrue(self.net.ping([self.dc[0].listCompute()[1], self.dc[0].listCompute()[0]]) <= 0.0)
-        self.assertTrue(self.net.ping([self.dc[0].listCompute()[0], self.dc[1].listCompute()[0]]) <= 0.0)
-        self.assertTrue(self.net.ping([self.dc[1].listCompute()[0], self.dc[0].listCompute()[1]]) <= 0.0)
+        self.assertTrue(self.net.ping(
+            [self.dc[0].listCompute()[1], self.dc[0].listCompute()[0]]) <= 0.0)
+        self.assertTrue(self.net.ping(
+            [self.dc[0].listCompute()[0], self.dc[1].listCompute()[0]]) <= 0.0)
+        self.assertTrue(self.net.ping(
+            [self.dc[1].listCompute()[0], self.dc[0].listCompute()[1]]) <= 0.0)
 
         print('network add vnf1 vnf2->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        output = subprocess.check_output("vim-emu network add -src vnf1 -dst vnf2 -b -c 10", shell=True)
+        output = subprocess.check_output(
+            "vim-emu network add -src vnf1 -dst vnf2 -b -c 10", shell=True)
         self.assertTrue("add-flow" in output)
         self.assertTrue("success" in output)
 
         print('network remove vnf1 vnf2->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        output = subprocess.check_output("vim-emu network remove -src vnf1 -dst vnf2 -b", shell=True)
+        output = subprocess.check_output(
+            "vim-emu network remove -src vnf1 -dst vnf2 -b", shell=True)
         self.assertTrue("del-flows" in output)
         self.assertTrue("success" in output)
 
         print('>>>>> checking --> vim-emu compute stop -d datacenter0 -n vnf2 ->>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        output = subprocess.check_output("vim-emu compute stop -d datacenter0 -n vnf2", shell=True)
+        output = subprocess.check_output(
+            "vim-emu compute stop -d datacenter0 -n vnf2", shell=True)
 
         # check number of running nodes
         self.assertTrue(len(self.getContainernetContainers()) == 2)
@@ -123,7 +127,8 @@ class testRestApi(SimpleTestTopology):
 
         print('>>>>> checking --> vim-emu compute status -d datacenter0 -n vnf1 ->>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        output = subprocess.check_output("vim-emu compute status -d datacenter0 -n vnf1", shell=True)
+        output = subprocess.check_output(
+            "vim-emu compute status -d datacenter0 -n vnf1", shell=True)
         output = ast.literal_eval(output)
 
         # check compute status result
@@ -138,7 +143,8 @@ class testRestApi(SimpleTestTopology):
 
         print('->>>>> checking --> vim-emu datacenter status -d datacenter0 ->>>>>>>>')
         print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
-        output = subprocess.check_output("vim-emu datacenter status -d datacenter0", shell=True)
+        output = subprocess.check_output(
+            "vim-emu datacenter status -d datacenter0", shell=True)
         # check datacenter status result
         self.assertTrue("datacenter0" in output)
         self.stopApi()
index 3450ce9..cb18c57 100755 (executable)
@@ -1,40 +1,35 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
 import time
 import requests
 import json
 import time
 import requests
 import json
-import os
 import unittest
 from emuvim.test.base import SimpleTestTopology
 from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
 from emuvim.api.sonata.dummygatekeeper import initialize_GK, parse_interface
 import unittest
 from emuvim.test.base import SimpleTestTopology
 from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
 from emuvim.api.sonata.dummygatekeeper import initialize_GK, parse_interface
-import mininet.clean
 from ipaddress import ip_network
 
 PACKAGE_PATH = "misc/sonata-demo-service.son"
@@ -42,10 +37,11 @@ PACKAGE_PATH = "misc/sonata-demo-service.son"
 
 class testSonataDummyGatekeeper(SimpleTestTopology):
 
-#    @unittest.skip("disabled")
+    #    @unittest.skip("disabled")
     def test_GK_Api_start_service(self):
         # create network
-        self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0, enable_learning=True)
+        self.createNet(nswitches=0, ndatacenter=2, nhosts=2,
+                       ndockers=0, enable_learning=True)
         # setup links
         self.net.addLink(self.dc[0], self.h[0])
         self.net.addLink(self.dc[0], self.dc[1])
@@ -70,7 +66,8 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
 
         # instantiate service
         self.service_uuid = json.loads(r.text).get("service_uuid")
-        r2 = requests.post("http://127.0.0.1:55000/instantiations", data=json.dumps({"service_uuid": self.service_uuid}))
+        r2 = requests.post("http://127.0.0.1:55000/instantiations",
+                           data=json.dumps({"service_uuid": self.service_uuid}))
         self.assertEqual(r2.status_code, 201)
 
         # give the emulator some time to instantiate everything
@@ -80,7 +77,8 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
         r3 = requests.get("http://127.0.0.1:55000/packages")
         self.assertEqual(len(json.loads(r3.text).get("service_uuid_list")), 1)
         r4 = requests.get("http://127.0.0.1:55000/instantiations")
-        self.assertEqual(len(json.loads(r4.text).get("service_instantiations_list")), 1)
+        self.assertEqual(len(json.loads(r4.text).get(
+            "service_instantiations_list")), 1)
 
         # check number of running nodes
         self.assertTrue(len(self.getContainernetContainers()) == 3)
@@ -89,49 +87,59 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
         # check compute list result
         self.assertEqual(len(self.dc[0].listCompute()), 2)
         # check connectivity by using ping
-        ELAN_list=[]
+        ELAN_list = []
 
         # check E-Line connection, by checking the IP addresses
         for link in self.net.deployed_elines:
-            vnf_src, intf_src, vnf_sap_docker_name = parse_interface(link['connection_points_reference'][0])
+            vnf_src, intf_src, vnf_sap_docker_name = parse_interface(
+                link['connection_points_reference'][0])
             print vnf_src, intf_src
             src = self.net.getNodeByName(vnf_src)
             if not src:
                 continue
             network_list = src.getNetworkStatus()
-            src_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == intf_src][0]
-            src_mask = [intf['netmask'] for intf in network_list if intf['intf_name'] == intf_src][0]
+            src_ip = [intf['ip']
+                      for intf in network_list if intf['intf_name'] == intf_src][0]
+            src_mask = [intf['netmask']
+                        for intf in network_list if intf['intf_name'] == intf_src][0]
 
-            vnf_dst,  intf_dst, vnf_sap_docker_name = parse_interface(link['connection_points_reference'][1])
+            vnf_dst, intf_dst, vnf_sap_docker_name = parse_interface(
+                link['connection_points_reference'][1])
             dst = self.net.getNodeByName(vnf_dst)
             if not dst:
                 continue
             network_list = dst.getNetworkStatus()
-            dst_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == intf_dst][0]
-            dst_mask = [intf['netmask'] for intf in network_list if intf['intf_name'] == intf_dst][0]
+            dst_ip = [intf['ip']
+                      for intf in network_list if intf['intf_name'] == intf_dst][0]
+            dst_mask = [intf['netmask']
+                        for intf in network_list if intf['intf_name'] == intf_dst][0]
 
-            print "src = {0}:{1} ip={2} ".format(vnf_src, intf_src, src_ip, src_mask)
-            print "dst = {0}:{1} ip={2} ".format(vnf_dst, intf_dst, dst_ip, dst_mask)
+            print "src = {0}:{1} ip={2} ".format(
+                vnf_src, intf_src, src_ip, src_mask)
+            print "dst = {0}:{1} ip={2} ".format(
+                vnf_dst, intf_dst, dst_ip, dst_mask)
 
             # check if the E-Line IP's are in the same subnet
             ret = ip_network(u'{0}'.format(src_ip, src_mask), strict=False)\
-                .compare_networks(ip_network(u'{0}'.format(dst_ip, dst_mask),strict=False))
+                .compare_networks(ip_network(u'{0}'.format(dst_ip, dst_mask), strict=False))
             self.assertTrue(ret == 0)
 
-
         for vnf in self.dc[0].listCompute():
             # check E LAN connection
             network_list = vnf.getNetworkStatus()
-            mgmt_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == 'mgmt']
+            mgmt_ip = [intf['ip']
+                       for intf in network_list if intf['intf_name'] == 'mgmt']
             self.assertTrue(len(mgmt_ip) > 0)
             ip_address = mgmt_ip[0]
             ELAN_list.append(ip_address)
             print ip_address
 
-        # check ELAN connection by ping over the mgmt network (needs to be configured as ELAN in the test service)
+        # check ELAN connection by ping over the mgmt network (needs to be
+        # configured as ELAN in the test service)
         for vnf in self.dc[0].listCompute():
             network_list = vnf.getNetworkStatus()
-            mgmt_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == 'mgmt']
+            mgmt_ip = [intf['ip']
+                       for intf in network_list if intf['intf_name'] == 'mgmt']
             self.assertTrue(len(mgmt_ip) > 0)
             ip_address = mgmt_ip[0]
             print ELAN_list
@@ -140,7 +148,7 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
             test_ip_list.remove(ip_address)
             for ip in test_ip_list:
                 # only take ip address, without netmask
-                p = self.net.ping([vnf],manualdestip=ip.split('/')[0])
+                p = self.net.ping([vnf], manualdestip=ip.split('/')[0])
                 print p
                 self.assertTrue(p <= 0.0)
 
@@ -148,7 +156,7 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
         self.stopNet()
         initialize_GK()
 
-    #@unittest.skip("disabled")
+    # @unittest.skip("disabled")
     def test_GK_Api_stop_service(self):
         # create network
         self.createNet(ndatacenter=2, nhosts=2)
@@ -176,7 +184,8 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
 
         # instantiate service
         self.service_uuid = json.loads(r.text).get("service_uuid")
-        r2 = requests.post("http://127.0.0.1:55001/instantiations", data=json.dumps({"service_uuid": self.service_uuid}))
+        r2 = requests.post("http://127.0.0.1:55001/instantiations",
+                           data=json.dumps({"service_uuid": self.service_uuid}))
         self.assertEqual(r2.status_code, 201)
 
         # give the emulator some time to instantiate everything
@@ -186,7 +195,8 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
         r3 = requests.get("http://127.0.0.1:55001/packages")
         self.assertEqual(len(json.loads(r3.text).get("service_uuid_list")), 1)
         r4 = requests.get("http://127.0.0.1:55001/instantiations")
-        self.assertEqual(len(json.loads(r4.text).get("service_instantiations_list")), 1)
+        self.assertEqual(len(json.loads(r4.text).get(
+            "service_instantiations_list")), 1)
 
         # check number of running nodes
         self.assertTrue(len(self.getContainernetContainers()) == 3)
@@ -196,18 +206,21 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
         self.assertEqual(len(self.dc[0].listCompute()), 2)
 
         # stop the service
-        service_instance_uuid = json.loads(r2.text).get("service_instance_uuid")
+        service_instance_uuid = json.loads(
+            r2.text).get("service_instance_uuid")
         self.assertTrue(service_instance_uuid is not None)
-        requests.delete("http://127.0.0.1:55001/instantiations", data=json.dumps({"service_uuid": self.service_uuid, "service_instance_uuid":service_instance_uuid}))
+        requests.delete("http://127.0.0.1:55001/instantiations", data=json.dumps(
+            {"service_uuid": self.service_uuid, "service_instance_uuid": service_instance_uuid}))
 
         r5 = requests.get("http://127.0.0.1:55001/instantiations")
-        self.assertTrue(len(json.loads(r5.text).get("service_instantiations_list")), 0)     # note that there was 1 instance before
+        # note that there was 1 instance before
+        self.assertTrue(len(json.loads(r5.text).get(
+            "service_instantiations_list")), 0)
 
         # stop Mininet network
         self.stopNet()
         initialize_GK()
 
-
     @unittest.skip("disabled")
     def test_GK_stress_service(self):
         # create network
@@ -232,7 +245,8 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
 
         # instantiate service
         self.service_uuid = json.loads(r.text).get("service_uuid")
-        r2 = requests.post("http://127.0.0.1:55002/instantiations", data=json.dumps({"service_uuid": self.service_uuid}))
+        r2 = requests.post("http://127.0.0.1:55002/instantiations",
+                           data=json.dumps({"service_uuid": self.service_uuid}))
         self.assertEqual(r2.status_code, 201)
 
         # give the emulator some time to instantiate everything
@@ -242,18 +256,21 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
         r3 = requests.get("http://127.0.0.1:55002/packages")
         self.assertEqual(len(json.loads(r3.text).get("service_uuid_list")), 1)
         r4 = requests.get("http://127.0.0.1:55002/instantiations")
-        self.assertEqual(len(json.loads(r4.text).get("service_instantiations_list")), 1)
+        self.assertEqual(len(json.loads(r4.text).get(
+            "service_instantiations_list")), 1)
 
         # stop the service
-        service_instance_uuid = json.loads(r2.text).get("service_instance_uuid")
+        service_instance_uuid = json.loads(
+            r2.text).get("service_instance_uuid")
         self.assertTrue(service_instance_uuid is not None)
-        requests.delete("http://127.0.0.1:55002/instantiations", data=json.dumps({"service_uuid": self.service_uuid, "service_instance_uuid":service_instance_uuid}))
+        requests.delete("http://127.0.0.1:55002/instantiations", data=json.dumps(
+            {"service_uuid": self.service_uuid, "service_instance_uuid": service_instance_uuid}))
 
         r5 = requests.get("http://127.0.0.1:55002/instantiations")
-        self.assertTrue(len(json.loads(r5.text).get("service_instantiations_list")), 0)     # note that there was 1 instance before
+        # note that there was 1 instance before
+        self.assertTrue(len(json.loads(r5.text).get(
+            "service_instantiations_list")), 0)
 
         # stop Mininet network
         self.stopNet()
         initialize_GK()
-
-