#!/bin/bash
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
echo "vim-emu stage archive: Not yet implemented!"
#!/bin/bash
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
echo "vim-emu stage build: Skipped. Installation was already done during container build procedure."
#!/bin/bash
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
#
# Runs the unittests of "vim-emu". Script needs to be executed inside
# the vim-emu Docker container by user "root". It requires the container
# trigger the tests
cd /son-emu/
py.test -v src/emuvim/test/unittests
-
+# trigger pep8 style check
+echo "Doing flake8 style check ..."
+flake8 --exclude=.eggs,devops --ignore=E501 .
+echo "done."
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Additional setup that installs 'son-emu-cli' in standalone mode.
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from setuptools import setup, find_packages
setup(name='vimemucli',
},
setup_requires=['pytest-runner'],
tests_require=['pytest'],
-)
+ )
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from setuptools import setup, find_packages
setup(name='emuvim',
'prometheus_client',
'ipaddress',
'simplejson',
- 'gevent'
+ 'gevent',
+ 'flake8'
],
zip_safe=False,
entry_points={
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import json
import logging
import copy
from flask import Flask
from flask import Response, request
from flask_restful import Api, Resource
-from mininet.link import Link
-import uuid
class ChainApi(Resource):
return response
def _start_flask(self):
- logging.info("Starting %s endpoint @ http://%s:%d" % ("ChainDummyApi", self.ip, self.port))
+ logging.info("Starting %s endpoint @ http://%s:%d" %
+ ("ChainDummyApi", self.ip, self.port))
if self.app is not None:
self.app.before_request(self.dump_playbook)
self.app.run(self.ip, self.port, debug=True, use_reloader=False)
if len(request.data) > 0:
data = "# CHAIN API\n"
data += "curl -X {type} -H \"Content-type: application/json\" -d '{data}' {url}".format(type=request.method,
- data=request.data,
- url=request.url)
+ data=request.data,
+ url=request.url)
logfile.write(data + "\n")
return Response(resp, status=200, mimetype="application/json")
except Exception as ex:
- logging.exception(u"%s: Could not show list of versions." % __name__)
+ logging.exception(
+ u"%s: Could not show list of versions." % __name__)
return ex.message, 500
for chain in self.api.manage.full_chain_data.values():
resp["chains"].append(chain)
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
except Exception as ex:
- logging.exception(u"%s: Could not list all network chains." % __name__)
+ logging.exception(
+ u"%s: Could not list all network chains." % __name__)
return ex.message, 500
for lb in self.api.manage.full_lb_data.values():
resp["loadbalancers"].append(lb)
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
except Exception as ex:
- logging.exception(u"%s: Could not list all live loadbalancers." % __name__)
+ logging.exception(
+ u"%s: Could not list all live loadbalancers." % __name__)
return ex.message, 500
vnf_dst_interface=dst_intfs, bidirectional=True,
path=path, layer2=layer2)
resp = {'cookie': cookie}
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
except Exception as e:
- logging.exception(u"%s: Error setting up the chain.\n %s" % (__name__, e))
- return Response(u"Error setting up the chain", status=500, mimetype="application/json")
+ logging.exception(
+ u"%s: Error setting up the chain.\n %s" % (__name__, e))
+ return Response(u"Error setting up the chain",
+ status=500, mimetype="application/json")
def delete(self, src_vnf, src_intfs, dst_vnf, dst_intfs):
"""
try:
cookie = self.api.manage.network_action_stop(src_vnf, dst_vnf, vnf_src_interface=src_intfs,
vnf_dst_interface=dst_intfs, bidirectional=True)
- return Response(json.dumps(cookie), status=200, mimetype="application/json")
+ return Response(json.dumps(cookie), status=200,
+ mimetype="application/json")
except Exception as e:
- logging.exception(u"%s: Error deleting the chain.\n %s" % (__name__, e))
- return Response(u"Error deleting the chain", status=500, mimetype="application/json")
+ logging.exception(
+ u"%s: Error deleting the chain.\n %s" % (__name__, e))
+ return Response(u"Error deleting the chain",
+ status=500, mimetype="application/json")
class ChainVnfDcStackInterfaces(Resource):
def __init__(self, api):
self.api = api
- def put(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
+ def put(self, src_dc, src_stack, src_vnf, src_intfs,
+ dst_dc, dst_stack, dst_vnf, dst_intfs):
"""
A PUT request to "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
will set up chain.
"""
# search for real names
- real_names = self._findNames(src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
- if type(real_names) is not tuple:
+ real_names = self._findNames(
+ src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
+ if not isinstance(real_names, tuple):
# something went wrong
return real_names
container_src, container_dst, interface_src, interface_dst = real_names
# check if both VNFs exist
- if not self.api.manage.check_vnf_intf_pair(container_src, interface_src):
+ if not self.api.manage.check_vnf_intf_pair(
+ container_src, interface_src):
return Response(u"VNF %s or intfs %s does not exist" % (container_src, interface_src), status=501,
mimetype="application/json")
- if not self.api.manage.check_vnf_intf_pair(container_dst, interface_dst):
+ if not self.api.manage.check_vnf_intf_pair(
+ container_dst, interface_dst):
return Response(u"VNF %s or intfs %s does not exist" % (container_dst, interface_dst), status=501,
mimetype="application/json")
vnf_dst_interface=interface_dst, bidirectional=True,
layer2=True)
resp = {'cookie': cookie}
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
except Exception as e:
- logging.exception(u"%s: Error setting up the chain.\n %s" % (__name__, e))
- return Response(u"Error setting up the chain", status=500, mimetype="application/json")
+ logging.exception(
+ u"%s: Error setting up the chain.\n %s" % (__name__, e))
+ return Response(u"Error setting up the chain",
+ status=500, mimetype="application/json")
- def post(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
+ def post(self, src_dc, src_stack, src_vnf, src_intfs,
+ dst_dc, dst_stack, dst_vnf, dst_intfs):
"""
A post request to "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
will create a chain between two interfaces at the specified vnfs.
layer2 = True
# search for real names
- real_names = self._findNames(src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
- if type(real_names) is not tuple:
+ real_names = self._findNames(
+ src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
+ if not isinstance(real_names, tuple):
# something went wrong
return real_names
vnf_dst_interface=interface_dst, bidirectional=True,
path=path, layer2=layer2)
resp = {'cookie': cookie}
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
except Exception as e:
- logging.exception(u"%s: Error setting up the chain.\n %s" % (__name__, e))
- return Response(u"Error setting up the chain", status=500, mimetype="application/json")
+ logging.exception(
+ u"%s: Error setting up the chain.\n %s" % (__name__, e))
+ return Response(u"Error setting up the chain",
+ status=500, mimetype="application/json")
- def delete(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
+ def delete(self, src_dc, src_stack, src_vnf, src_intfs,
+ dst_dc, dst_stack, dst_vnf, dst_intfs):
"""
A DELETE request to "/v1/chain/<src_dc>/<src_stack>/<src_vnf>/<src_intfs>/<dst_dc>/<dst_stack>/<dst_vnf>/<dst_intfs>"
will delete a previously created chain.
"""
# search for real names
- real_names = self._findNames(src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
- if type(real_names) is not tuple:
+ real_names = self._findNames(
+ src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs)
+ if not isinstance(real_names, tuple):
# something went wrong, real_names is a Response object
return real_names
try:
cookie = self.api.manage.network_action_stop(container_src, container_dst, vnf_src_interface=interface_src,
vnf_dst_interface=interface_dst, bidirectional=True)
- return Response(json.dumps(cookie), status=200, mimetype="application/json")
+ return Response(json.dumps(cookie), status=200,
+ mimetype="application/json")
except Exception as e:
- logging.exception(u"%s: Error deleting the chain.\n %s" % (__name__, e))
- return Response(u"Error deleting the chain", status=500, mimetype="application/json")
+ logging.exception(
+ u"%s: Error deleting the chain.\n %s" % (__name__, e))
+ return Response(u"Error deleting the chain",
+ status=500, mimetype="application/json")
# Tries to find real container and interface names according to heat template names
# Returns a tuple of 4 or a Response object
- def _findNames(self, src_dc, src_stack, src_vnf, src_intfs, dst_dc, dst_stack, dst_vnf, dst_intfs):
+ def _findNames(self, src_dc, src_stack, src_vnf, src_intfs,
+ dst_dc, dst_stack, dst_vnf, dst_intfs):
# search for datacenters
if src_dc not in self.api.manage.net.dcs or dst_dc not in self.api.manage.net.dcs:
- return Response(u"At least one DC does not exist", status=500, mimetype="application/json")
+ return Response(u"At least one DC does not exist",
+ status=500, mimetype="application/json")
dc_src = self.api.manage.net.dcs[src_dc]
dc_dst = self.api.manage.net.dcs[dst_dc]
# search for related OpenStackAPIs
if api.compute.dc == dc_dst:
api_dst = api
if api_src is None or api_dst is None:
- return Response(u"At least one OpenStackAPI does not exist", status=500, mimetype="application/json")
+ return Response(u"At least one OpenStackAPI does not exist",
+ status=500, mimetype="application/json")
# search for stacks
stack_src = None
stack_dst = None
if stack.stack_name == dst_stack:
stack_dst = stack
if stack_src is None or stack_dst is None:
- return Response(u"At least one Stack does not exist", status=500, mimetype="application/json")
+ return Response(u"At least one Stack does not exist",
+ status=500, mimetype="application/json")
# search for servers
server_src = None
server_dst = None
server_dst = server
break
if server_src is None or server_dst is None:
- return Response(u"At least one VNF does not exist", status=500, mimetype="application/json")
+ return Response(u"At least one VNF does not exist",
+ status=500, mimetype="application/json")
container_src = server_src.name
container_dst = server_dst.name
if dst_intfs in server_dst.port_names:
port_dst = stack_dst.ports[dst_intfs]
if port_src is None or port_dst is None:
- return Response(u"At least one Port does not exist", status=500, mimetype="application/json")
+ return Response(u"At least one Port does not exist",
+ status=500, mimetype="application/json")
interface_src = port_src.intf_name
interface_dst = port_dst.intf_name
# check src vnf/port
if src_stack != "floating":
- real_src = self._findName(src_dc, src_stack, vnf_src_name, vnf_src_interface)
- if type(real_src) is not tuple:
+ real_src = self._findName(
+ src_dc, src_stack, vnf_src_name, vnf_src_interface)
+ if not isinstance(real_src, tuple):
# something went wrong, real_src is a Response object
return real_src
dst_server = dst_vnf.get('server', None)
dst_port = dst_vnf.get('port', None)
if dst_dc is not None and dst_stack is not None and dst_server is not None and dst_port is not None:
- real_dst = self._findName(dst_dc, dst_stack, dst_server, dst_port)
- if type(real_dst) is not tuple:
+ real_dst = self._findName(
+ dst_dc, dst_stack, dst_server, dst_port)
+ if not isinstance(real_dst, tuple):
# something went wrong, real_dst is a Response object
return real_dst
real_dst_dict[real_dst[0]] = real_dst[1]
- input_object = {"dst_vnf_interfaces": real_dst_dict, "path": req.get("path", None)}
+ input_object = {"dst_vnf_interfaces": real_dst_dict,
+ "path": req.get("path", None)}
if src_stack != "floating":
- self.api.manage.add_loadbalancer(container_src, interface_src, lb_data=input_object)
+ self.api.manage.add_loadbalancer(
+ container_src, interface_src, lb_data=input_object)
return Response(u"Loadbalancer set up at %s:%s" % (container_src, interface_src),
status=200, mimetype="application/json")
else:
- cookie, floating_ip = self.api.manage.add_floating_lb(src_dc, lb_data=input_object)
+ cookie, floating_ip = self.api.manage.add_floating_lb(
+ src_dc, lb_data=input_object)
return Response(json.dumps({"cookie": "%d" % cookie, "floating_ip": "%s" % floating_ip}),
status=200, mimetype="application/json")
logging.exception(u"%s: Error setting up the loadbalancer at %s %s %s:%s.\n %s" %
(__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface, e))
return Response(u"%s: Error setting up the loadbalancer at %s %s %s:%s.\n %s" %
- (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface, e), status=500,
+ (__name__, src_dc, src_stack, vnf_src_name,
+ vnf_src_interface, e), status=500,
mimetype="application/json")
def delete(self, src_dc, src_stack, vnf_src_name, vnf_src_interface):
try:
# check src vnf/port
if src_stack != "floating":
- real_src = self._findName(src_dc, src_stack, vnf_src_name, vnf_src_interface)
- if type(real_src) is not tuple:
+ real_src = self._findName(
+ src_dc, src_stack, vnf_src_name, vnf_src_interface)
+ if not isinstance(real_src, tuple):
# something went wrong, real_src is a Response object
return real_src
container_src, interface_src = real_src
- self.api.manage.delete_loadbalancer(container_src, interface_src)
+ self.api.manage.delete_loadbalancer(
+ container_src, interface_src)
return Response(u"Loadbalancer deleted at %s:%s" % (vnf_src_name, vnf_src_interface),
status=200, mimetype="application/json")
else:
logging.exception(u"%s: Error deleting the loadbalancer at %s %s %s%s.\n %s" %
(__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface, e))
return Response(u"%s: Error deleting the loadbalancer at %s %s %s%s." %
- (__name__, src_dc, src_stack, vnf_src_name, vnf_src_interface), status=500,
+ (__name__, src_dc, src_stack, vnf_src_name,
+ vnf_src_interface), status=500,
mimetype="application/json")
# Tries to find real container and port name according to heat template names
def _findName(self, dc, stack, vnf, port):
# search for datacenters
if dc not in self.api.manage.net.dcs:
- return Response(u"DC does not exist", status=500, mimetype="application/json")
+ return Response(u"DC does not exist", status=500,
+ mimetype="application/json")
dc_real = self.api.manage.net.dcs[dc]
# search for related OpenStackAPIs
api_real = None
if api.compute.dc == dc_real:
api_real = api
if api_real is None:
- return Response(u"OpenStackAPI does not exist", status=500, mimetype="application/json")
+ return Response(u"OpenStackAPI does not exist",
+ status=500, mimetype="application/json")
# search for stacks
stack_real = None
for stackObj in api_real.compute.stacks.values():
if stackObj.stack_name == stack:
stack_real = stackObj
if stack_real is None:
- return Response(u"Stack does not exist", status=500, mimetype="application/json")
+ return Response(u"Stack does not exist", status=500,
+ mimetype="application/json")
# search for servers
server_real = None
for server in stack_real.servers.values():
server_real = server
break
if server_real is None:
- return Response(u"VNF does not exist", status=500, mimetype="application/json")
+ return Response(u"VNF does not exist", status=500,
+ mimetype="application/json")
container_real = server_real.name
if port in server_real.port_names:
port_real = stack_real.ports[port]
if port_real is None:
- return Response(u"At least one Port does not exist", status=500, mimetype="application/json")
+ return Response(u"At least one Port does not exist",
+ status=500, mimetype="application/json")
interface_real = port_real.intf_name
if vnf_src_name != "floating":
# check if VNF exist
- if not self.api.manage.check_vnf_intf_pair(vnf_src_name, vnf_src_interface):
+ if not self.api.manage.check_vnf_intf_pair(
+ vnf_src_name, vnf_src_interface):
return Response(u"VNF %s or intfs %s does not exist" % (vnf_src_name, vnf_src_interface),
status=501,
mimetype="application/json")
- self.api.manage.add_loadbalancer(vnf_src_name, vnf_src_interface, lb_data=req)
+ self.api.manage.add_loadbalancer(
+ vnf_src_name, vnf_src_interface, lb_data=req)
return Response(u"Loadbalancer set up at %s:%s" % (vnf_src_name, vnf_src_interface),
status=200, mimetype="application/json")
else:
- cookie, floating_ip = self.api.manage.add_floating_lb(vnf_src_interface, lb_data=req)
+ cookie, floating_ip = self.api.manage.add_floating_lb(
+ vnf_src_interface, lb_data=req)
return Response(json.dumps({"cookie": "%d" % cookie, "floating_ip": "%s" % floating_ip}),
status=200, mimetype="application/json")
"""
# check if VNF exist
- if not self.api.manage.check_vnf_intf_pair(vnf_src_name, vnf_src_interface):
+ if not self.api.manage.check_vnf_intf_pair(
+ vnf_src_name, vnf_src_interface):
return Response(u"VNF %s or intfs %s does not exist" % (vnf_src_name, vnf_src_interface), status=501,
mimetype="application/json")
try:
- logging.debug("Deleting loadbalancer at %s: interface: %s" % (vnf_src_name, vnf_src_interface))
+ logging.debug("Deleting loadbalancer at %s: interface: %s" %
+ (vnf_src_name, vnf_src_interface))
net = self.api.manage.net
if vnf_src_name != "floating":
return Response(u"Source VNF or interface can not be found." % vnf_src_name,
status=404, mimetype="application/json")
- self.api.manage.delete_loadbalancer(vnf_src_name, vnf_src_interface)
+ self.api.manage.delete_loadbalancer(
+ vnf_src_name, vnf_src_interface)
return Response(u"Loadbalancer deleted at %s:%s" % (vnf_src_name, vnf_src_interface),
status=200, mimetype="application/json")
# with their unique keys
link = copy.copy(data)
for edge in link:
- # do not add any links to the floating switch to the topology!
+ # do not add any links to the floating switch
+ # to the topology!
if graph_node == "fs1":
continue
# the translator wants everything as a string!
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from mininet.link import Link
-from resources import *
+from resources.instance_flavor import InstanceFlavor
+from resources.net import Net
+from resources.port import Port
+from resources.port_pair import PortPair
+from resources.port_pair_group import PortPairGroup
+from resources.flow_classifier import FlowClassifier
+from resources.port_chain import PortChain
+from resources.server import Server
+from resources.image import Image
+
from docker import DockerClient
import logging
import threading
for image in self.dcli.images.list():
if len(image.tags) > 0:
for t in image.tags:
- t = t.replace(":latest", "") # only use short tag names for OSM compatibility
+ # only use short tag names for OSM compatibility
+ t = t.replace(":latest", "")
if t not in self._images:
self._images[t] = Image(t)
return self._images
"""
if not self.check_stack(stack):
self.clean_broken_stack(stack)
- raise HeatApiStackInvalidException("Stack did not pass validity checks")
+ raise HeatApiStackInvalidException(
+ "Stack did not pass validity checks")
self.stacks[stack.id] = stack
def clean_broken_stack(self, stack):
for port_name in server.port_names:
if port_name not in stack.ports:
LOG.warning("Server %s of stack %s has a port named %s that is not known." %
- (server.name, stack.stack_name, port_name))
+ (server.name, stack.stack_name, port_name))
everything_ok = False
if server.image is None:
LOG.warning("Server %s holds no image." % (server.name))
for port in stack.ports.values():
if port.net_name not in stack.nets:
LOG.warning("Port %s of stack %s has a network named %s that is not known." %
- (port.name, stack.stack_name, port.net_name))
+ (port.name, stack.stack_name, port.net_name))
everything_ok = False
if port.intf_name is None:
LOG.warning("Port %s has no interface name." % (port.name))
break
if not found:
LOG.warning("Router %s of stack %s has a network named %s that is not known." %
- (router.name, stack.stack_name, subnet_name))
+ (router.name, stack.stack_name, subnet_name))
everything_ok = False
return everything_ok
- def add_flavor(self, name, cpu, memory, memory_unit, storage, storage_unit):
+ def add_flavor(self, name, cpu, memory,
+ memory_unit, storage, storage_unit):
"""
Adds a flavor to the stack.
:param storage_unit:
:type storage_unit: ``str``
"""
- flavor = InstanceFlavor(name, cpu, memory, memory_unit, storage, storage_unit)
+ flavor = InstanceFlavor(
+ name, cpu, memory, memory_unit, storage, storage_unit)
self.flavors[flavor.name] = flavor
return flavor
* *False*: else
:rtype: ``bool``
"""
- LOG.debug("updating stack {} with new_stack {}".format(old_stack_id, new_stack))
+ LOG.debug("updating stack {} with new_stack {}".format(
+ old_stack_id, new_stack))
if old_stack_id not in self.stacks:
return False
old_stack = self.stacks[old_stack_id]
# Remove unnecessary networks
for net in old_stack.nets.values():
- if not net.name in new_stack.nets:
+ if net.name not in new_stack.nets:
self.delete_network(net.id)
# Remove all unnecessary servers
for server in old_stack.servers.values():
if server.name in new_stack.servers:
- if not server.compare_attributes(new_stack.servers[server.name]):
+ if not server.compare_attributes(
+ new_stack.servers[server.name]):
self.stop_compute(server)
else:
# Delete unused and changed links
for port_name in server.port_names:
if port_name in old_stack.ports and port_name in new_stack.ports:
- if not old_stack.ports.get(port_name) == new_stack.ports.get(port_name):
+ if not old_stack.ports.get(
+ port_name) == new_stack.ports.get(port_name):
my_links = self.dc.net.links
for link in my_links:
if str(link.intf1) == old_stack.ports[port_name].intf_name and \
- str(link.intf1.ip) == \
- old_stack.ports[port_name].ip_address.split('/')[0]:
+ str(link.intf1.ip) == \
+ old_stack.ports[port_name].ip_address.split('/')[0]:
self._remove_link(server.name, link)
# Add changed link
if port.compare_attributes(old_port):
for net in new_stack.nets.values():
if net.name == port.net_name:
- if net.assign_ip_address(old_port.ip_address, port.name):
+ if net.assign_ip_address(
+ old_port.ip_address, port.name):
port.ip_address = old_port.ip_address
port.mac_address = old_port.mac_address
else:
- port.ip_address = net.get_new_ip_address(port.name)
+ port.ip_address = net.get_new_ip_address(
+ port.name)
for port in new_stack.ports.values():
for net in new_stack.nets.values():
- if port.net_name == net.name and not net.is_my_ip(port.ip_address, port.name):
+ if port.net_name == net.name and not net.is_my_ip(
+ port.ip_address, port.name):
port.ip_address = net.get_new_ip_address(port.name)
def update_subnet_cidr(self, old_stack, new_stack):
if port is not None:
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
- network_dict[network_dict['id']] = self.find_network_by_name_or_id(port.net_name).name
+ network_dict[network_dict['id']] = self.find_network_by_name_or_id(
+ port.net_name).name
network.append(network_dict)
# default network dict
if len(network) < 1:
if "SON_EMU_CMD=" in env_var:
cmd = str(env_var.split("=")[1])
server.son_emu_command = cmd
- # execute command in new thread to ensure that GK is not blocked by VNF
+ # execute command in new thread to ensure that GK is not
+ # blocked by VNF
t = threading.Thread(target=c.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
:param server: The server that should be removed
:type server: ``heat.resources.server``
"""
- LOG.debug("Stopping container %s with full name %s" % (server.name, server.full_name))
+ LOG.debug("Stopping container %s with full name %s" %
+ (server.name, server.full_name))
link_names = list()
for port_name in server.port_names:
prt = self.find_port_by_name_or_id(port_name)
my_links = self.dc.net.links
for link in my_links:
if str(link.intf1) in link_names:
- # Remove all self created links that connect the server to the main switch
+ # Remove all self created links that connect the server to the
+ # main switch
self._remove_link(server.name, link)
# Stop the server and the remaining connection to the datacenter switch
return self.computeUnits[name_or_id]
for server in self.computeUnits.values():
- if server.name == name_or_id or server.template_name == name_or_id or server.full_name == name_or_id:
+ if (server.name == name_or_id or
+ server.template_name == name_or_id or
+ server.full_name == name_or_id):
return server
- if (server.name == self._shorten_server_name(name_or_id)
- or server.template_name == self._shorten_server_name(name_or_id)
- or server.full_name == self._shorten_server_name(name_or_id)):
+ if (server.name == self._shorten_server_name(name_or_id) or
+ server.template_name == self._shorten_server_name(name_or_id) or
+ server.full_name == self._shorten_server_name(name_or_id)):
return server
return None
:return: Returns the created server.
:rtype: :class:`heat.resources.server`
"""
- if self.find_server_by_name_or_id(name) is not None and not stack_operation:
+ if self.find_server_by_name_or_id(
+ name) is not None and not stack_operation:
raise Exception("Server with name %s already exists." % name)
safe_name = self._shorten_server_name(name)
server = Server(safe_name)
LOG.info("Short server name: {}".format(h))
return name
-
def delete_server(self, server):
"""
Deletes the given server from the stack dictionary and the computeUnits dictionary.
:return: :class:`heat.resources.net`
"""
LOG.debug("Creating network with name %s" % name)
- if self.find_network_by_name_or_id(name) is not None and not stack_operation:
- LOG.warning("Creating network with name %s failed, as it already exists" % name)
+ if self.find_network_by_name_or_id(
+ name) is not None and not stack_operation:
+ LOG.warning(
+ "Creating network with name %s failed, as it already exists" % name)
raise Exception("Network with name %s already exists." % name)
network = Net(name)
network.id = str(uuid.uuid4())
"""
net = self.find_network_by_name_or_id(name_or_id)
if net is None:
- raise Exception("Network with name or id %s does not exists." % name_or_id)
+ raise Exception(
+ "Network with name or id %s does not exists." % name_or_id)
for stack in self.stacks.values():
stack.nets.pop(net.name, None)
"""
port = self.find_port_by_name_or_id(name)
if port is not None and not stack_operation:
- LOG.warning("Creating port with name %s failed, as it already exists" % name)
+ LOG.warning(
+ "Creating port with name %s failed, as it already exists" % name)
raise Exception("Port with name %s already exists." % name)
LOG.debug("Creating port with name %s" % name)
port = Port(name)
"""
port = self.find_port_by_name_or_id(name_or_id)
if port is None:
- LOG.warning("Port with name or id %s does not exist. Can't delete it." % name_or_id)
+ LOG.warning(
+ "Port with name or id %s does not exist. Can't delete it." % name_or_id)
return
my_links = self.dc.net.links
for link in my_links:
if str(link.intf1) == port.intf_name and \
- str(link.intf1.ip) == port.ip_address.split('/')[0]:
+ str(link.intf1.ip) == port.ip_address.split('/')[0]:
self._remove_link(link.intf1.node.name, link)
break
"""
port_pair = self.find_port_pair_by_name_or_id(name)
if port_pair is not None and not stack_operation:
- logging.warning("Creating port pair with name %s failed, as it already exists" % name)
+ logging.warning(
+ "Creating port pair with name %s failed, as it already exists" % name)
raise Exception("Port pair with name %s already exists." % name)
logging.debug("Creating port pair with name %s" % name)
port_pair = PortPair(name)
"""
port_pair = self.find_port_pair_by_name_or_id(name_or_id)
if port_pair is None:
- raise Exception("Port pair with name or id %s does not exists." % name_or_id)
+ raise Exception(
+ "Port pair with name or id %s does not exists." % name_or_id)
self.port_pairs.pop(port_pair.id, None)
"""
port_pair_group = self.find_port_pair_group_by_name_or_id(name)
if port_pair_group is not None and not stack_operation:
- logging.warning("Creating port pair group with name %s failed, as it already exists" % name)
- raise Exception("Port pair group with name %s already exists." % name)
+ logging.warning(
+ "Creating port pair group with name %s failed, as it already exists" % name)
+ raise Exception(
+ "Port pair group with name %s already exists." % name)
logging.debug("Creating port pair group with name %s" % name)
port_pair_group = PortPairGroup(name)
if not stack_operation:
"""
port_pair_group = self.find_port_pair_group_by_name_or_id(name_or_id)
if port_pair_group is None:
- raise Exception("Port pair with name or id %s does not exists." % name_or_id)
+ raise Exception(
+ "Port pair with name or id %s does not exists." % name_or_id)
self.port_pair_groups.pop(port_pair_group.id, None)
"""
port_chain = self.find_port_chain_by_name_or_id(name)
if port_chain is not None and not stack_operation:
- logging.warning("Creating port chain with name %s failed, as it already exists" % name)
+ logging.warning(
+ "Creating port chain with name %s failed, as it already exists" % name)
raise Exception("Port chain with name %s already exists." % name)
logging.debug("Creating port chain with name %s" % name)
port_chain = PortChain(name)
port_chain = self.find_port_chain_by_name_or_id(name_or_id)
port_chain.uninstall(self)
if port_chain is None:
- raise Exception("Port chain with name or id %s does not exists." % name_or_id)
+ raise Exception(
+ "Port chain with name or id %s does not exists." % name_or_id)
self.port_chains.pop(port_chain.id, None)
"""
flow_classifier = self.find_flow_classifier_by_name_or_id(name)
if flow_classifier is not None and not stack_operation:
- logging.warning("Creating flow classifier with name %s failed, as it already exists" % name)
- raise Exception("Flow classifier with name %s already exists." % name)
+ logging.warning(
+ "Creating flow classifier with name %s failed, as it already exists" % name)
+ raise Exception(
+ "Flow classifier with name %s already exists." % name)
logging.debug("Creating flow classifier with name %s" % name)
flow_classifier = FlowClassifier(name)
if not stack_operation:
"""
flow_classifier = self.find_flow_classifier_by_name_or_id(name_or_id)
if flow_classifier is None:
- raise Exception("Flow classifier with name or id %s does not exists." % name_or_id)
+ raise Exception(
+ "Flow classifier with name or id %s does not exists." % name_or_id)
self.flow_classifiers.pop(flow_classifier.id, None)
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from docker import DockerClient, APIClient
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from docker import APIClient
import time
import re
cpu_usage = 0
for number in numbers:
cpu_usage += number
- return {'CPU_used': cpu_usage, 'CPU_used_systime': sys_time, 'CPU_cores': len(numbers)}
+ return {'CPU_used': cpu_usage,
+ 'CPU_used_systime': sys_time, 'CPU_cores': len(numbers)}
def docker_mem_used(container_id):
out_dict = dict()
out_dict['MEM_used'] = docker_mem_used(container_id)
out_dict['MEM_limit'] = docker_max_mem(container_id)
- out_dict['MEM_%'] = float(out_dict['MEM_used']) / float(out_dict['MEM_limit'])
+ out_dict['MEM_%'] = float(out_dict['MEM_used']) / \
+ float(out_dict['MEM_limit'])
return out_dict
second_disk_io = docker_block_rw(container_id)
# Disk access
- time_div = (int(second_disk_io['BLOCK_systime']) - int(first_disk_io['BLOCK_systime']))
- read_div = int(second_disk_io['BLOCK_read']) - int(first_disk_io['BLOCK_read'])
- write_div = int(second_disk_io['BLOCK_write']) - int(first_disk_io['BLOCK_write'])
+ time_div = (int(second_disk_io['BLOCK_systime']
+ ) - int(first_disk_io['BLOCK_systime']))
+ read_div = int(second_disk_io['BLOCK_read']) - \
+ int(first_disk_io['BLOCK_read'])
+ write_div = int(second_disk_io['BLOCK_write']) - \
+ int(first_disk_io['BLOCK_write'])
out_dict = {'BLOCK_read/s': int(read_div * 1000000000 / float(time_div) + 0.5),
'BLOCK_write/s': int(write_div * 1000000000 / float(time_div) + 0.5)}
'NET_out/s': int(out_div * 1000000000 / float(time_div) + 0.5)})
# CPU utilization
- time_div = (int(second_cpu_usage['CPU_used_systime']) - int(first_cpu_usage['CPU_used_systime']))
- usage_div = int(second_cpu_usage['CPU_used']) - int(first_cpu_usage['CPU_used'])
- out_dict.update({'CPU_%': usage_div / float(time_div), 'CPU_cores': first_cpu_usage['CPU_cores']})
+ time_div = (int(second_cpu_usage['CPU_used_systime']
+ ) - int(first_cpu_usage['CPU_used_systime']))
+ usage_div = int(second_cpu_usage['CPU_used']) - \
+ int(first_cpu_usage['CPU_used'])
+ out_dict.update({'CPU_%': usage_div / float(time_div),
+ 'CPU_cores': first_cpu_usage['CPU_cores']})
return out_dict
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from __future__ import print_function # TODO remove when print is no longer needed for debugging
-from resources import *
+from resources.router import Router
from datetime import datetime
import re
import sys
* *False*: Else
:rtype: ``bool``
"""
- if not self.check_template_version(str(input_dict['heat_template_version'])):
+ if not self.check_template_version(
+ str(input_dict['heat_template_version'])):
print('Unsupported template version: ' + input_dict['heat_template_version'], file=sys.stderr)
return False
self.bufferResource = list()
for resource in self.resources.values():
- self.handle_resource(resource, stack, dc_label, stack_update=stack_update)
+ self.handle_resource(resource, stack, dc_label,
+ stack_update=stack_update)
- # This loop tries to create all classes which had unresolved dependencies.
+ # This loop tries to create all classes which had unresolved
+ # dependencies.
unresolved_resources_last_round = len(self.bufferResource) + 1
- while len(self.bufferResource) > 0 and unresolved_resources_last_round > len(self.bufferResource):
+ while len(self.bufferResource) > 0 and unresolved_resources_last_round > len(
+ self.bufferResource):
unresolved_resources_last_round = len(self.bufferResource)
number_of_items = len(self.bufferResource)
while number_of_items > 0:
- self.handle_resource(self.bufferResource.pop(0), stack, dc_label, stack_update=stack_update)
+ self.handle_resource(self.bufferResource.pop(
+ 0), stack, dc_label, stack_update=stack_update)
number_of_items -= 1
if len(self.bufferResource) > 0:
try:
net_name = resource['properties']['name']
if net_name not in stack.nets:
- stack.nets[net_name] = self.compute.create_network(net_name, True)
+ stack.nets[net_name] = self.compute.create_network(
+ net_name, True)
except Exception as e:
LOG.warning('Could not create Net: ' + e.message)
net.subnet_name = resource['properties']['name']
if 'gateway_ip' in resource['properties']:
net.gateway_ip = resource['properties']['gateway_ip']
- net.subnet_id = resource['properties'].get('id', str(uuid.uuid4()))
+ net.subnet_id = resource['properties'].get(
+ 'id', str(uuid.uuid4()))
net.subnet_creation_time = str(datetime.now())
if not stack_update:
net.set_cidr(IP.get_new_cidr(net.subnet_id))
else:
port = stack.ports[port_name]
- if str(resource['properties']['network']['get_resource']) in stack.nets:
- net = stack.nets[resource['properties']['network']['get_resource']]
+ if str(resource['properties']['network']
+ ['get_resource']) in stack.nets:
+ net = stack.nets[resource['properties']
+ ['network']['get_resource']]
if net.subnet_id is not None:
port.net_name = net.name
port.ip_address = net.get_new_ip_address(port.name)
if 'OS::Nova::Server' in resource['type']:
try:
- compute_name = str(dc_label) + '_' + str(stack.stack_name) + '_' + str(resource['properties']['name'])
+ compute_name = str(dc_label) + '_' + str(stack.stack_name) + \
+ '_' + str(resource['properties']['name'])
shortened_name = str(dc_label) + '_' + str(stack.stack_name) + '_' + \
- self.shorten_server_name(str(resource['properties']['name']), stack)
+ self.shorten_server_name(
+ str(resource['properties']['name']), stack)
nw_list = resource['properties']['networks']
if shortened_name not in stack.servers:
- server = self.compute.create_server(shortened_name, stack_update)
+ server = self.compute.create_server(
+ shortened_name, stack_update)
stack.servers[shortened_name] = server
else:
server = stack.servers[shortened_name]
server.full_name = compute_name
server.template_name = str(resource['properties']['name'])
- server.command = resource['properties'].get('command', '/bin/sh')
+ server.command = resource['properties'].get(
+ 'command', '/bin/sh')
server.image = resource['properties']['image']
server.flavor = resource['properties']['flavor']
# we don't know which network it belongs to yet, but the resource will appear later in a valid
# template
if port_name not in stack.ports:
- stack.ports[port_name] = self.compute.create_port(port_name, stack_update)
+ stack.ports[port_name] = self.compute.create_port(
+ port_name, stack_update)
server.port_names.append(port_name)
return
except Exception as e:
stack.routers[router_name].add_subnet(subnet_name)
return
except Exception as e:
- LOG.warning('Could not create RouterInterface: ' + e.__repr__())
+ LOG.warning(
+ 'Could not create RouterInterface: ' + e.__repr__())
self.bufferResource.append(resource)
return
port_name = resource['properties']['port_id']['get_resource']
floating_network_id = resource['properties']['floating_network_id']
if port_name not in stack.ports:
- stack.ports[port_name] = self.compute.create_port(port_name, stack_update)
+ stack.ports[port_name] = self.compute.create_port(
+ port_name, stack_update)
stack.ports[port_name].floating_ip = floating_network_id
except Exception as e:
if 'OS::Heat::ResourceGroup' in resource['type']:
try:
embedded_resource = resource['properties']['resource_def']
- LOG.debug("Found resource in resource group: {}".format(embedded_resource))
+ LOG.debug("Found resource in resource group: {}".format(
+ embedded_resource))
# recursively parse embedded resource
- self.handle_resource(embedded_resource, stack, dc_label, stack_update)
+ self.handle_resource(
+ embedded_resource, stack, dc_label, stack_update)
except Exception as e:
print('Could not create Router: ' + e.message)
return
- LOG.warning('Could not determine resource type: {}'.format(resource['type']))
+ LOG.warning(
+ 'Could not determine resource type: {}'.format(resource['type']))
return
def shorten_server_name(self, server_name, stack):
if year < 2015:
return False
if year == 2015:
- if month < 04:
+ if month < 4:
return False
- if month == 04 and day < 30:
+ if month == 4 and day < 30:
return False
return True
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from urlparse import urlparse
import logging
LOG = logging.getLogger("api.openstack.helper")
+
def get_host(r):
try:
return urlparse(r.base_url).hostname
- except:
+ except BaseException:
LOG.error("Could not get host part of request URL.")
return "0.0.0.0"
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from resources.net import Net
import threading
int_ip = Net.cidr_2_int(cidr)
- if not int_ip in __issued_ips:
+ if int_ip not in __issued_ips:
return False
if __issued_ips[int_ip] == uuid:
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""Openstack manage component of PG Sandman.
-
-.. module:: manage
- :synopsis: Module containing the OpenstackManage class.
-.. moduleauthor: PG Sandman
-
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
import threading
import uuid
import chain_api
import json
import random
-from emuvim.api.openstack.resources import Net, Port
+from emuvim.api.openstack.resources.net import Net
+from emuvim.api.openstack.resources.port import Port
from mininet.node import OVSSwitch, RemoteController, Node
self.ip = ip
self.port = port
self._net = None
- # to keep track which src_vnf(input port on the switch) handles a load balancer
+ # to keep track which src_vnf(input port on the switch) handles a load
+ # balancer
self.lb_flow_cookies = dict()
self.chain_flow_cookies = dict()
# debug and to maintain
self.flow_groups = dict()
- # we want one global chain api. this should not be datacenter dependent!
+ # we want one global chain api. this should not be datacenter
+ # dependent!
self.chain = chain_api.ChainApi(ip, port, self)
self.thread = threading.Thread(target=self.chain._start_flask, args=())
self.thread.daemon = True
# create a port for the host
port = Port("root-port")
- #port.id = str(uuid.uuid4())
+ # port.id = str(uuid.uuid4())
port.net_name = fn.name
# get next free ip
# floating ip network setup
        # weird way of getting a datacenter object
first_dc = self.net.dcs.values()[0]
- # set a dpid for the switch. for this we have to get the id of the next possible dc
- self.floating_switch = self.net.addSwitch("fs1", dpid=hex(first_dc._get_next_dc_dpid())[2:])
+ # set a dpid for the switch. for this we have to get the id of the
+ # next possible dc
+ self.floating_switch = self.net.addSwitch(
+ "fs1", dpid=hex(first_dc._get_next_dc_dpid())[2:])
# this is the interface appearing on the physical host
self.floating_root = Node('root', inNamespace=False)
self.net.hosts.append(self.floating_root)
self.net.nameToNode['root'] = self.floating_root
- self.floating_intf = self.net.addLink(self.floating_root, self.floating_switch).intf1
+ self.floating_intf = self.net.addLink(
+ self.floating_root, self.floating_switch).intf1
self.floating_root.setIP(root_ip, intf=self.floating_intf)
- self.floating_nodes[(self.floating_root.name, root_ip)] = self.floating_root
-
+ self.floating_nodes[(self.floating_root.name,
+ root_ip)] = self.floating_root
def stop_floating_network(self):
self._net = None
vnf_dst_interface = kwargs.get('vnf_dst_interface')
layer2 = kwargs.get('layer2', True)
match = kwargs.get('match')
- flow = (vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
+ flow = (vnf_src_name, vnf_src_interface,
+ vnf_dst_name, vnf_dst_interface)
if flow in self.chain_flow_cookies:
- raise Exception("There is already a chain at the specified src/dst pair!")
- # set up a layer 2 chain, this allows multiple chains for the same interface
+ raise Exception(
+ "There is already a chain at the specified src/dst pair!")
+ # set up a layer 2 chain, this allows multiple chains for the same
+ # interface
src_node = self.net.getNodeByName(vnf_src_name)
dst_node = self.net.getNodeByName(vnf_dst_name)
dst_intf = dst_node.intf(vnf_dst_interface)
if layer2:
- switch, inport = self._get_connected_switch_data(vnf_src_name, vnf_src_interface)
- self.setup_arp_reply_at(switch, inport, dst_intf.IP(), dst_intf.MAC())
+ switch, inport = self._get_connected_switch_data(
+ vnf_src_name, vnf_src_interface)
+ self.setup_arp_reply_at(
+ switch, inport, dst_intf.IP(), dst_intf.MAC())
if isinstance(match, str):
match += ",dl_dst=%s" % dst_intf.MAC()
else:
cookie = kwargs.get('cookie', self.get_cookie())
self.cookies.add(cookie)
- c = self.net.setChain(
+ self.net.setChain(
vnf_src_name, vnf_dst_name,
vnf_src_interface=vnf_src_interface,
vnf_dst_interface=vnf_dst_interface,
cookie=cookie,
path=kwargs.get('path'))
- # to keep this logic seperate of the core son-emu do the housekeeping here
+        # to keep this logic separate of the core son-emu do the
+        # housekeeping here
data = dict()
data["src_vnf"] = vnf_src_name
data["src_intf"] = vnf_src_interface
vnf_dst_interface)[0]
# add route to dst ip to this interface
- # this might block on containers that are still setting up, so start a new thread
+ # this might block on containers that are still setting up, so
+ # start a new thread
if not kwargs.get('no_route'):
# son_emu does not like concurrent commands for a container so we need to lock this if multiple chains
# on the same interface are created
- src_node.setHostRoute(dst_node.intf(vnf_dst_interface).IP(), vnf_src_interface)
+ src_node.setHostRoute(dst_node.intf(
+ vnf_dst_interface).IP(), vnf_src_interface)
try:
- son_emu_data = json.loads(self.get_son_emu_chain_data(vnf_src_name))
- except:
+ son_emu_data = json.loads(
+ self.get_son_emu_chain_data(vnf_src_name))
+ except BaseException:
son_emu_data = dict()
if "son_emu_data" not in son_emu_data:
son_emu_data["son_emu_data"] = dict()
son_emu_data["son_emu_data"]["interfaces"] = dict()
if vnf_src_interface not in son_emu_data["son_emu_data"]["interfaces"]:
son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface] = list()
- son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface].append(dst_intf.IP())
+ son_emu_data["son_emu_data"]["interfaces"][vnf_src_interface].append(
+ dst_intf.IP())
self.set_son_emu_chain_data(vnf_src_name, son_emu_data)
:param data: Raw data to store on the node.
:type data: ``str``
"""
- self.net.getNodeByName(vnf_name).cmd("echo \'%s\' > /tmp/son_emu_data.json" % json.dumps(data))
+ self.net.getNodeByName(vnf_name).cmd(
+ "echo \'%s\' > /tmp/son_emu_data.json" % json.dumps(data))
ip_list = []
for intf in data['son_emu_data']['interfaces'].values():
ip_list.extend(intf)
- self.net.getNodeByName(vnf_name).cmd("echo \'%s\' > /tmp/son_emu_data" % "\n".join(ip_list))
+ self.net.getNodeByName(vnf_name).cmd(
+ "echo \'%s\' > /tmp/son_emu_data" % "\n".join(ip_list))
def get_son_emu_chain_data(self, vnf_name):
"""
:return: raw data stored on the node
:rtype: ``str``
"""
- return self.net.getNodeByName(vnf_name).cmd("cat /tmp/son_emu_data.json")
+ return self.net.getNodeByName(vnf_name).cmd(
+ "cat /tmp/son_emu_data.json")
def _get_connected_switch_data(self, vnf_name, vnf_interface):
"""
link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_interface or
- link_dict[link][
- 'src_port_name'] == vnf_interface):
+ link_dict[link][
+ 'src_port_name'] == vnf_interface):
# found the right link and connected switch
src_sw = connected_sw
src_sw_inport_nr = link_dict[link]['dst_port_nr']
:return: path, src_sw, dst_sw
:rtype: ``list``, ``str``, ``str``
"""
- # modified version of the _chainAddFlow from emuvim.dcemulator.net._chainAddFlow
+ # modified version of the _chainAddFlow from
+ # emuvim.dcemulator.net._chainAddFlow
src_sw = None
dst_sw = None
logging.debug("Find shortest path from vnf %s to %s",
link_dict = self.net.DCNetwork_graph[src_vnf][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == src_vnf_intf or
- link_dict[link][
- 'src_port_name'] == src_vnf_intf):
+ link_dict[link][
+ 'src_port_name'] == src_vnf_intf):
# found the right link and connected switch
src_sw = connected_sw
break
link_dict = self.net.DCNetwork_graph[connected_sw][dst_vnf]
for link in link_dict:
if link_dict[link]['dst_port_id'] == dst_vnf_intf or \
- link_dict[link][
- 'dst_port_name'] == dst_vnf_intf:
+ link_dict[link][
+ 'dst_port_name'] == dst_vnf_intf:
# found the right link and connected
dst_sw = connected_sw
break
# returns the first found shortest path
# if all shortest paths are wanted, use: all_shortest_paths
path = nx.shortest_path(self.net.DCNetwork_graph, src_sw, dst_sw)
- except:
+ except BaseException:
logging.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
src_vnf, dst_vnf, src_sw, dst_sw))
logging.debug("Graph nodes: %r" % self.net.DCNetwork_graph.nodes())
logging.debug("Graph edges: %r" % self.net.DCNetwork_graph.edges())
for e, v in self.net.DCNetwork_graph.edges():
logging.debug("%r" % self.net.DCNetwork_graph[e][v])
- return "No path could be found between {0} and {1}".format(src_vnf, dst_vnf)
+ return "No path could be found between {0} and {1}".format(
+ src_vnf, dst_vnf)
- logging.info("Shortest path between {0} and {1}: {2}".format(src_vnf, dst_vnf, path))
+ logging.info("Shortest path between {0} and {1}: {2}".format(
+ src_vnf, dst_vnf, path))
return path, src_sw, dst_sw
def add_loadbalancer(self, src_vnf_name, src_vnf_interface, lb_data):
custom_paths = lb_data.get('path', dict())
dest_vnf_outport_nrs = list()
- logging.debug("Call to add_loadbalancer at %s intfs:%s" % (src_vnf_name, src_vnf_interface))
+ logging.debug("Call to add_loadbalancer at %s intfs:%s" %
+ (src_vnf_name, src_vnf_interface))
if not self.check_vnf_intf_pair(src_vnf_name, src_vnf_interface):
- raise Exception(u"Source VNF %s or intfs %s does not exist" % (src_vnf_name, src_vnf_interface))
+ raise Exception(u"Source VNF %s or intfs %s does not exist" % (
+ src_vnf_name, src_vnf_interface))
- # find the switch belonging to the source interface, as well as the inport nr
+ # find the switch belonging to the source interface, as well as the
+ # inport nr
for connected_sw in net.DCNetwork_graph.neighbors(src_vnf_name):
link_dict = net.DCNetwork_graph[src_vnf_name][connected_sw]
for link in link_dict:
link_dict = net.DCNetwork_graph[vnf_name][connected_sw]
for link in link_dict:
if link_dict[link]['src_port_name'] == dest_intfs_mapping[vnf_name]:
- dest_vnf_outport_nrs.append(int(link_dict[link]['dst_port_nr']))
+ dest_vnf_outport_nrs.append(
+ int(link_dict[link]['dst_port_nr']))
# get first switch
if (src_vnf_name, src_vnf_interface) not in self.lb_flow_cookies:
self.lb_flow_cookies[(src_vnf_name, src_vnf_interface)] = list()
- src_intf = None
src_ip = None
src_mac = None
for intf in net[src_vnf_name].intfs.values():
if intf.name == src_vnf_interface:
src_mac = intf.mac
src_ip = intf.ip
- src_intf = intf
# set up paths for each destination vnf individually
index = 0
data["cookie"] = cookie
# lb mac for src -> target connections
- lb_mac = "31:33:70:%02x:%02x:%02x" % (random.randint(0, 255),random.randint(0, 255),random.randint(0, 255))
+ lb_mac = "31:33:70:%02x:%02x:%02x" % (random.randint(
+ 0, 255), random.randint(0, 255), random.randint(0, 255))
# calculate lb ip as src_intf.ip +1
octets = src_ip.split('.')
plus_one = '.'.join(octets)
# set up arp reply as well as add the route to the interface
- self.setup_arp_reply_at(src_sw, src_sw_inport_nr, plus_one, lb_mac, cookie=cookie)
- net.getNodeByName(src_vnf_name).setHostRoute(plus_one, src_vnf_interface)
+ self.setup_arp_reply_at(src_sw, src_sw_inport_nr,
+ plus_one, lb_mac, cookie=cookie)
+ net.getNodeByName(src_vnf_name).setHostRoute(
+ plus_one, src_vnf_interface)
for dst_vnf_name, dst_vnf_interface in dest_intfs_mapping.items():
path, src_sw, dst_sw = self._get_path(src_vnf_name, dst_vnf_name,
if custom_paths is not None and dst_vnf_name in custom_paths:
if dst_vnf_interface in custom_paths[dst_vnf_name]:
path = custom_paths[dst_vnf_name][dst_vnf_interface]
- logging.debug("Taking custom path from %s to %s: %s" % (src_vnf_name, dst_vnf_name, path))
+ logging.debug("Taking custom path from %s to %s: %s" % (
+ src_vnf_name, dst_vnf_name, path))
if not self.check_vnf_intf_pair(dst_vnf_name, dst_vnf_interface):
self.delete_loadbalancer(src_vnf_name, src_vnf_interface)
- raise Exception(u"VNF %s or intfs %s does not exist" % (dst_vnf_name, dst_vnf_interface))
+ raise Exception(u"VNF %s or intfs %s does not exist" %
+ (dst_vnf_name, dst_vnf_interface))
if isinstance(path, dict):
self.delete_loadbalancer(src_vnf_name, src_vnf_interface)
- raise Exception(u"Can not find a valid path. Are you specifying the right interfaces?.")
+ raise Exception(
+ u"Can not find a valid path. Are you specifying the right interfaces?.")
target_mac = "fa:17:00:03:13:37"
target_ip = "0.0.0.0"
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
- #self.setup_arp_reply_at(src_sw, src_sw_inport_nr, target_ip, target_mac, cookie=cookie)
- net.getNodeByName(dst_vnf_name).setHostRoute(src_ip, dst_vnf_interface)
+ # self.setup_arp_reply_at(src_sw, src_sw_inport_nr, target_ip, target_mac, cookie=cookie)
+ net.getNodeByName(dst_vnf_name).setHostRoute(
+ src_ip, dst_vnf_interface)
# choose free vlan if path contains more than 1 switch
if len(path) > 1:
switch_outport_nr = dst_sw_outport_nr
logging.info("end node reached: {0}".format(dst_vnf_name))
elif not isinstance(next_node, OVSSwitch):
- logging.info("Next node: {0} is not a switch".format(next_hop))
+ logging.info(
+ "Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
# take first link between switches by default
index_edge_out = 0
switch_outport_nr = net.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
- cmd = 'priority=1,in_port=%s,cookie=%s' % (switch_inport_nr, cookie)
- cmd_back = 'priority=1,in_port=%s,cookie=%s' % (switch_outport_nr, cookie)
- # if a vlan is picked, the connection is routed through multiple switches
+ cmd = 'priority=1,in_port=%s,cookie=%s' % (
+ switch_inport_nr, cookie)
+ cmd_back = 'priority=1,in_port=%s,cookie=%s' % (
+ switch_outport_nr, cookie)
+ # if a vlan is picked, the connection is routed through
+ # multiple switches
if vlan is not None:
if path.index(current_hop) == 0: # first node
# flow #index set up
# remove any vlan tags
cmd += ',dl_vlan=%s' % vlan
cmd += ',actions=pop_vlan,output:%s' % switch_outport_nr
- # set up arp replys at the port so the dst nodes know the src
- self.setup_arp_reply_at(current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
+                        # set up arp replies at the port so the dst nodes know
+                        # the src
+ self.setup_arp_reply_at(
+ current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
# reverse route
cmd_back = 'in_port=%s' % switch_outport_nr
cmd += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
cmd_back += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
else:
- cmd += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_outport_nr)
- cmd_back += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_inport_nr)
+ cmd += ',dl_vlan=%s,actions=output:%s' % (
+ vlan, switch_outport_nr)
+ cmd_back += ',dl_vlan=%s,actions=output:%s' % (
+ vlan, switch_inport_nr)
# output the packet at the correct outport
else:
cmd = 'in_port=%s' % src_sw_inport_nr
cmd_back += ',set_field:%s->ip_src' % plus_one
cmd_back += ',output:%s' % src_sw_inport_nr
- self.setup_arp_reply_at(current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
+ self.setup_arp_reply_at(
+ current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
            # execute the command on the target switch
logging.debug(cmd)
# advance to next destination
index += 1
- # set up the actual load balancing rule as a multipath on the very first switch
+ # set up the actual load balancing rule as a multipath on the very
+ # first switch
cmd = '"in_port=%s' % src_sw_inport_nr
cmd += ',cookie=%s' % (cookie)
cmd += ',ip'
# load balance modulo n over all dest interfaces
# TODO: in newer openvswitch implementations this should be changed to symmetric_l3l4+udp
# to balance any kind of traffic
- cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(dest_intfs_mapping)
+ cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(
+ dest_intfs_mapping)
# reuse the cookie as table entry as it will be unique
cmd += ',resubmit(, %s)"' % cookie
link_dict = net.DCNetwork_graph[vnf_name][connected_sw]
for link in link_dict:
if link_dict[link]['src_port_name'] == dest_intfs_mapping[vnf_name]:
- dest_vnf_outport_nrs.append(int(link_dict[link]['dst_port_nr']))
+ dest_vnf_outport_nrs.append(
+ int(link_dict[link]['dst_port_nr']))
if len(dest_vnf_outport_nrs) == 0:
- raise Exception("There are no paths specified for the loadbalancer")
+ raise Exception(
+ "There are no paths specified for the loadbalancer")
src_ip = self.floating_intf.IP()
src_mac = self.floating_intf.MAC()
index = 0
cookie = self.get_cookie()
main_cmd = "add-flow -OOpenFlow13"
- floating_ip = self.floating_network.get_new_ip_address("floating-ip").split("/")[0]
+ floating_ip = self.floating_network.get_new_ip_address(
+ "floating-ip").split("/")[0]
for dst_vnf_name, dst_vnf_interface in dest_intfs_mapping.items():
path = None
if custom_paths is not None and dst_vnf_name in custom_paths:
if dst_vnf_interface in custom_paths[dst_vnf_name]:
path = custom_paths[dst_vnf_name][dst_vnf_interface]
- logging.debug("Taking custom path to %s: %s" % (dst_vnf_name, path))
+ logging.debug("Taking custom path to %s: %s" %
+ (dst_vnf_name, path))
else:
if datacenter not in self.floating_links:
self.floating_links[datacenter] = \
net.addLink(self.floating_switch, datacenter)
path = \
- self._get_path(self.floating_root.name, dst_vnf_name, self.floating_intf.name, dst_vnf_interface)[0]
+ self._get_path(self.floating_root.name, dst_vnf_name,
+ self.floating_intf.name, dst_vnf_interface)[0]
if isinstance(path, dict):
self.delete_flow_by_cookie(cookie)
- raise Exception(u"Can not find a valid path. Are you specifying the right interfaces?.")
+ raise Exception(
+ u"Can not find a valid path. Are you specifying the right interfaces?.")
intf = net[dst_vnf_name].nameToIntf[dst_vnf_interface]
target_mac = str(intf.MAC())
switch_outport_nr = dst_sw_outport_nr
logging.info("end node reached: {0}".format(dst_vnf_name))
elif not isinstance(next_node, OVSSwitch):
- logging.info("Next node: {0} is not a switch".format(next_hop))
+ logging.info(
+ "Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
# take first link between switches by default
index_edge_out = 0
switch_outport_nr = net.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
- # default filters, just overwritten on the first node and last node
- cmd = 'priority=1,in_port=%s,cookie=%s' % (switch_inport_nr, cookie)
- cmd_back = 'priority=1,in_port=%s,cookie=%s' % (switch_outport_nr, cookie)
+ # default filters, just overwritten on the first node and last
+ # node
+ cmd = 'priority=1,in_port=%s,cookie=%s' % (
+ switch_inport_nr, cookie)
+ cmd_back = 'priority=1,in_port=%s,cookie=%s' % (
+ switch_outport_nr, cookie)
if i == 0: # first node
cmd = 'in_port=%s' % src_sw_inport_nr
cmd += ',cookie=%s' % cookie
# remove any vlan tags
cmd_back += ',dl_vlan=%s' % vlan
cmd_back += ',actions=pop_vlan,output:%s' % switch_inport_nr
- self.setup_arp_reply_at(current_hop, src_sw_inport_nr, floating_ip, target_mac, cookie=cookie)
+ self.setup_arp_reply_at(
+ current_hop, src_sw_inport_nr, floating_ip, target_mac, cookie=cookie)
elif next_hop == dst_vnf_name: # last switch
# remove any vlan tags
cmd += ',dl_vlan=%s' % vlan
cmd += ',actions=pop_vlan,output:%s' % switch_outport_nr
- # set up arp replys at the port so the dst nodes know the src
- self.setup_arp_reply_at(current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
+                    # set up arp replies at the port so the dst nodes know the
+                    # src
+ self.setup_arp_reply_at(
+ current_hop, switch_outport_nr, src_ip, src_mac, cookie=cookie)
# reverse route
cmd_back = 'in_port=%s' % switch_outport_nr
cmd_back += ',set_field:%s->eth_src' % src_mac
cmd_back += ',set_field:%s->ip_src' % floating_ip
cmd_back += ',output:%s' % switch_inport_nr
- net.getNodeByName(dst_vnf_name).setHostRoute(src_ip, dst_vnf_interface)
+ net.getNodeByName(dst_vnf_name).setHostRoute(
+ src_ip, dst_vnf_interface)
else: # middle node
# if we have a circle in the path we need to specify this, as openflow will ignore the packet
# if we just output it on the same port as it came in
cmd += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
cmd_back += ',dl_vlan=%s,actions=IN_PORT' % (vlan)
else:
- cmd += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_outport_nr)
- cmd_back += ',dl_vlan=%s,actions=output:%s' % (vlan, switch_inport_nr)
+ cmd += ',dl_vlan=%s,actions=output:%s' % (
+ vlan, switch_outport_nr)
+ cmd_back += ',dl_vlan=%s,actions=output:%s' % (
+ vlan, switch_inport_nr)
            # execute the command on the target switch
logging.debug(cmd)
# advance to next destination
index += 1
- # set up the actual load balancing rule as a multipath on the very first switch
+ # set up the actual load balancing rule as a multipath on the very
+ # first switch
cmd = '"in_port=%s' % src_sw_inport_nr
cmd += ',cookie=%s' % (cookie)
cmd += ',ip'
# load balance modulo n over all dest interfaces
# TODO: in newer openvswitch implementations this should be changed to symmetric_l3l4+udp
# to balance any kind of traffic
- cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(dest_intfs_mapping)
+ cmd += ',multipath(symmetric_l4,1024,modulo_n,%s,0,NXM_NX_REG1[0..12])' % len(
+ dest_intfs_mapping)
# reuse the cookie as table entry as it will be unique
cmd += ',resubmit(, %s)"' % cookie
return cookie, floating_ip
- def setup_arp_reply_at(self, switch, port_nr, target_ip, target_mac, cookie=None):
+ def setup_arp_reply_at(self, switch, port_nr,
+ target_ip, target_mac, cookie=None):
"""
Sets up a custom ARP reply at a switch.
An ARP request coming in on the `port_nr` for `target_ip` will be answered with target IP/MAC.
cookie = self.get_cookie()
main_cmd = "add-flow -OOpenFlow13"
- # first set up ARP requests for the source node, so it will always 'find' a partner
+ # first set up ARP requests for the source node, so it will always
+ # 'find' a partner
cmd = '"in_port=%s' % port_nr
cmd += ',cookie=%s' % cookie
cmd += ',arp'
self.cookies.remove(cookie)
return True
- def delete_chain_by_intf(self, src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf):
+ def delete_chain_by_intf(
+ self, src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf):
"""
Removes a flow identified by the vnf_name/vnf_intf pairs
:return: True if successful, else false
:rtype: ``bool``
"""
- logging.debug("Deleting flow for vnf/intf pair %s %s" % (src_vnf_name, src_vnf_intf))
+ logging.debug("Deleting flow for vnf/intf pair %s %s" %
+ (src_vnf_name, src_vnf_intf))
if not self.check_vnf_intf_pair(src_vnf_name, src_vnf_intf):
return False
if not self.check_vnf_intf_pair(dst_vnf_name, dst_vnf_intf):
return False
target_flow = (src_vnf_name, src_vnf_intf, dst_vnf_name, dst_vnf_intf)
- if not target_flow in self.chain_flow_cookies:
+ if target_flow not in self.chain_flow_cookies:
return False
- success = self.delete_flow_by_cookie(self.chain_flow_cookies[target_flow])
+ success = self.delete_flow_by_cookie(
+ self.chain_flow_cookies[target_flow])
if success:
del self.chain_flow_cookies[target_flow]
delete_group = list()
group_id = self.get_flow_group(vnf_src_name, vnf_src_interface)
for node in self.net.switches:
- for cookie in self.lb_flow_cookies[(vnf_src_name, vnf_src_interface)]:
+ for cookie in self.lb_flow_cookies[(
+ vnf_src_name, vnf_src_interface)]:
flow = dict()
flow["dpid"] = int(node.dpid, 16)
flow["cookie"] = cookie
logging.debug("Deleting group with id %s" % group_id)
for switch_del_group in delete_group:
if self.net.controller == RemoteController:
- self.net.ryu_REST("stats/groupentry/delete", data=switch_del_group)
+ self.net.ryu_REST("stats/groupentry/delete",
+ data=switch_del_group)
# unmap groupid from the interface
target_pair = (vnf_src_name, vnf_src_interface)
"""
cookie = int(cookie)
if cookie not in self.floating_cookies:
- raise Exception("Can not delete floating loadbalancer as the flowcookie is not known")
+ raise Exception(
+ "Can not delete floating loadbalancer as the flowcookie is not known")
self.delete_flow_by_cookie(cookie)
floating_ip = self.floating_cookies[cookie]
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from manage import OpenstackManage
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
+from openstack_dummies.glance_dummy_api import GlanceDummyApi
+from openstack_dummies.heat_dummy_api import HeatDummyApi
+from openstack_dummies.keystone_dummy_api import KeystoneDummyApi
+from openstack_dummies.neutron_dummy_api import NeutronDummyApi
+from openstack_dummies.nova_dummy_api import NovaDummyApi
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from manage import OpenstackManage
-from openstack_dummies import *
import logging
import threading
import compute
-import requests
import socket
import time
self.port = port
self.compute = compute.OpenstackCompute()
self.openstack_endpoints = dict()
- self.openstack_endpoints['keystone'] = KeystoneDummyApi(self.ip, self.port)
- self.openstack_endpoints['neutron'] = NeutronDummyApi(self.ip, self.port + 4696, self.compute)
- self.openstack_endpoints['nova'] = NovaDummyApi(self.ip, self.port + 3774, self.compute)
- self.openstack_endpoints['heat'] = HeatDummyApi(self.ip, self.port + 3004, self.compute)
- self.openstack_endpoints['glance'] = GlanceDummyApi(self.ip, self.port + 4242, self.compute)
+ self.openstack_endpoints['keystone'] = KeystoneDummyApi(
+ self.ip, self.port)
+ self.openstack_endpoints['neutron'] = NeutronDummyApi(
+ self.ip, self.port + 4696, self.compute)
+ self.openstack_endpoints['nova'] = NovaDummyApi(
+ self.ip, self.port + 3774, self.compute)
+ self.openstack_endpoints['heat'] = HeatDummyApi(
+ self.ip, self.port + 3004, self.compute)
+ self.openstack_endpoints['glance'] = GlanceDummyApi(
+ self.ip, self.port + 4242, self.compute)
self.rest_threads = list()
self.manage = OpenstackManage()
self.compute.dc = dc
for ep in self.openstack_endpoints.values():
ep.manage = self.manage
- logging.info \
- ("Connected DC(%s) to API endpoint %s(%s:%d)" % (dc.label, self.__class__.__name__, self.ip, self.port))
+ logging.info("Connected DC(%s) to API endpoint %s(%s:%d)" %
+ (dc.label, self.__class__.__name__, self.ip, self.port))
def connect_dc_network(self, dc_network):
"""
c.server_thread.start()
if wait_for_port:
self._wait_for_port(c.ip, c.port)
-
+
def stop(self):
"""
Stop all connected OpenStack endpoints that are connected to this API endpoint.
"""
for c in self.openstack_endpoints.values():
c.stop()
- #for c in self.openstack_endpoints.values():
+ # for c in self.openstack_endpoints.values():
# if c.server_thread:
# print("Waiting for WSGIServers to be stopped ...")
# c.server_thread.join()
if r == 0:
break # port is open proceed
else:
- logging.warning("Waiting for {}:{} ... ({}/10)".format(ip, port, i + 1))
+ logging.warning(
+ "Waiting for {}:{} ... ({}/10)".format(ip, port, i + 1))
time.sleep(1)
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from glance_dummy_api import GlanceDummyApi
-from heat_dummy_api import HeatDummyApi
-from keystone_dummy_api import KeystoneDummyApi
-from neutron_dummy_api import NeutronDummyApi
-from nova_dummy_api import NovaDummyApi
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from flask import Flask, request
from flask_restful import Api, Resource
from gevent.pywsgi import WSGIServer
with self.manage.lock:
with open(self.playbook_file, 'a') as logfile:
if len(request.data) > 0:
- data = "# %s API\n" % str(self.__class__).split('.')[-1].rstrip('\'>')
+ data = "# %s API\n" % str(
+ self.__class__).split('.')[-1].rstrip('\'>')
data += "curl -X {type} -H \"Content-type: application/json\" -d '{data}' {url}".format(type=request.method,
- data=request.data,
- url=request.url)
+ data=request.data,
+ url=request.url)
logfile.write(data + "\n")
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from flask_restful import Resource
from flask import Response, request
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
]
}]
resp['versions'] = versions
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
class GlanceSchema(Resource):
resp['name'] = 'someImageName'
resp['properties'] = dict()
# just an ugly hack to allow the openstack client to work
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
class GlanceListImagesApi(Resource):
def get(self):
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
- try:
+ try:
resp = dict()
resp['next'] = None
resp['first'] = "/v2/images"
break
if "marker" in request.args: # ugly hack to fix pageination of openstack client
resp['images'] = None
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
def post(self):
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
body_data = json.loads(request.data)
- except:
+ except BaseException:
body_data = dict()
# lets see what we should create
img_name = request.headers.get("X-Image-Meta-Name")
img_size = request.headers.get("X-Image-Meta-Size")
img_disk_format = request.headers.get("X-Image-Meta-Disk-Format")
img_is_public = request.headers.get("X-Image-Meta-Is-Public")
- img_container_format = request.headers.get("X-Image-Meta-Container-Format")
+ img_container_format = request.headers.get(
+ "X-Image-Meta-Container-Format")
# try to use body payload if header fields are empty
if img_name is None:
img_name = body_data.get("name")
img_size = 1234
img_disk_format = body_data.get("disk_format")
- img_is_public = True if "public" in body_data.get("visibility") else False
+ img_is_public = True if "public" in body_data.get(
+ "visibility") else False
img_container_format = body_data.get("container_format")
# try to find ID of already existing image (matched by name)
img_id = None
resp['id'] = image.id
resp['name'] = image.name
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
- response = Response("Image with id or name %s does not exists." % id, status=404)
+ response = Response(
+ "Image with id or name %s does not exists." % id, status=404)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve image with id %s." % (__name__, id))
- return Response(ex.message, status=500, mimetype='application/json')
+ LOG.exception(
+ u"%s: Could not retrieve image with id %s." % (__name__, id))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
def put(self, id):
LOG.debug("API CALL: %s " % str(self.__class__.__name__))
resp = dict()
resp['id'] = image.id
resp['name'] = image.name
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
- response = Response("Image with id or name %s does not exists." % id, status=404)
+ response = Response(
+ "Image with id or name %s does not exists." % id, status=404)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- logging.exception(u"%s: Could not retrieve image with id %s." % (__name__, id))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception(
+ u"%s: Could not retrieve image with id %s." % (__name__, id))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from flask import request, Response
from flask_restful import Resource
-from emuvim.api.openstack.resources import Stack
+from emuvim.api.openstack.resources.stack import Stack
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
from emuvim.api.openstack.helper import get_host
from datetime import datetime
]
}]
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
class HeatCreateStack(Resource):
stack.stack_name = stack_dict['stack_name']
reader = HeatParser(self.api.compute)
- if isinstance(stack_dict['template'], str) or isinstance(stack_dict['template'], unicode):
+ if isinstance(stack_dict['template'], str) or isinstance(
+ stack_dict['template'], unicode):
stack_dict['template'] = json.loads(stack_dict['template'])
- if not reader.parse_input(stack_dict['template'], stack, self.api.compute.dc.label):
+ if not reader.parse_input(
+ stack_dict['template'], stack, self.api.compute.dc.label):
self.api.compute.clean_broken_stack(stack)
return 'Could not create stack.', 400
self.api.compute.add_stack(stack)
self.api.compute.deploy_stack(stack.id)
- return Response(json.dumps(return_dict), status=201, mimetype="application/json")
+ return Response(json.dumps(return_dict), status=201,
+ mimetype="application/json")
except Exception as ex:
LOG.exception("Heat: Create Stack exception.")
"tags": ""
})
- return Response(json.dumps(return_stacks), status=200, mimetype="application/json")
+ return Response(json.dumps(return_stacks),
+ status=200, mimetype="application/json")
except Exception as ex:
LOG.exception("Heat: List Stack exception.")
return ex.message, 500
"stack_name": stack.stack_name,
"stack_owner": "The owner of the stack.", # add stack owner
"stack_status": stack.status,
- "stack_status_reason": "The reason for the current status of the stack.", # add status reason
+ # add status reason
+ "stack_status_reason": "The reason for the current status of the stack.",
"template_description": "The description of the stack template.",
"stack_user_project_id": "The project UUID of the stack user.",
"timeout_mins": "",
}
}
- return Response(json.dumps(return_stack), status=200, mimetype="application/json")
+ return Response(json.dumps(return_stack),
+ status=200, mimetype="application/json")
except Exception as ex:
LOG.exception("Heat: Show stack exception.")
return ex.message, 500
-
+
class HeatShowStackTemplate(Resource):
def __init__(self, api):
self.api = api
:param tenant_id:
:param stack_name_or_id:
:param stack_id:
- :return: Returns a json response which contains the stack's template.
+ :return: Returns a json response which contains the stack's template.
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
stack = tmp_stack
if stack is None:
return 'Could not resolve Stack - ID', 404
- #LOG.debug("STACK: {}".format(stack))
- #LOG.debug("TEMPLATE: {}".format(stack.template))
- return Response(json.dumps(stack.template), status=200, mimetype="application/json")
+ # LOG.debug("STACK: {}".format(stack))
+ # LOG.debug("TEMPLATE: {}".format(stack.template))
+ return Response(json.dumps(stack.template),
+ status=200, mimetype="application/json")
except Exception as ex:
LOG.exception("Heat: Show stack template exception.")
:param tenant_id:
:param stack_name_or_id:
:param stack_id:
- :return: Returns a json response which contains the stack's template.
+ :return: Returns a json response which contains the stack's template.
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
response = {"resources": []}
- return Response(json.dumps(response), status=200, mimetype="application/json")
+ return Response(json.dumps(response), status=200,
+ mimetype="application/json")
except Exception as ex:
LOG.exception("Heat: Show stack template exception.")
def patch(self, tenant_id, stack_name_or_id, stack_id=None):
LOG.debug("API CALL: %s PATCH" % str(self.__class__.__name__))
return self.update_stack(tenant_id, stack_name_or_id, stack_id)
-
+
def update_stack(self, tenant_id, stack_name_or_id, stack_id=None):
"""
Updates an existing stack with a new heat template.
stack.status = "UPDATE_COMPLETE"
reader = HeatParser(self.api.compute)
- if isinstance(stack_dict['template'], str) or isinstance(stack_dict['template'], unicode):
+ if isinstance(stack_dict['template'], str) or isinstance(
+ stack_dict['template'], unicode):
stack_dict['template'] = json.loads(stack_dict['template'])
- if not reader.parse_input(stack_dict['template'], stack, self.api.compute.dc.label, stack_update=True):
+ if not reader.parse_input(
+ stack_dict['template'], stack, self.api.compute.dc.label, stack_update=True):
return 'Could not create stack.', 400
stack.template = stack_dict['template']
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from flask_restful import Resource
from flask import request, Response
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
def __init__(self, in_ip, in_port):
super(KeystoneDummyApi, self).__init__(in_ip, in_port)
- self.api.add_resource(KeystoneListVersions, "/", resource_class_kwargs={'api': self})
- self.api.add_resource(KeystoneShowAPIv2, "/v2.0", resource_class_kwargs={'api': self})
- self.api.add_resource(KeystoneGetToken, "/v2.0/tokens", resource_class_kwargs={'api': self})
- self.api.add_resource(KeystoneShowAPIv3, "/v3.0", resource_class_kwargs={'api': self})
- self.api.add_resource(KeystoneGetTokenv3, "/v3.0/auth/tokens", resource_class_kwargs={'api': self})
+ self.api.add_resource(KeystoneListVersions, "/",
+ resource_class_kwargs={'api': self})
+ self.api.add_resource(KeystoneShowAPIv2, "/v2.0",
+ resource_class_kwargs={'api': self})
+ self.api.add_resource(KeystoneGetToken, "/v2.0/tokens",
+ resource_class_kwargs={'api': self})
+ self.api.add_resource(KeystoneShowAPIv3, "/v3.0",
+ resource_class_kwargs={'api': self})
+ self.api.add_resource(
+ KeystoneGetTokenv3, "/v3.0/auth/tokens", resource_class_kwargs={'api': self})
class KeystoneListVersions(Resource):
}]
resp['versions']['values'] = version
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
class KeystoneShowAPIv2(Resource):
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
- neutron_port = self.api.port + 4696
- heat_port = self.api.port + 3004
+ # neutron_port = self.api.port + 4696
+ # heat_port = self.api.port + 3004
resp = dict()
resp['version'] = {
]
}
LOG.debug(json.dumps(resp))
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
class KeystoneShowAPIv3(Resource):
"""
LOG.debug("API CALL: %s GET" % str(self.__class__.__name__))
- neutron_port = self.api.port + 4696
- heat_port = self.api.port + 3004
+ # neutron_port = self.api.port + 4696
+ # heat_port = self.api.port + 3004
resp = dict()
resp['version'] = {
]
}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
class KeystoneGetToken(Resource):
token['issued_at'] = "2014-01-30T15:30:58.819Z"
token['expires'] = "2999-01-30T15:30:58.819Z"
- token['id'] = req['auth'].get('token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
+ token['id'] = req['auth'].get(
+ 'token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
token['tenant'] = dict()
token['tenant']['description'] = None
token['tenant']['enabled'] = True
- token['tenant']['id'] = req['auth'].get('tenantId', 'fc394f2ab2df4114bde39905f800dc57')
+ token['tenant']['id'] = req['auth'].get(
+ 'tenantId', 'fc394f2ab2df4114bde39905f800dc57')
token['tenant']['name'] = "tenantName"
ret['access']['user'] = dict()
user['username'] = req.get('username', "username")
user['name'] = "tenantName"
user['roles_links'] = list()
- user['id'] = token['tenant'].get('id', "fc394f2ab2df4114bde39905f800dc57")
+ user['id'] = token['tenant'].get(
+ 'id', "fc394f2ab2df4114bde39905f800dc57")
user['roles'] = [{'name': 'Member'}]
ret['access']['region_name'] = "RegionOne"
"endpoints_links": [],
"type": "identity",
"name": "keystone"
- },
+ },
{
"endpoints": [
{
"endpoints_links": [],
"type": "network",
"name": "neutron"
- },
+ },
{
"endpoints": [
{
"endpoints_links": [],
"type": "image",
"name": "glance"
- },
+ },
{
"endpoints": [
{
"endpoints_links": [],
"type": "orchestration",
"name": "heat"
- }
+ }
]
ret['access']["metadata"] = {
- "is_admin": 0,
- "roles": [
- "7598ac3c634d4c3da4b9126a5f67ca2b"
- ]
- },
+ "is_admin": 0,
+ "roles": [
+ "7598ac3c634d4c3da4b9126a5f67ca2b"
+ ]
+ },
ret['access']['trust'] = {
"id": "394998fa61f14736b1f0c1f322882949",
"trustee_user_id": "269348fdd9374b8885da1418e0730af1",
"trustor_user_id": "3ec3164f750146be97f21559ee4d9c51",
"impersonation": False
}
- return Response(json.dumps(ret), status=200, mimetype='application/json')
+ return Response(json.dumps(ret), status=200,
+ mimetype='application/json')
except Exception as ex:
logging.exception("Keystone: Get token failed.")
return ex.message, 500
+
class KeystoneGetTokenv3(Resource):
"""
Returns a static keystone token.
token['extras'] = dict()
token['user'] = dict()
user = token['user']
- user['id'] = req['auth'].get('token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
+ user['id'] = req['auth'].get(
+ 'token', {'id': 'fc394f2ab2df4114bde39905f800dc57'}).get('id')
user['name'] = "tenantName"
user['password_expires_at'] = None
user['domain'] = {"id": "default", "name": "Default"}
# project
token['project'] = {
"domain": {
- "id" : "default",
+ "id": "default",
"name": "Default"
},
"id": "8538a3f13f9541b28c2620eb19065e45",
"id": "2dad48f09e2a447a9bf852bcd93543fc",
"type": "identity",
"name": "keystone"
- },
+ },
{
"endpoints": [
{
"id": "2dad48f09e2a447a9bf852bcd93548cf",
"type": "network",
"name": "neutron"
- },
+ },
{
"endpoints": [
{
"id": "2dad48f09e2a447a9bf852bcd93548cf",
"type": "image",
"name": "glance"
- },
+ },
{
"endpoints": [
{
"id": "2dad48f09e2a447a9bf852bcd93548bf",
"type": "orchestration",
"name": "heat"
- }
+ }
]
- return Response(json.dumps(ret), status=201, mimetype='application/json')
+ return Response(json.dumps(ret), status=201,
+ mimetype='application/json')
except Exception as ex:
logging.exception("Keystone: Get token failed.")
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from flask_restful import Resource
from flask import request, Response
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
-from emuvim.api.openstack.helper import get_host
from datetime import datetime
import neutron_sfc_dummy_api as SFC
import logging
super(NeutronDummyApi, self).__init__(ip, port)
self.compute = compute
- # create default networks (OSM usually assumes to have these pre-configured)
+ # create default networks (OSM usually assumes to have these
+ # pre-configured)
self.compute.create_network("mgmt")
self.compute.create_network("mgmtnet")
}]
resp['versions'] = versions
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
class NeutronShowAPIv2Details(Resource):
],
"name": "network",
"collection": "networks"
- },
+ },
{
"links": [
{
],
"name": "ports",
"collection": "ports"
- }
+ }
]
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
class NeutronListNetworks(Resource):
try:
if request.args.get('name'):
tmp_network = NeutronShowNetwork(self.api)
- response = tmp_network.get_network(request.args.get('name'), True)
- LOG.debug("{} RESPONSE (1): {}".format(self.__class__.__name__, response))
+ response = tmp_network.get_network(
+ request.args.get('name'), True)
+ LOG.debug("{} RESPONSE (1): {}".format(
+ self.__class__.__name__, response))
return response
id_list = request.args.getlist('id')
if len(id_list) == 1:
tmp_network = NeutronShowNetwork(self.api)
- response = tmp_network.get_network(request.args.get('id'), True)
- LOG.debug("{} RESPONSE (2): {}".format(self.__class__.__name__, response))
+ response = tmp_network.get_network(
+ request.args.get('id'), True)
+ LOG.debug("{} RESPONSE (2): {}".format(
+ self.__class__.__name__, response))
return response
network_list = list()
network_list.append(tmp_network_dict)
network_dict["networks"] = network_list
- LOG.debug("{} RESPONSE (3): {}".format(self.__class__.__name__, network_dict))
- return Response(json.dumps(network_dict), status=200, mimetype='application/json')
+ LOG.debug("{} RESPONSE (3): {}".format(
+ self.__class__.__name__, network_dict))
+ return Response(json.dumps(network_dict),
+ status=200, mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: List networks exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronShowNetwork(Resource):
:rtype: :class:`flask.response`
"""
try:
- net = self.api.compute.find_network_by_name_or_id(network_name_or_id)
+ net = self.api.compute.find_network_by_name_or_id(
+ network_name_or_id)
if net is None:
- return Response(u'Network not found.\n', status=404, mimetype='application/json')
+ return Response(u'Network not found.\n',
+ status=404, mimetype='application/json')
tmp_network_dict = net.create_network_dict()
tmp_dict = dict()
else:
tmp_dict["network"] = tmp_network_dict
- return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
-
+ return Response(json.dumps(tmp_dict), status=200,
+ mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Show network exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronCreateNetwork(Resource):
name = network_dict['network']['name']
net = self.api.compute.find_network_by_name_or_id(name)
if net is not None:
- return Response('Network already exists.\n', status=400, mimetype='application/json')
+ return Response('Network already exists.\n',
+ status=400, mimetype='application/json')
net = self.api.compute.create_network(name)
- return Response(json.dumps({"network": net.create_network_dict()}), status=201, mimetype='application/json')
+ return Response(json.dumps(
+ {"network": net.create_network_dict()}), status=201, mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Create network excepiton.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronUpdateNetwork(Resource):
if network_id in self.api.compute.nets:
net = self.api.compute.nets[network_id]
network_dict = json.loads(request.data)
- old_net = copy.copy(net)
if "status" in network_dict["network"]:
net.status = network_dict["network"]["status"]
if "admin_state_up" in network_dict["network"]:
pass # tmp_network_dict["admin_state_up"] = True
if "tenant_id" in network_dict["network"]:
- pass # tmp_network_dict["tenant_id"] = "c1210485b2424d48804aad5d39c61b8f"
+ # tmp_network_dict["tenant_id"] = "c1210485b2424d48804aad5d39c61b8f"
+ pass
if "shared" in network_dict["network"]:
pass # tmp_network_dict["shared"] = False
- return Response(json.dumps(network_dict), status=200, mimetype='application/json')
+ return Response(json.dumps(network_dict),
+ status=200, mimetype='application/json')
- return Response('Network not found.\n', status=404, mimetype='application/json')
+ return Response('Network not found.\n', status=404,
+ mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Show networks exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronDeleteNetwork(Resource):
delete_subnet = NeutronDeleteSubnet(self.api)
resp = delete_subnet.delete(net.subnet_id)
- if not '204' in resp.status and not '404' in resp.status:
+ if '204' not in resp.status and '404' not in resp.status:
return resp
self.api.compute.delete_network(network_id)
return Response('', status=204, mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Delete network exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronListSubnets(Resource):
subnet_dict["subnets"] = subnet_list
- return Response(json.dumps(subnet_dict), status=200, mimetype='application/json')
+ return Response(json.dumps(subnet_dict), status=200,
+ mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: List subnets exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronShowSubnet(Resource):
tmp_dict["subnets"] = [tmp_subnet_dict]
else:
tmp_dict["subnet"] = tmp_subnet_dict
- return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
+ return Response(json.dumps(tmp_dict),
+ status=200, mimetype='application/json')
- return Response('Subnet not found. (' + subnet_name_or_id + ')\n', status=404, mimetype='application/json')
+ return Response('Subnet not found. (' + subnet_name_or_id +
+ ')\n', status=404, mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Show subnet exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronCreateSubnet(Resource):
LOG.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
subnet_dict = json.loads(request.data)
- net = self.api.compute.find_network_by_name_or_id(subnet_dict['subnet']['network_id'])
+ net = self.api.compute.find_network_by_name_or_id(
+ subnet_dict['subnet']['network_id'])
if net is None:
- return Response('Could not find network.\n', status=404, mimetype='application/json')
+ return Response('Could not find network.\n',
+ status=404, mimetype='application/json')
- net.subnet_name = subnet_dict["subnet"].get('name', str(net.name) + '-sub')
+ net.subnet_name = subnet_dict["subnet"].get(
+ 'name', str(net.name) + '-sub')
if net.subnet_id is not None:
- LOG.error("Only one subnet per network is supported: {}".format(net.subnet_id))
- return Response('Only one subnet per network is supported\n', status=409, mimetype='application/json')
+ LOG.error(
+ "Only one subnet per network is supported: {}".format(net.subnet_id))
+ return Response('Only one subnet per network is supported\n',
+ status=409, mimetype='application/json')
if "id" in subnet_dict["subnet"]:
net.subnet_id = subnet_dict["subnet"]["id"]
if "enable_dhcp" in subnet_dict["subnet"]:
pass
- return Response(json.dumps({'subnet': net.create_subnet_dict()}), status=201, mimetype='application/json')
+ return Response(json.dumps(
+ {'subnet': net.create_subnet_dict()}), status=201, mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Create network excepiton.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronUpdateSubnet(Resource):
net.subnet_update_time = str(datetime.now())
tmp_dict = {'subnet': net.create_subnet_dict()}
- return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
+ return Response(json.dumps(tmp_dict),
+ status=200, mimetype='application/json')
- return Response('Network not found.\n', status=404, mimetype='application/json')
+ return Response('Network not found.\n', status=404,
+ mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Show networks exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronDeleteSubnet(Resource):
if net.subnet_id == subnet_id:
for server in self.api.compute.computeUnits.values():
for port_name in server.port_names:
- port = self.api.compute.find_port_by_name_or_id(port_name)
+ port = self.api.compute.find_port_by_name_or_id(
+ port_name)
if port is None:
- LOG.warning("Port search for {} returned None.".format(port_name))
+ LOG.warning(
+ "Port search for {} returned None.".format(port_name))
continue
if port.net_name == net.name:
port.ip_address = None
net.delete_subnet()
- return Response('', status=204, mimetype='application/json')
+ return Response(
+ '', status=204, mimetype='application/json')
- return Response('Could not find subnet.', status=404, mimetype='application/json')
+ return Response('Could not find subnet.',
+ status=404, mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Delete subnet exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronListPorts(Resource):
port_dict["ports"] = port_list
- return Response(json.dumps(port_dict), status=200, mimetype='application/json')
+ return Response(json.dumps(port_dict), status=200,
+ mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: List ports exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronShowPort(Resource):
try:
port = self.api.compute.find_port_by_name_or_id(port_name_or_id)
if port is None:
- return Response('Port not found. (' + port_name_or_id + ')\n', status=404, mimetype='application/json')
+ return Response('Port not found. (' + port_name_or_id + ')\n',
+ status=404, mimetype='application/json')
tmp_port_dict = port.create_port_dict(self.api.compute)
tmp_dict = dict()
if as_list:
tmp_dict["ports"] = [tmp_port_dict]
else:
tmp_dict["port"] = tmp_port_dict
- return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
+ return Response(json.dumps(tmp_dict), status=200,
+ mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Show port exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronCreatePort(Resource):
net_id = port_dict['port']['network_id']
if net_id not in self.api.compute.nets:
- return Response('Could not find network.\n', status=404, mimetype='application/json')
+ return Response('Could not find network.\n',
+ status=404, mimetype='application/json')
net = self.api.compute.nets[net_id]
if 'name' in port_dict['port']:
name = "port:cp%s:man:%s" % (num_ports, str(uuid.uuid4()))
if self.api.compute.find_port_by_name_or_id(name):
- return Response("Port with name %s already exists.\n" % name, status=500, mimetype='application/json')
+ return Response("Port with name %s already exists.\n" %
+ name, status=500, mimetype='application/json')
port = self.api.compute.create_port(name)
if "tenant_id" in port_dict["port"]:
pass
- # add the port to a stack if the specified network is a stack network
+ # add the port to a stack if the specified network is a stack
+ # network
for stack in self.api.compute.stacks.values():
for net in stack.nets.values():
if net.id == net_id:
mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Show port exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronUpdatePort(Resource):
port_dict = json.loads(request.data)
port = self.api.compute.find_port_by_name_or_id(port_id)
if port is None:
- return Response("Port with id %s does not exists.\n" % port_id, status=404, mimetype='application/json')
+ return Response("Port with id %s does not exists.\n" %
+ port_id, status=404, mimetype='application/json')
old_port = copy.copy(port)
stack = None
port.set_name(port_dict["port"]["name"])
if stack is not None:
if port.net_name in stack.nets:
- stack.nets[port.net_name].update_port_name_for_ip_address(port.ip_address, port.name)
+ stack.nets[port.net_name].update_port_name_for_ip_address(
+ port.ip_address, port.name)
stack.ports[port.name] = stack.ports[old_port.name]
del stack.ports[old_port.name]
if "network_id" in port_dict["port"]:
mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Update port exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronDeletePort(Resource):
try:
port = self.api.compute.find_port_by_name_or_id(port_id)
if port is None:
- return Response("Port with id %s does not exists.\n" % port_id, status=404)
+ return Response("Port with id %s does not exists.\n" %
+ port_id, status=404)
stack = None
for s in self.api.compute.stacks.values():
for p in s.ports.values():
stack = s
if stack is not None:
if port.net_name in stack.nets:
- stack.nets[port.net_name].withdraw_ip_address(port.ip_address)
+ stack.nets[port.net_name].withdraw_ip_address(
+ port.ip_address)
for server in stack.servers.values():
try:
server.port_names.remove(port.name)
except Exception as ex:
LOG.exception("Neutron: Delete port exception.")
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class NeutronAddFloatingIp(Resource):
ip["floating_ip_address"] = "172.0.0.%d" % i
ip["fixed_ip_address"] = "10.0.0.%d" % i
resp["floatingips"].append(ip)
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
def post(self):
"""
status=400, mimetype='application/json')
if port.floating_ip is not None:
- return Response("We allow only one floating ip per port\n", status=400, mimetype='application/json')
+ return Response("We allow only one floating ip per port\n",
+ status=400, mimetype='application/json')
else:
num_ports = len(self.api.compute.ports)
name = "port:cp%s:fl:%s" % (num_ports, str(uuid.uuid4()))
resp["floating_ip_address"] = port.floating_ip
resp["fixed_ip_address"] = port.floating_ip
- return Response(json.dumps(response), status=200, mimetype='application/json')
+ return Response(json.dumps(response), status=200,
+ mimetype='application/json')
except Exception as ex:
LOG.exception("Neutron: Create FloatingIP exception %s.", ex)
- return Response(ex.message, status=500, mimetype='application/json')
+ return Response(ex.message, status=500,
+ mimetype='application/json')
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from flask_restful import Resource
from flask import request, Response
import logging
import json
-import uuid
-
-from emuvim.api.openstack.resources.port_chain import PortChain
-from emuvim.api.openstack.helper import get_host
class SFC(Resource):
request_dict = json.loads(request.data).get("port_pair")
name = request_dict["name"]
- ingress_port = self.api.compute.find_port_by_name_or_id(request_dict["ingress"])
- egress_port = self.api.compute.find_port_by_name_or_id(request_dict["egress"])
+ ingress_port = self.api.compute.find_port_by_name_or_id(
+ request_dict["ingress"])
+ egress_port = self.api.compute.find_port_by_name_or_id(
+ request_dict["egress"])
port_pair = self.api.compute.create_port_pair(name)
port_pair.ingress = ingress_port
resp = {
"port_pair": port_pair.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=201, mimetype='application/json')
+ return Response(json.dumps(resp), status=201,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortPairsUpdate(SFC):
resp = {
"port_pair": port_pair.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortPairsDelete(SFC):
return Response("", status=204,
mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortPairsList(SFC):
port_pair_list.append(port_pair.create_dict(self.api.compute))
resp = {"port_pairs": port_pair_list}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortPairsShow(SFC):
resp = {
"port_pair": port_pair.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
###############################################################################
try:
request_dict = json.loads(request.data).get("port_pair_group")
- port_pair_group = self.api.compute.create_port_pair_group(request_dict["name"])
+ port_pair_group = self.api.compute.create_port_pair_group(
+ request_dict["name"])
port_pair_group.port_pairs = request_dict["port_pairs"]
if "description" in request_dict:
port_pair_group.description = request_dict["description"]
if "port_pair_group_parameters" in request_dict:
- port_pair_group.port_pair_group_parameters = request_dict["port_pair_group_parameters"]
+ port_pair_group.port_pair_group_parameters = request_dict[
+ "port_pair_group_parameters"]
resp = {
"port_pair_group": port_pair_group.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=201, mimetype='application/json')
+ return Response(json.dumps(resp), status=201,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortPairGroupUpdate(SFC):
try:
request_dict = json.loads(request.data).get("port_pair_group")
- port_pair_group = self.api.compute.find_port_pair_group_by_name_or_id(group_id)
+ port_pair_group = self.api.compute.find_port_pair_group_by_name_or_id(
+ group_id)
if "name" in request_dict:
port_pair_group.name = request_dict["name"]
if "description" in request_dict:
resp = {
"port_pair_group": port_pair_group.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortPairGroupDelete(SFC):
return Response("", status=204,
mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortPairGroupList(SFC):
try:
port_pair_group_list = []
for port_pair_group in self.api.compute.port_pair_groups.values():
- port_pair_group_list.append(port_pair_group.create_dict(self.api.compute))
+ port_pair_group_list.append(
+ port_pair_group.create_dict(self.api.compute))
resp = {"port_pair_groups": port_pair_group_list}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortPairGroupShow(SFC):
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
- port_pair_group = self.api.compute.find_port_pair_group_by_name_or_id(group_id)
+ port_pair_group = self.api.compute.find_port_pair_group_by_name_or_id(
+ group_id)
resp = {
"port_pair_group": port_pair_group.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
###############################################################################
try:
request_dict = json.loads(request.data).get("flow_classifier")
- flow_classifier = self.api.compute.create_flow_classifier(request_dict["name"])
+ flow_classifier = self.api.compute.create_flow_classifier(
+ request_dict["name"])
if "description" in request_dict:
flow_classifier.description = request_dict["description"]
if "ethertype" in request_dict:
if "source_port_range_max" in request_dict:
flow_classifier.source_port_range_max = request_dict["source_port_range_max"]
if "destination_port_range_min" in request_dict:
- flow_classifier.destination_port_range_min = request_dict["destination_port_range_min"]
+ flow_classifier.destination_port_range_min = request_dict[
+ "destination_port_range_min"]
if "destination_port_range_max" in request_dict:
- flow_classifier.destination_port_range_max = request_dict["destination_port_range_max"]
+ flow_classifier.destination_port_range_max = request_dict[
+ "destination_port_range_max"]
if "source_ip_prefix" in request_dict:
flow_classifier.source_ip_prefix = request_dict["source_ip_prefix"]
if "destination_ip_prefix" in request_dict:
resp = {
"flow_classifier": flow_classifier.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=201, mimetype='application/json')
+ return Response(json.dumps(resp), status=201,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class FlowClassifierUpdate(SFC):
try:
request_dict = json.loads(request.data).get("flow_classifier")
- flow_classifier = self.api.compute.find_flow_classifier_by_name_or_id(flow_classifier_id)
+ flow_classifier = self.api.compute.find_flow_classifier_by_name_or_id(
+ flow_classifier_id)
if "name" in request_dict:
flow_classifier.name = request_dict["name"]
if "description" in request_dict:
resp = {
"flow_classifier": flow_classifier.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class FlowClassifierDelete(SFC):
return Response("", status=204,
mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class FlowClassifierList(SFC):
try:
flow_classifier_list = []
for flow_classifier in self.api.compute.flow_classifiers.values():
- flow_classifier_list.append(flow_classifier.create_dict(self.api.compute))
+ flow_classifier_list.append(
+ flow_classifier.create_dict(self.api.compute))
resp = {"flow_classifiers": flow_classifier_list}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class FlowClassifierShow(SFC):
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
- flow_classifier = self.api.compute.find_flow_classifier_by_name_or_id(flow_classifier_id)
+ flow_classifier = self.api.compute.find_flow_classifier_by_name_or_id(
+ flow_classifier_id)
resp = {
"flow_classifier": flow_classifier.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
###############################################################################
try:
request_dict = json.loads(request.data).get("port_chain")
- port_chain = self.api.compute.create_port_chain(request_dict["name"])
+ port_chain = self.api.compute.create_port_chain(
+ request_dict["name"])
port_chain.port_pair_groups = request_dict["port_pair_groups"]
if "description" in request_dict:
port_chain.description = request_dict["description"]
resp = {
"port_chain": port_chain.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=201, mimetype='application/json')
+ return Response(json.dumps(resp), status=201,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortChainUpdate(SFC):
resp = {
"port_chain": port_chain.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortChainDelete(SFC):
return Response("", status=204,
mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortChainList(SFC):
try:
port_chain_list = []
for port_chain in self.api.compute.port_chains.values():
- port_chain_list.append(port_chain.create_dict(self.api.compute))
+ port_chain_list.append(
+ port_chain.create_dict(self.api.compute))
resp = {"port_chains": port_chain_list}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
class PortChainShow(SFC):
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
- port_chain = self.api.compute.find_port_chain_by_name_or_id(chain_id)
+ port_chain = self.api.compute.find_port_chain_by_name_or_id(
+ chain_id)
resp = {
"port_chain": port_chain.create_dict(self.api.compute)
}
- return Response(json.dumps(resp), status=200, mimetype='application/json')
+ return Response(json.dumps(resp), status=200,
+ mimetype='application/json')
except Exception as ex:
- logging.exception("Neutron SFC: %s Exception." % str(self.__class__.__name__))
- return Response(ex.message, status=500, mimetype='application/json')
+ logging.exception("Neutron SFC: %s Exception." %
+ str(self.__class__.__name__))
+ return Response(ex.message, status=500,
+ mimetype='application/json')
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from flask_restful import Resource
from flask import Response, request
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
resp['servers'].append(s)
- response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response = Response(json.dumps(resp), status=200,
+ mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
def post(self, id):
if self.api.compute.find_server_by_name_or_id(name) is not None:
LOG.error("Server with name %s already exists. 409" % name)
- return Response("Server with name %s already exists." % name, status=409)
+ return Response(
+ "Server with name %s already exists." % name, status=409)
# TODO: not finished!
- resp = dict()
-
server = self.api.compute.create_server(name)
- server.full_name = str(self.api.compute.dc.label) + "_" + server_dict["name"]
+ server.full_name = str(
+ self.api.compute.dc.label) + "_" + server_dict["name"]
server.template_name = server_dict["name"]
if "metadata" in server_dict:
server.properties = server_dict["metadata"]
if networks is not None:
for net in networks:
- port = self.api.compute.find_port_by_name_or_id(net.get('port', ""))
+ port = self.api.compute.find_port_by_name_or_id(
+ net.get('port', ""))
if port is not None:
server.port_names.append(port.name)
else:
- return Response("Currently only networking by port is supported.", status=400)
+ return Response(
+ "Currently only networking by port is supported.", status=400)
self.api.compute._start_compute(server)
resp['servers'].append(s)
- response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response = Response(json.dumps(resp), status=200,
+ mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
resp['servers'].append(s)
- response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response = Response(json.dumps(resp), status=200,
+ mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
flavor.id)}]
resp['flavors'].append(f)
- response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response = Response(json.dumps(resp), status=200,
+ mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
def post(self, id):
id,
f.id)}]
resp = {"flavor": data}
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
class NovaListFlavorsDetails(Resource):
f['rxtx_factor'] = 1.0
resp['flavors'].append(f)
- response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response = Response(json.dumps(resp), status=200,
+ mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the list of servers." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the list of servers." % __name__)
return ex.message, 500
def post(self, id):
id,
f.id)}]
resp = {"flavor": data}
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
class NovaListFlavorById(Resource):
self.api.port,
id,
flavor.id)}]
- response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response = Response(json.dumps(resp), status=200,
+ mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve flavor with id %s" % (__name__, flavorid))
+ LOG.exception(u"%s: Could not retrieve flavor with id %s" %
+ (__name__, flavorid))
return ex.message, 500
def delete(self, id, flavorid):
id,
image.id)}]
resp['images'].append(f)
- response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response = Response(json.dumps(resp), status=200,
+ mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
}
resp['images'].append(f)
- response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response = Response(json.dumps(resp), status=200,
+ mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
i['id'] = image.id
i['name'] = image.name
- return Response(json.dumps(resp), status=200, mimetype="application/json")
+ return Response(json.dumps(resp), status=200,
+ mimetype="application/json")
- response = Response("Image with id or name %s does not exists." % imageid, status=404)
+ response = Response(
+ "Image with id or name %s does not exists." % imageid, status=404)
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve image with id %s." % (__name__, imageid))
+ LOG.exception(u"%s: Could not retrieve image with id %s." %
+ (__name__, imageid))
return ex.message, 500
def delete(self, id, imageid):
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
- return Response("Server with id or name %s does not exists." % serverid, status=404)
+ return Response(
+ "Server with id or name %s does not exists." % serverid, status=404)
s = server.create_server_dict()
s['links'] = [{'href': "http://%s:%d/v2.1/%s/servers/%s" % (get_host(request),
self.api.port,
]
}
- response = Response(json.dumps({'server': s}), status=200, mimetype="application/json")
+ response = Response(json.dumps(
+ {'server': s}), status=200, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the server details." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the server details." % __name__)
return ex.message, 500
def delete(self, id, serverid):
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
- return Response('Could not find server.', status=404, mimetype="application/json")
+ return Response('Could not find server.',
+ status=404, mimetype="application/json")
self.api.compute.stop_compute(server)
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
- return Response("Server with id or name %s does not exists." % serverid, status=404)
+ return Response(
+ "Server with id or name %s does not exists." % serverid, status=404)
if server.emulator_compute is None:
LOG.error("The targeted container does not exist.")
- return Response("The targeted container of %s does not exist." % serverid, status=404)
+ return Response(
+ "The targeted container of %s does not exist." % serverid, status=404)
data = json.loads(request.data).get("interfaceAttachment")
resp = dict()
port = data.get("port_id", None)
elif net is not None:
network = self.api.compute.find_network_by_name_or_id(net)
if network is None:
- return Response("Network with id or name %s does not exists." % net, status=404)
+ return Response(
+ "Network with id or name %s does not exists." % net, status=404)
port = self.api.compute.create_port("port:cp%s:fl:%s" %
(len(self.api.compute.ports), str(uuid.uuid4())))
port = self.api.compute.find_port_by_name_or_id(port)
network_dict['id'] = port.intf_name
network_dict['ip'] = port.ip_address
- network = self.api.compute.find_network_by_name_or_id(port.net_name)
+ network = self.api.compute.find_network_by_name_or_id(
+ port.net_name)
network_dict[network_dict['id']] = network.name
else:
- raise Exception("You can only attach interfaces by port or network at the moment")
+ raise Exception(
+ "You can only attach interfaces by port or network at the moment")
if network == self.api.manage.floating_network:
dc.net.addLink(server.emulator_compute, self.api.manage.floating_switch,
params1=network_dict, cls=Link, intfName1=port.intf_name)
resp["port_state"] = "ACTIVE"
resp["port_id"] = port.id
- resp["net_id"] = self.api.compute.find_network_by_name_or_id(port.net_name).id
+ resp["net_id"] = self.api.compute.find_network_by_name_or_id(
+ port.net_name).id
resp["mac_addr"] = port.mac_address
resp["fixed_ips"] = list()
fixed_ips = dict()
fixed_ips["ip_address"] = port.ip_address
fixed_ips["subnet_id"] = network.subnet_name
resp["fixed_ips"].append(fixed_ips)
- response = Response(json.dumps({"interfaceAttachment": resp}), status=202, mimetype="application/json")
+ response = Response(json.dumps(
+ {"interfaceAttachment": resp}), status=202, mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not add interface to the server." % __name__)
+ LOG.exception(
+ u"%s: Could not add interface to the server." % __name__)
return ex.message, 500
try:
server = self.api.compute.find_server_by_name_or_id(serverid)
if server is None:
- return Response("Server with id or name %s does not exists." % serverid, status=404)
+ return Response(
+ "Server with id or name %s does not exists." % serverid, status=404)
port = self.api.compute.find_port_by_name_or_id(port_id)
if port is None:
- return Response("Port with id or name %s does not exists." % port_id, status=404)
+ return Response(
+ "Port with id or name %s does not exists." % port_id, status=404)
for link in self.api.compute.dc.net.links:
if str(link.intf1) == port.intf_name and \
- str(link.intf1.ip) == port.ip_address.split('/')[0]:
+ str(link.intf1.ip) == port.ip_address.split('/')[0]:
self.api.compute.dc.net.removeLink(link)
break
return response
except Exception as ex:
- LOG.exception(u"%s: Could not detach interface from the server." % __name__)
+ LOG.exception(
+ u"%s: Could not detach interface from the server." % __name__)
return ex.message, 500
"rate": []
}
}
- response = Response(json.dumps(resp), status=200, mimetype="application/json")
+ response = Response(json.dumps(resp), status=200,
+ mimetype="application/json")
response.headers['Access-Control-Allow-Origin'] = '*'
return response
except Exception as ex:
- LOG.exception(u"%s: Could not retrieve the list of images." % __name__)
+ LOG.exception(
+ u"%s: Could not retrieve the list of images." % __name__)
return ex.message, 500
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from instance_flavor import InstanceFlavor
-from model import Model
-from net import Net
-from port import Port
-from port_pair import PortPair
-from port_pair_group import PortPairGroup
-from flow_classifier import FlowClassifier
-from port_chain import PortChain
-from resource import Resource
-from router import Router
-from server import Server
-from stack import Stack
-from template import Template
-from image import Image
\ No newline at end of file
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import uuid
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import uuid
from datetime import datetime
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import uuid
class InstanceFlavor:
- def __init__(self, name, cpu=None, memory=None, memory_unit=None, storage=None, storage_unit=None):
+ def __init__(self, name, cpu=None, memory=None,
+ memory_unit=None, storage=None, storage_unit=None):
self.id = str(uuid.uuid4())
self.name = name
self.cpu = cpu
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
class LoadBalancer(object):
- def __init__(self, name, id=None, flavor=None, image=None, command=None, nw_list=None):
+ def __init__(self, name, id=None, flavor=None,
+ image=None, command=None, nw_list=None):
self.name = name
self.id = id # not set
self.out_ports = dict()
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
class Model:
def __init__(self, resources=None):
if not resources:
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import re
if self.start_end_dict is None:
return None
- int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 2 # First address as network address not usable
+ # First address as network address not usable
+ int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 2
# Second one is for gateways only
- int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1 # Last address for broadcasts
+ # Last address for broadcasts
+ int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1
while int_start_ip in self._issued_ip_addresses and int_start_ip <= int_end_ip:
int_start_ip += 1
if int_ip in self._issued_ip_addresses:
return False
- int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 1 # First address as network address not usable
- int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1 # Last address for broadcasts
+ # First address as network address not usable
+ int_start_ip = Net.ip_2_int(self.start_end_dict['start']) + 1
+ # Last address for broadcasts
+ int_end_ip = Net.ip_2_int(self.start_end_dict['end']) - 1
if int_ip < int_start_ip or int_ip > int_end_ip:
return False
"""
int_ip = Net.cidr_2_int(cidr)
- if not int_ip in self._issued_ip_addresses:
+ if int_ip not in self._issued_ip_addresses:
return False
if self._issued_ip_addresses[int_ip] == port_name:
:rtype: ``dict``
"""
network_dict = dict()
- network_dict["status"] = "ACTIVE" # TODO do we support inactive networks?
- if self.subnet_id == None:
+ # TODO do we support inactive networks?
+ network_dict["status"] = "ACTIVE"
+ if self.subnet_id is None:
network_dict["subnets"] = []
else:
network_dict["subnets"] = [self.subnet_id]
network_dict["name"] = self.name
network_dict["admin_state_up"] = True # TODO is it always true?
- network_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456" # TODO what should go in here
+ # TODO what should go in here
+ network_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"
network_dict["id"] = self.id
network_dict["shared"] = False # TODO is it always false?
return network_dict
subnet_dict = dict()
subnet_dict["name"] = self.subnet_name
subnet_dict["network_id"] = self.id
- subnet_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456" # TODO what should go in here?
+ # TODO what should go in here?
+ subnet_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"
subnet_dict["created_at"] = self.subnet_creation_time
subnet_dict["dns_nameservers"] = []
subnet_dict["allocation_pools"] = [self.start_end_dict]
def __eq__(self, other):
if self.name == other.name and self.subnet_name == other.subnet_name and \
- self.gateway_ip == other.gateway_ip and \
- self.segmentation_id == other.segmentation_id and \
- self._cidr == other._cidr and \
- self.start_end_dict == other.start_end_dict:
+ self.gateway_ip == other.gateway_ip and \
+ self.segmentation_id == other.segmentation_id and \
+ self._cidr == other._cidr and \
+ self.start_end_dict == other.start_end_dict:
return True
return False
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2017 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
import threading
import uuid
class Port:
- def __init__(self, name, ip_address=None, mac_address=None, floating_ip=None):
+ def __init__(self, name, ip_address=None,
+ mac_address=None, floating_ip=None):
self.name = name
self.intf_name = None
self.id = str(uuid.uuid4())
if len(split_name) >= 3:
if split_name[2] == 'input' or split_name[2] == 'in':
self.intf_name = split_name[0][:4] + '-' + \
- 'in'
+ 'in'
elif split_name[2] == 'output' or split_name[2] == 'out':
self.intf_name = split_name[0][:4] + '-' + \
- 'out'
+ 'out'
else:
self.intf_name = split_name[0][:4] + '-' + \
- split_name[2][:4]
+ split_name[2][:4]
else:
self.intf_name = self.name[:9]
global intf_names
intf_len = len(self.intf_name)
self.intf_name = self.intf_name + '-' + str(counter)[:4]
- while self.intf_name in intf_names and counter < 999 and not intf_names[self.intf_name][0] == self.id:
+ while self.intf_name in intf_names and counter < 999 and not intf_names[
+ self.intf_name][0] == self.id:
counter += 1
self.intf_name = self.intf_name[:intf_len] + '-' + str(counter)[:4]
if counter >= 1000:
- logging.ERROR("Port %s could not create unique interface name (%s)", self.name, self.intf_name)
+ logging.ERROR(
+ "Port %s could not create unique interface name (%s)", self.name, self.intf_name)
lock.release()
return
"""
port_dict = dict()
port_dict["admin_state_up"] = True # TODO is it always true?
- port_dict["device_id"] = "257614cc-e178-4c92-9c61-3b28d40eca44" # TODO find real values
+ # TODO find real values
+ port_dict["device_id"] = "257614cc-e178-4c92-9c61-3b28d40eca44"
port_dict["device_owner"] = "" # TODO do we have such things?
net = compute.find_network_by_name_or_id(self.net_name)
port_dict["fixed_ips"] = [
port_dict["name"] = self.name
port_dict["network_id"] = net.id if net is not None else ""
port_dict["status"] = "ACTIVE" # TODO do we support inactive port?
- port_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456" # TODO find real tenant_id
+ # TODO find real tenant_id
+ port_dict["tenant_id"] = "abcdefghijklmnopqrstuvwxyz123456"
return port_dict
def compare_attributes(self, other):
return False
if self.name == other.name and self.floating_ip == other.floating_ip and \
- self.net_name == other.net_name:
+ self.net_name == other.net_name:
return True
return False
return False
if self.name == other.name and self.ip_address == other.ip_address and \
- self.mac_address == other.mac_address and \
- self.floating_ip == other.floating_ip and \
- self.net_name == other.net_name:
+ self.mac_address == other.mac_address and \
+ self.floating_ip == other.floating_ip and \
+ self.net_name == other.net_name:
return True
return False
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2017 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import random
import uuid
import logging
self.flow_classifiers = list()
self.chain_parameters = dict()
- # Cookie for internal identification of installed flows (e.g. to delete them)
+ # Cookie for internal identification of installed flows (e.g. to delete
+ # them)
self.cookie = random.randint(1, 0xffffffff)
def create_dict(self, compute):
def install(self, compute):
for flow_classifier_id in self.flow_classifiers:
- flow_classifier = compute.find_flow_classifier_by_name_or_id(flow_classifier_id)
+ flow_classifier = compute.find_flow_classifier_by_name_or_id(
+ flow_classifier_id)
if flow_classifier:
pass
- # TODO: for every flow classifier create match and pass it to setChain
+ # TODO: for every flow classifier create match and pass it to
+ # setChain
for group_id in self.port_pair_groups:
- port_pair_group = compute.find_port_pair_group_by_name_or_id(group_id)
+ port_pair_group = compute.find_port_pair_group_by_name_or_id(
+ group_id)
for port_pair_id in port_pair_group.port_pairs:
port_pair = compute.find_port_pair_by_name_or_id(port_pair_id)
# TODO: Not sure, if this should throw an error
if not server_ingress:
- logging.warn("Neutron SFC: ingress port %s not connected." % str(port_pair.ingress.name))
+ logging.warn("Neutron SFC: ingress port %s not connected." % str(
+ port_pair.ingress.name))
continue
if not server_egress:
- logging.warn("Neutron SFC: egress port %s not connected." % str(port_pair.egress.name))
+ logging.warn("Neutron SFC: egress port %s not connected." % str(
+ port_pair.egress.name))
continue
compute.dc.net.setChain(
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2017 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import uuid
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2017 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import uuid
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2017 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
class Resource:
def __init__(self, name, type=None, properties=None):
self.name = name
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2017 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import uuid
def __eq__(self, other):
if self.name == other.name and len(self.subnet_names) == len(other.subnet_names) and \
- set(self.subnet_names) == set(other.subnet_names):
+ set(self.subnet_names) == set(other.subnet_names):
return True
return False
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2017 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
class Server(object):
- def __init__(self, name, id=None, flavor=None, image=None, command=None, nw_list=None):
+ def __init__(self, name, id=None, flavor=None,
+ image=None, command=None, nw_list=None):
self.name = name
self.full_name = None
self.template_name = None
:rtype: ``bool``
"""
if self.name == other.name and self.full_name == other.full_name and \
- self.flavor == other.flavor and \
- self.image == other.image and \
- self.command == other.command:
+ self.flavor == other.flavor and \
+ self.image == other.image and \
+ self.command == other.command:
return True
return False
def __eq__(self, other):
if self.name == other.name and self.full_name == other.full_name and \
- self.flavor == other.flavor and \
- self.image == other.image and \
- self.command == other.command and \
- len(self.port_names) == len(other.port_names) and \
- set(self.port_names) == set(other.port_names):
+ self.flavor == other.flavor and \
+ self.image == other.image and \
+ self.command == other.command and \
+ len(self.port_names) == len(other.port_names) and \
+ set(self.port_names) == set(other.port_names):
return True
return False
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2017 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import uuid
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
+# Copyright (c) 2017 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
class Template:
def __init__(self, resources=None):
self.version = '2015-04-30'
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
from flask_restful import Resource
from flask import request
import json
import threading
-from copy import deepcopy
logging.basicConfig()
data = request.json
if data is None:
data = {}
- elif type(data) is not dict:
+ elif not isinstance(data, dict):
data = json.loads(request.json)
network = data.get("network")
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
- logging.debug("%r = %r" % (var , cmd))
- if var=="SON_EMU_CMD" or var=="VIM_EMU_CMD":
- logging.info("Executing entry point script in %r: %r" % (c.name, cmd))
- # execute command in new thread to ensure that API is not blocked by VNF
+ logging.debug("%r = %r" % (var, cmd))
+ if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
+ logging.info(
+ "Executing entry point script in %r: %r" % (c.name, cmd))
+ # execute command in new thread to ensure that API is
+ # not blocked by VNF
t = threading.Thread(target=c.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
logging.debug("API CALL: compute status")
try:
- return dcs.get(dc_label).containers.get(compute_name).getStatus(), 200, CORS_HEADER
+ return dcs.get(dc_label).containers.get(
+ compute_name).getStatus(), 200, CORS_HEADER
except Exception as ex:
logging.exception("API error.")
return ex.message, 500, CORS_HEADER
def delete(self, dc_label, compute_name):
logging.debug("API CALL: compute stop")
try:
- return dcs.get(dc_label).stopCompute(compute_name), 200, CORS_HEADER
+ return dcs.get(dc_label).stopCompute(
+ compute_name), 200, CORS_HEADER
except Exception as ex:
logging.exception("API error.")
return ex.message, 500, CORS_HEADER
all_containers += dc.listCompute()
all_extSAPs += dc.listExtSAPs()
- extSAP_list = [(sap.name, sap.getStatus()) for sap in all_extSAPs]
- container_list = [(c.name, c.getStatus()) for c in all_containers]
+ extSAP_list = [(sap.name, sap.getStatus())
+ for sap in all_extSAPs]
+ container_list = [(c.name, c.getStatus())
+ for c in all_containers]
total_list = container_list + extSAP_list
return total_list, 200, CORS_HEADER
else:
# return list of compute nodes for specified DC
- container_list = [(c.name, c.getStatus()) for c in dcs.get(dc_label).listCompute()]
- extSAP_list = [(sap.name, sap.getStatus()) for sap in dcs.get(dc_label).listExtSAPs()]
+ container_list = [(c.name, c.getStatus())
+ for c in dcs.get(dc_label).listCompute()]
+ extSAP_list = [(sap.name, sap.getStatus())
+ for sap in dcs.get(dc_label).listExtSAPs()]
total_list = container_list + extSAP_list
return total_list, 200, CORS_HEADER
except Exception as ex:
logging.exception("API error.")
return ex.message, 500, CORS_HEADER
+
class ComputeResources(Resource):
"""
Update the container's resources using the docker.update function
# then no data
if params is None:
params = {}
- logging.debug("REST CALL: update container resources {0}".format(params))
- #check if container exists
+ logging.debug(
+ "REST CALL: update container resources {0}".format(params))
+ # check if container exists
d = dcs.get(dc_label).net.getNodeByName(compute_name)
# general request of cpu percentage
cpu_period = int(dcs.get(dc_label).net.cpu_period)
value = params.get('cpu_bw')
cpu_quota = int(cpu_period * float(value))
- #put default values back
+ # put default values back
if float(value) <= 0:
cpu_period = 100000
cpu_quota = -1
params['cpu_period'] = cpu_period
params['cpu_quota'] = cpu_quota
- #d.updateCpuLimit(cpu_period=cpu_period, cpu_quota=cpu_quota)
+ # d.updateCpuLimit(cpu_period=cpu_period, cpu_quota=cpu_quota)
# only pass allowed keys to docker
allowed_keys = ['blkio_weight', 'cpu_period', 'cpu_quota', 'cpu_shares', 'cpuset_cpus',
'cpuset_mems', 'mem_limit', 'mem_reservation', 'memswap_limit',
'kernel_memory', 'restart_policy']
- filtered_params = {key:params[key] for key in allowed_keys if key in params}
+ filtered_params = {key: params[key]
+ for key in allowed_keys if key in params}
d.update_resources(**filtered_params)
return d
+
class DatacenterList(Resource):
global dcs
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Distributed Cloud Emulator (dcemulator)
-Networking and monitoring functions
-(c) 2015 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+#
+# Distributed Cloud Emulator (dcemulator)
+# Networking and monitoring functions
+# (c) 2015 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
import logging
-from flask_restful import Resource, reqparse
+from flask_restful import Resource
from flask import request
-import json
logging.basicConfig()
net = None
-
class MonitorInterfaceAction(Resource):
"""
Monitor the counters of a VNF interface
try:
if cookie:
- c = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+ c = net.monitor_agent.setup_flow(
+ vnf_name, vnf_interface, metric, cookie)
else:
- c = net.monitor_agent.setup_metric(vnf_name, vnf_interface, metric)
+ c = net.monitor_agent.setup_metric(
+ vnf_name, vnf_interface, metric)
# return monitor message response
- return str(c), 200, CORS_HEADER
+ return str(c), 200, CORS_HEADER
except Exception as ex:
logging.exception("API error.")
return ex.message, 500, CORS_HEADER
try:
if cookie:
- c = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+ c = net.monitor_agent.stop_flow(
+ vnf_name, vnf_interface, metric, cookie)
else:
- c = net.monitor_agent.stop_metric(vnf_name, vnf_interface, metric)
+ c = net.monitor_agent.stop_metric(
+ vnf_name, vnf_interface, metric)
# return monitor message response
return str(c), 200, CORS_HEADER
except Exception as ex:
cookie = data.get("cookie", 0)
try:
- c = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+ c = net.monitor_agent.setup_flow(
+ vnf_name, vnf_interface, metric, cookie)
# return monitor message response
return str(c), 200, CORS_HEADER
except Exception as ex:
cookie = data.get("cookie", 0)
try:
- c = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+ c = net.monitor_agent.stop_flow(
+ vnf_name, vnf_interface, metric, cookie)
# return monitor message response
return str(c), 200, CORS_HEADER
except Exception as ex:
logging.exception("API error.")
return ex.message, 500, CORS_HEADER
+
class MonitorLinkAction(Resource):
"""
Add or remove flow monitoring on chains between VNFs.
:return: message string indicating if the chain action is succesful or not
"""
- # the global net is set from the topology file, and connected via connectDCNetwork function in rest_api_endpoint.py
+ # the global net is set from the topology file, and connected via
+ # connectDCNetwork function in rest_api_endpoint.py
global net
def put(self):
monitor = data.get("monitor")
monitor_placement = data.get("monitor_placement")
- #first install monitor flow
+ # first install monitor flow
c1 = net.setChain(
vnf_src_name, vnf_dst_name,
vnf_src_interface=vnf_src_interface,
monitor=monitor,
monitor_placement=monitor_placement)
- #then export monitor flow
+ # then export monitor flow
metric = data.get("metric")
if 'rx' in monitor_placement:
vnf_name = vnf_dst_name
c2 = 'command unknown'
if command == 'add-flow':
- c2 = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+ c2 = net.monitor_agent.setup_flow(
+ vnf_name, vnf_interface, metric, cookie)
elif command == 'del-flows':
- c2 = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+ c2 = net.monitor_agent.stop_flow(
+ vnf_name, vnf_interface, metric, cookie)
# return setChain response
return (str(c1) + " " + str(c2)), 200, CORS_HEADER
logging.exception("API error.")
return ex.message, 500, CORS_HEADER
+
class MonitorSkewAction(Resource):
"""
Monitor the counters of a VNF interface
resource_name = data.get("resource_name", 'cpu')
try:
# configure skewmon
- c = net.monitor_agent.update_skewmon(vnf_name, resource_name, action='start')
+ c = net.monitor_agent.update_skewmon(
+ vnf_name, resource_name, action='start')
# return monitor message response
- return str(c), 200, CORS_HEADER
+ return str(c), 200, CORS_HEADER
except Exception as ex:
logging.exception("API error.")
return ex.message, 500, CORS_HEADER
resource_name = data.get("resource_name", 'cpu')
try:
# configure skewmon
- c = net.monitor_agent.update_skewmon(vnf_name, resource_name, action='stop')
+ c = net.monitor_agent.update_skewmon(
+ vnf_name, resource_name, action='stop')
# return monitor message response
return str(c), 200, CORS_HEADER
logging.exception("API error.")
return ex.message, 500, CORS_HEADER
+
class MonitorTerminal(Resource):
"""
start a terminal for the selected VNFs
c = net.monitor_agent.term(vnf_list)
# return monitor message response
- return str(c), 200, CORS_HEADER
+ return str(c), 200, CORS_HEADER
except Exception as ex:
logging.exception("API error.")
return ex.message, 500, CORS_HEADER
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Distributed Cloud Emulator (dcemulator)
-Networking and monitoring functions
-(c) 2015 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+#
+# Distributed Cloud Emulator (dcemulator)
+# Networking and monitoring functions
+# (c) 2015 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
import logging
from flask_restful import Resource
from flask import request
-import json
import networkx
logging.basicConfig()
CORS_HEADER = {'Access-Control-Allow-Origin': '*'}
-# the global net is set from the topology file, and connected via connectDCNetwork function in rest_api_endpoint.py
+# the global net is set from the topology file, and connected via
+# connectDCNetwork function in rest_api_endpoint.py
net = None
node_attr = networkx.get_node_attributes(net.DCNetwork_graph, 'type')
for node_name in net.DCNetwork_graph.nodes():
nodes2.append(node_name)
- node_index = nodes2.index(node_name)
type = node_attr[node_name]
- node_dict = {"name":node_name,"group":type}
+ node_dict = {"name": node_name, "group": type}
nodes.append(node_dict)
# add links between other DCs
node1_index = nodes2.index(node1_name)
for node2_name in net.DCNetwork_graph.neighbors(node1_name):
node2_index = nodes2.index(node2_name)
- edge_dict = {"source": node1_index, "target": node2_index, "value": 10}
+ edge_dict = {"source": node1_index,
+ "target": node2_index, "value": 10}
links.append(edge_dict)
- json = {"nodes":nodes, "links":links}
+ json = {"nodes": nodes, "links": links}
return json, 200, CORS_HEADER
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
import threading
# need to import total module to set its global variable dcs
import compute
-from compute import dcs, ComputeList, Compute, ComputeResources, DatacenterList, DatacenterStatus
+from compute import ComputeList, Compute, ComputeResources, DatacenterList, DatacenterStatus
# need to import total module to set its global variable net
import network
self.ip = listenip
self.port = port
- # connect this DC network to the rest api endpoint (needed for the networking and monitoring api)
+ # connect this DC network to the rest api endpoint (needed for the
+ # networking and monitoring api)
self.connectDCNetwork(DCnetwork)
# setup Flask
# find directory of dashboard files
- dashboard_file = pkg_resources.resource_filename('emuvim.dashboard', "index.html")
+ dashboard_file = pkg_resources.resource_filename(
+ 'emuvim.dashboard', "index.html")
dashboard_dir = path.dirname(dashboard_file)
logging.info("Started emu dashboard: {0}".format(dashboard_dir))
- self.app = Flask(__name__, static_folder=dashboard_dir, static_url_path='/dashboard')
+ self.app = Flask(__name__, static_folder=dashboard_dir,
+ static_url_path='/dashboard')
self.api = Api(self.app)
# setup endpoints
# compute related actions (start/stop VNFs, get info)
- self.api.add_resource(Compute, "/restapi/compute/<dc_label>/<compute_name>")
+ self.api.add_resource(
+ Compute, "/restapi/compute/<dc_label>/<compute_name>")
self.api.add_resource(ComputeList,
- "/restapi/compute",
- "/restapi/compute/<dc_label>")
- self.api.add_resource(ComputeResources, "/restapi/compute/resources/<dc_label>/<compute_name>")
+ "/restapi/compute",
+ "/restapi/compute/<dc_label>")
+ self.api.add_resource(
+ ComputeResources, "/restapi/compute/resources/<dc_label>/<compute_name>")
- self.api.add_resource(DatacenterStatus, "/restapi/datacenter/<dc_label>")
+ self.api.add_resource(
+ DatacenterStatus, "/restapi/datacenter/<dc_label>")
self.api.add_resource(DatacenterList, "/restapi/datacenter")
-
# network related actions (setup chaining between VNFs)
self.api.add_resource(NetworkAction,
"/restapi/network")
# export a network interface traffic rate counter
self.api.add_resource(MonitorInterfaceAction,
"/restapi/monitor/interface")
- # export flow traffic counter, of a manually pre-installed flow entry, specified by its cookie
+ # export flow traffic counter, of a manually pre-installed flow entry,
+ # specified by its cookie
self.api.add_resource(MonitorFlowAction,
"/restapi/monitor/flow")
# install monitoring of a specific flow on a pre-existing link in the service.
self.api.add_resource(MonitorTerminal,
"/restapi/monitor/term")
-
- logging.debug("Created API endpoint %s(%s:%d)" % (self.__class__.__name__, self.ip, self.port))
-
+ logging.debug("Created API endpoint %s(%s:%d)" %
+ (self.__class__.__name__, self.ip, self.port))
def connectDatacenter(self, dc):
compute.dcs[dc.label] = dc
self.thread = threading.Thread(target=self._start_flask, args=())
self.thread.daemon = True
self.thread.start()
- logging.info("Started API endpoint @ http://%s:%d" % (self.ip, self.port))
+ logging.info("Started API endpoint @ http://%s:%d" %
+ (self.ip, self.port))
def stop(self):
if self.http_server:
self.http_server.close()
def _start_flask(self):
- #self.app.run(self.ip, self.port, debug=False, use_reloader=False)
- #this should be a more production-fit http-server
- #self.app.logger.setLevel(logging.ERROR)
+ # self.app.run(self.ip, self.port, debug=False, use_reloader=False)
+ # this should be a more production-fit http-server
+ # self.app.logger.setLevel(logging.ERROR)
self.http_server = WSGIServer((self.ip, self.port),
- self.app,
- log=open("/dev/null", "w") # This disables HTTP request logs to not mess up the CLI when e.g. the auto-updated dashboard is used
- )
+ self.app,
+ # This disables HTTP request logs to not
+ # mess up the CLI when e.g. the
+ # auto-updated dashboard is used
+ log=open("/dev/null", "w")
+ )
self.http_server.serve_forever()
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-This module implements a simple REST API that behaves like SONATA's gatekeeper.
-
-It is only used to support the development of SONATA's SDK tools and to demonstrate
-the year 1 version of the emulator until the integration with WP4's orchestrator is done.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
import threading
import dummygatekeeper as dgk
"""
def __init__(self, listenip, port, deploy_sap=False, docker_management=False,
- auto_deploy=False, auto_delete=False, sap_vnfd_path=None):
+ auto_deploy=False, auto_delete=False, sap_vnfd_path=None):
self.dcs = {}
self.ip = listenip
self.port = port
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-This module implements a simple REST API that behaves like SONATA's gatekeeper.
-
-It is only used to support the development of SONATA's SDK tools and to demonstrate
-the year 1 version of the emulator until the integration with WP4's orchestrator is done.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
import os
import uuid
import zipfile
import yaml
import threading
-from docker import DockerClient, APIClient
+from docker import DockerClient
from flask import Flask, request
import flask_restful as fr
from collections import defaultdict
import ipaddress
import copy
import time
+from functools import reduce
logging.basicConfig()
LOG = logging.getLogger("sonata-dummy-gatekeeper")
# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False
-# flag to indicate that we run without the emulator (only the bare API for integration testing)
+# flag to indicate that we run without the emulator (only the bare API for
+# integration testing)
GK_STANDALONE_MODE = False
# should a new version of an image be pulled even if its available
FORCE_PULL = False
# Automatically deploy SAPs (endpoints) of the service as new containers
-# Attention: This is not a configuration switch but a global variable! Don't change its default value.
+# Attention: This is not a configuration switch but a global variable!
+# Don't change its default value.
DEPLOY_SAP = False
-# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
+# flag to indicate if we use bidirectional forwarding rules in the
+# automatic chaining process
BIDIRECTIONAL_CHAIN = False
-# override the management interfaces in the descriptors with default docker0 interfaces in the containers
+# override the management interfaces in the descriptors with default
+# docker0 interfaces in the containers
USE_DOCKER_MGMT = False
-# automatically deploy uploaded packages (no need to execute son-access deploy --latest separately)
+# automatically deploy uploaded packages (no need to execute son-access
+# deploy --latest separately)
AUTO_DEPLOY = False
# and also automatically terminate any other running services
AUTO_DELETE = False
+
def generate_subnets(prefix, base, subnet_size=50, mask=24):
# Generate a list of ipaddress in subnets
r = list()
subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
r.append(ipaddress.ip_network(unicode(subnet)))
return r
+
+
# private subnet definitions for the generated interfaces
# 10.10.xxx.0/24
SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
# path to the VNFD for the SAP VNF that is deployed as internal SAP point
-SAP_VNFD=None
+SAP_VNFD = None
# Time in seconds to wait for vnf stop scripts to execute fully
VNF_STOP_WAIT_TIME = 5
+
class Gatekeeper(object):
def __init__(self):
self.services = dict()
self.dcs = dict()
self.net = None
- self.vnf_counter = 0 # used to generate short names for VNFs (Mininet limitation)
+ # used to generate short names for VNFs (Mininet limitation)
+ self.vnf_counter = 0
LOG.info("Create SONATA dummy gatekeeper.")
def register_service_package(self, service_uuid, service):
self.uuid = service_uuid
self.package_file_hash = package_file_hash
self.package_file_path = package_file_path
- self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
+ self.package_content_path = os.path.join(
+ CATALOG_FOLDER, "services/%s" % self.uuid)
self.manifest = None
self.nsd = None
self.vnfds = dict()
self.instances[instance_uuid] = dict()
self.instances[instance_uuid]["vnf_instances"] = list()
- # 2. compute placement of this service instance (adds DC names to VNFDs)
+ # 2. compute placement of this service instance (adds DC names to
+ # VNFDs)
if not GK_STANDALONE_MODE:
- #self._calculate_placement(FirstDcPlacement)
+ # self._calculate_placement(FirstDcPlacement)
self._calculate_placement(RoundRobinDcPlacementWithSAPs)
# 3. start all vnfds that we have in the service (except SAPs)
for vnf_id in self.vnfds:
if "virtual_links" in self.nsd and "forwarding_graphs" in self.nsd:
vlinks = self.nsd["virtual_links"]
# constituent virtual links are not checked
- #fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
- eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
- elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
+ # fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
+ eline_fwd_links = [l for l in vlinks if (
+ l["connectivity_type"] == "E-Line")]
+ elan_fwd_links = [l for l in vlinks if (
+ l["connectivity_type"] == "E-LAN")]
GK.net.deployed_elines.extend(eline_fwd_links)
GK.net.deployed_elans.extend(elan_fwd_links)
# 5b. deploy E-LAN links
self._connect_elans(elan_fwd_links, instance_uuid)
- # 6. run the emulator specific entrypoint scripts in the VNFIs of this service instance
- self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])
+ # 6. run the emulator specific entrypoint scripts in the VNFIs of this
+ # service instance
+ self._trigger_emulator_start_scripts_in_vnfis(
+ self.instances[instance_uuid]["vnf_instances"])
LOG.info("Service started. Instance id: %r" % instance_uuid)
return instance_uuid
# instance_uuid = str(self.uuid.uuid4())
vnf_instances = self.instances[instance_uuid]["vnf_instances"]
- # trigger stop skripts in vnf instances and wait a few seconds for completion
+ # trigger stop skripts in vnf instances and wait a few seconds for
+ # completion
self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
time.sleep(VNF_STOP_WAIT_TIME)
ext_sap = self.saps[sap_name]
target_dc = ext_sap.get("dc")
target_dc.removeExternalSAP(sap_name)
- LOG.info("Stopping the SAP instance: %r in DC %r" % (sap_name, target_dc))
+ LOG.info("Stopping the SAP instance: %r in DC %r" %
+ (sap_name, target_dc))
if not GK_STANDALONE_MODE:
# remove placement?
assert(docker_name is not None)
assert(target_dc is not None)
if not self._check_docker_image_exists(docker_name):
- raise Exception("Docker image %r not found. Abort." % docker_name)
+ raise Exception(
+ "Docker image %r not found. Abort." % docker_name)
# 3. get the resource limits
res_req = u.get("resource_requirements")
if cpu_list is None:
cpu_list = res_req.get("cpu").get("vcpus")
if cpu_list is None:
- cpu_list="1"
+ cpu_list = "1"
cpu_bw = res_req.get("cpu").get("cpu_bw")
if not cpu_bw:
- cpu_bw=1
+ cpu_bw = 1
mem_num = str(res_req.get("memory").get("size"))
- if len(mem_num)==0:
- mem_num="2"
+ if len(mem_num) == 0:
+ mem_num = "2"
mem_unit = str(res_req.get("memory").get("size_unit"))
- if str(mem_unit)==0:
- mem_unit="GB"
+ if str(mem_unit) == 0:
+ mem_unit = "GB"
mem_limit = float(mem_num)
- if mem_unit=="GB":
- mem_limit=mem_limit*1024*1024*1024
- elif mem_unit=="MB":
- mem_limit=mem_limit*1024*1024
- elif mem_unit=="KB":
- mem_limit=mem_limit*1024
+ if mem_unit == "GB":
+ mem_limit = mem_limit * 1024 * 1024 * 1024
+ elif mem_unit == "MB":
+ mem_limit = mem_limit * 1024 * 1024
+ elif mem_unit == "KB":
+ mem_limit = mem_limit * 1024
mem_lim = int(mem_limit)
- cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
+ cpu_period, cpu_quota = self._calculate_cpu_cfs_values(
+ float(cpu_bw))
- # check if we need to deploy the management ports (defined as type:management both on in the vnfd and nsd)
+ # check if we need to deploy the management ports (defined as
+ # type:management both on in the vnfd and nsd)
intfs = vnfd.get("connection_points", [])
mgmt_intf_names = []
if USE_DOCKER_MGMT:
- mgmt_intfs = [vnf_id + ':' + intf['id'] for intf in intfs if intf.get('type') == 'management']
- # check if any of these management interfaces are used in a management-type network in the nsd
+ mgmt_intfs = [vnf_id + ':' + intf['id']
+ for intf in intfs if intf.get('type') == 'management']
+ # check if any of these management interfaces are used in a
+ # management-type network in the nsd
for nsd_intf_name in mgmt_intfs:
- vlinks = [ l["connection_points_reference"] for l in self.nsd.get("virtual_links", [])]
+ vlinks = [l["connection_points_reference"]
+ for l in self.nsd.get("virtual_links", [])]
for link in vlinks:
- if nsd_intf_name in link and self.check_mgmt_interface(link):
- # this is indeed a management interface and can be skipped
- vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(nsd_intf_name)
- found_interfaces = [intf for intf in intfs if intf.get('id') == vnf_interface]
+ if nsd_intf_name in link and self.check_mgmt_interface(
+ link):
+ # this is indeed a management interface and can be
+ # skipped
+ vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
+ nsd_intf_name)
+ found_interfaces = [
+ intf for intf in intfs if intf.get('id') == vnf_interface]
intfs.remove(found_interfaces[0])
mgmt_intf_names.append(vnf_interface)
# 4. generate the volume paths for the docker container
- volumes=list()
+ volumes = list()
# a volume to extract log files
- docker_log_path = "/tmp/results/%s/%s"%(self.uuid,vnf_id)
- LOG.debug("LOG path for vnf %s is %s."%(vnf_id,docker_log_path))
+ docker_log_path = "/tmp/results/%s/%s" % (self.uuid, vnf_id)
+ LOG.debug("LOG path for vnf %s is %s." % (vnf_id, docker_log_path))
if not os.path.exists(docker_log_path):
- LOG.debug("Creating folder %s"%docker_log_path)
+ LOG.debug("Creating folder %s" % docker_log_path)
os.makedirs(docker_log_path)
- volumes.append(docker_log_path+":/mnt/share/")
-
+ volumes.append(docker_log_path + ":/mnt/share/")
# 5. do the dc.startCompute(name="foobar") call to run the container
# TODO consider flavors, and other annotations
# TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
# use the vnf_id in the nsd as docker name
# so deployed containers can be easily mapped back to the nsd
- LOG.info("Starting %r as %r in DC %r" % (vnf_name, vnf_id, vnfd.get("dc")))
+ LOG.info("Starting %r as %r in DC %r" %
+ (vnf_name, vnf_id, vnfd.get("dc")))
LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
vnfi = target_dc.startCompute(
- vnf_id,
- network=intfs,
- image=docker_name,
- flavor_name="small",
- cpu_quota=cpu_quota,
- cpu_period=cpu_period,
- cpuset=cpu_list,
- mem_limit=mem_lim,
- volumes=volumes,
- type=kwargs.get('type','docker'))
-
- # rename the docker0 interfaces (eth0) to the management port name defined in the VNFD
+ vnf_id,
+ network=intfs,
+ image=docker_name,
+ flavor_name="small",
+ cpu_quota=cpu_quota,
+ cpu_period=cpu_period,
+ cpuset=cpu_list,
+ mem_limit=mem_lim,
+ volumes=volumes,
+ type=kwargs.get('type', 'docker'))
+
+ # rename the docker0 interfaces (eth0) to the management port name
+ # defined in the VNFD
if USE_DOCKER_MGMT:
for intf_name in mgmt_intf_names:
- self._vnf_reconfigure_network(vnfi, 'eth0', new_name=intf_name)
+ self._vnf_reconfigure_network(
+ vnfi, 'eth0', new_name=intf_name)
return vnfi
dc = vnfi.datacenter
# stop the vnfi
- LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
+ LOG.info("Stopping the vnf instance contained in %r in DC %r" %
+ (status["name"], dc))
dc.stopCompute(status["name"])
def _get_vnf_instance(self, instance_uuid, vnf_id):
intf = vnfi.intf(intf=if_name)
if intf is not None:
intf.setIP(net_str)
- LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
+ LOG.debug("Reconfigured network of %s:%s to %r" %
+ (vnfi.name, if_name, net_str))
else:
- LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))
+ LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
+ vnfi.name, if_name))
if new_name is not None:
vnfi.cmd('ip link set', if_name, 'down')
vnfi.cmd('ip link set', if_name, 'name', new_name)
vnfi.cmd('ip link set', new_name, 'up')
- LOG.debug("Reconfigured interface name of %s:%s to %s" % (vnfi.name, if_name, new_name))
-
-
+ LOG.debug("Reconfigured interface name of %s:%s to %s" %
+ (vnfi.name, if_name, new_name))
def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
for vnfi in vnfi_list:
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
- LOG.debug("%r = %r" % (var , cmd))
- if var=="SON_EMU_CMD":
- LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
- # execute command in new thread to ensure that GK is not blocked by VNF
+ LOG.debug("%r = %r" % (var, cmd))
+ if var == "SON_EMU_CMD":
+ LOG.info("Executing entry point script in %r: %r" %
+ (vnfi.name, cmd))
+ # execute command in new thread to ensure that GK is not
+ # blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
- if var=="SON_EMU_CMD_STOP":
- LOG.info("Executing stop script in %r: %r" % (vnfi.name, cmd))
- # execute command in new thread to ensure that GK is not blocked by VNF
+ if var == "SON_EMU_CMD_STOP":
+ LOG.info("Executing stop script in %r: %r" %
+ (vnfi.name, cmd))
+ # execute command in new thread to ensure that GK is not
+ # blocked by VNF
t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
t.daemon = True
t.start()
-
-
def _unpack_service_package(self):
"""
unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
with zipfile.ZipFile(self.package_file_path, "r") as z:
z.extractall(self.package_content_path)
-
def _load_package_descriptor(self):
"""
Load the main package descriptor YAML and keep it as dict.
GK.net.deployed_nsds.append(self.nsd)
# create dict to find the vnf_name for any vnf id
self.vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
- reduce(lambda x, y: dict(x, **y),
+ reduce(lambda x, y: dict(x, **y),
map(lambda d: {d["vnf_id"]: d["vnf_name"]},
self.nsd["network_functions"])))
vnfd_set = dict()
if "package_content" in self.manifest:
for pc in self.manifest.get("package_content"):
- if pc.get("content-type") == "application/sonata.function_descriptor":
+ if pc.get(
+ "content-type") == "application/sonata.function_descriptor":
vnfd_path = os.path.join(
self.package_content_path,
make_relative_path(pc.get("name")))
vnfd = load_yaml(vnfd_path)
vnfd_set[vnfd.get("name")] = vnfd
# then link each vnf_id in the nsd to its vnfd
- for vnf_id in self.vnf_id2vnf_name:
+ for vnf_id in self.vnf_id2vnf_name:
vnf_name = self.vnf_id2vnf_name[vnf_id]
self.vnfds[vnf_id] = vnfd_set[vnf_name]
LOG.debug("Loaded VNFD: {0} id: {1}".format(vnf_name, vnf_id))
# create list of all SAPs
# check if we need to deploy management ports
if USE_DOCKER_MGMT:
- SAPs = [p for p in self.nsd["connection_points"] if 'management' not in p.get('type')]
+ SAPs = [p for p in self.nsd["connection_points"]
+ if 'management' not in p.get('type')]
else:
SAPs = [p for p in self.nsd["connection_points"]]
# make sure SAP has type set (default internal)
sap["type"] = sap.get("type", 'internal')
- # Each Service Access Point (connection_point) in the nsd is an IP address on the host
+ # Each Service Access Point (connection_point) in the nsd is an IP
+ # address on the host
if sap["type"] == "external":
# add to vnfds to calculate placement later on
sap_net = SAP_SUBNETS.pop(0)
- self.saps[sap_docker_name] = {"name": sap_docker_name , "type": "external", "net": sap_net}
+ self.saps[sap_docker_name] = {
+ "name": sap_docker_name, "type": "external", "net": sap_net}
# add SAP vnf to list in the NSD so it is deployed later on
- # each SAP gets a unique VNFD and vnf_id in the NSD and custom type (only defined in the dummygatekeeper)
+ # each SAP gets a unique VNFD and vnf_id in the NSD and custom
+ # type (only defined in the dummygatekeeper)
self.nsd["network_functions"].append(
{"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_ext"})
- # Each Service Access Point (connection_point) in the nsd is getting its own container (default)
+ # Each Service Access Point (connection_point) in the nsd is
+ # getting its own container (default)
elif sap["type"] == "internal" or sap["type"] == "management":
# add SAP to self.vnfds
if SAP_VNFD is None:
- sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
+ sapfile = pkg_resources.resource_filename(
+ __name__, "sap_vnfd.yml")
else:
sapfile = SAP_VNFD
sap_vnfd = load_yaml(sapfile)
self.nsd["network_functions"].append(
{"vnf_id": sap_docker_name, "vnf_name": sap_docker_name, "vnf_type": "sap_int"})
- LOG.debug("Loaded SAP: name: {0}, type: {1}".format(sap_docker_name, sap['type']))
+ LOG.debug("Loaded SAP: name: {0}, type: {1}".format(
+ sap_docker_name, sap['type']))
# create sap lists
- self.saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
- self.saps_int = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "internal"]
+ self.saps_ext = [self.saps[sap]['name']
+ for sap in self.saps if self.saps[sap]["type"] == "external"]
+ self.saps_int = [self.saps[sap]['name']
+ for sap in self.saps if self.saps[sap]["type"] == "internal"]
def _start_sap(self, sap, instance_uuid):
if not DEPLOY_SAP:
return
- LOG.info('start SAP: {0} ,type: {1}'.format(sap['name'],sap['type']))
+ LOG.info('start SAP: {0} ,type: {1}'.format(sap['name'], sap['type']))
if sap["type"] == "internal":
vnfi = None
if not GK_STANDALONE_MODE:
for link in eline_fwd_links:
# check if we need to deploy this link when its a management link:
if USE_DOCKER_MGMT:
- if self.check_mgmt_interface(link["connection_points_reference"]):
+ if self.check_mgmt_interface(
+ link["connection_points_reference"]):
continue
- src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
- dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
+ src_id, src_if_name, src_sap_id = parse_interface(
+ link["connection_points_reference"][0])
+ dst_id, dst_if_name, dst_sap_id = parse_interface(
+ link["connection_points_reference"][1])
setChaining = False
# check if there is a SAP in the link and chain everything together
if src_sap_id in self.saps and dst_sap_id in self.saps:
- LOG.info('2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
+ LOG.info(
+ '2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id, dst_sap_id))
continue
elif src_sap_id in self.saps_ext:
src_id = src_sap_id
- # set intf name to None so the chaining function will choose the first one
+ # set intf name to None so the chaining function will choose
+ # the first one
src_if_name = None
dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
if dst_vnfi is not None:
# choose first ip address in sap subnet
sap_net = self.saps[src_sap_id]['net']
- sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
- self._vnf_reconfigure_network(dst_vnfi, dst_if_name, sap_ip)
+ sap_ip = "{0}/{1}".format(str(sap_net[2]),
+ sap_net.prefixlen)
+ self._vnf_reconfigure_network(
+ dst_vnfi, dst_if_name, sap_ip)
setChaining = True
elif dst_sap_id in self.saps_ext:
dst_id = dst_sap_id
- # set intf name to None so the chaining function will choose the first one
+ # set intf name to None so the chaining function will choose
+ # the first one
dst_if_name = None
src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
if src_vnfi is not None:
sap_net = self.saps[dst_sap_id]['net']
- sap_ip = "{0}/{1}".format(str(sap_net[2]), sap_net.prefixlen)
- self._vnf_reconfigure_network(src_vnfi, src_if_name, sap_ip)
+ sap_ip = "{0}/{1}".format(str(sap_net[2]),
+ sap_net.prefixlen)
+ self._vnf_reconfigure_network(
+ src_vnfi, src_if_name, sap_ip)
setChaining = True
# Link between 2 VNFs
src_id = src_sap_id
if dst_sap_id in self.saps_int:
dst_id = dst_sap_id
- # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
+ # re-configure the VNFs IP assignment and ensure that a new
+ # subnet is used for each E-Link
src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
if src_vnfi is not None and dst_vnfi is not None:
eline_net = ELINE_SUBNETS.pop(0)
- ip1 = "{0}/{1}".format(str(eline_net[1]), eline_net.prefixlen)
- ip2 = "{0}/{1}".format(str(eline_net[2]), eline_net.prefixlen)
+ ip1 = "{0}/{1}".format(str(eline_net[1]),
+ eline_net.prefixlen)
+ ip2 = "{0}/{1}".format(str(eline_net[2]),
+ eline_net.prefixlen)
self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
setChaining = True
# Set the chaining
if setChaining:
- ret = GK.net.setChain(
+ GK.net.setChain(
src_id, dst_id,
vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
"Setting up E-Line link. (%s:%s) -> (%s:%s)" % (
src_id, src_if_name, dst_id, dst_if_name))
-
def _connect_elans(self, elan_fwd_links, instance_uuid):
"""
Connect all E-LAN links in the NSD
for link in elan_fwd_links:
# check if we need to deploy this link when its a management link:
if USE_DOCKER_MGMT:
- if self.check_mgmt_interface(link["connection_points_reference"]):
+ if self.check_mgmt_interface(
+ link["connection_points_reference"]):
continue
elan_vnf_list = []
- # check if an external SAP is in the E-LAN (then a subnet is already defined)
+ # check if an external SAP is in the E-LAN (then a subnet is
+ # already defined)
intfs_elan = [intf for intf in link["connection_points_reference"]]
lan_sap = self.check_ext_saps(intfs_elan)
if lan_sap:
lan_net = self.saps[lan_sap]['net']
lan_hosts = list(lan_net.hosts())
- sap_ip = str(lan_hosts.pop(0))
else:
lan_net = ELAN_SUBNETS.pop(0)
lan_hosts = list(lan_net.hosts())
for intf in link["connection_points_reference"]:
# skip external SAPs, they already have an ip
- vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf)
+ vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
+ intf)
if vnf_sap_docker_name in self.saps_ext:
- elan_vnf_list.append({'name': vnf_sap_docker_name, 'interface': vnf_interface})
+ elan_vnf_list.append(
+ {'name': vnf_sap_docker_name, 'interface': vnf_interface})
continue
- ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)), lan_net.prefixlen)
+ ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
+ lan_net.prefixlen)
vnf_id, intf_name, vnf_sap_id = parse_interface(intf)
# make sure we use the correct sap vnf name
if vnfi is not None:
self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
# add this vnf and interface to the E-LAN for tagging
- elan_vnf_list.append({'name': src_docker_name, 'interface': intf_name})
+ elan_vnf_list.append(
+ {'name': src_docker_name, 'interface': intf_name})
# install the VLAN tags for this E-LAN
GK.net.setLAN(elan_vnf_list)
-
def _load_docker_files(self):
"""
Get all paths to Dockerfiles from VNFDs and store them in dict.
Get all URLs to pre-build docker images in some repo.
:return:
"""
- # also merge sap dicts, because internal saps also need a docker container
+ # also merge sap dicts, because internal saps also need a docker
+ # container
all_vnfs = self.vnfds.copy()
all_vnfs.update(self.saps)
if url is not None:
url = url.replace("http://", "")
self.remote_docker_image_urls[k] = url
- LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
+ LOG.debug("Found Docker image URL (%r): %r" %
+ (k, self.remote_docker_image_urls[k]))
def _build_images_from_dockerfiles(self):
"""
if GK_STANDALONE_MODE:
return # do not build anything in standalone mode
dc = DockerClient()
- LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
+ LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
+ self.local_docker_files))
for k, v in self.local_docker_files.iteritems():
- for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
+ for line in dc.build(path=v.replace(
+ "Dockerfile", ""), tag=k, rm=False, nocache=False):
LOG.debug("DOCKER BUILD: %s" % line)
LOG.info("Docker image created: %s" % k)
"""
dc = DockerClient()
for url in self.remote_docker_image_urls.itervalues():
- if not FORCE_PULL: # only pull if not present (speedup for development)
+ # only pull if not present (speedup for development)
+ if not FORCE_PULL:
if len(dc.images.list(name=url)) > 0:
LOG.debug("Image %r present. Skipping pull." % url)
continue
# this seems to fail with latest docker api version 2.0.2
# dc.images.pull(url,
# insecure_registry=True)
- #using docker cli instead
+ # using docker cli instead
cmd = ["docker",
"pull",
url,
]
Popen(cmd).wait()
-
-
-
def _check_docker_image_exists(self, image_name):
"""
Query the docker service and check if the given image exists
sap_dict = self.saps[sap]
LOG.info("Placed SAP %r on DC %r" % (sap, str(sap_dict.get("dc"))))
-
def _calculate_cpu_cfs_values(self, cpu_time_percentage):
"""
Calculate cpu period and quota for CFS
# (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
# Attention minimum cpu_quota is 1ms (micro)
cpu_period = 1000000 # lets consider a fixed period of 1000000 microseconds for now
- LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
- cpu_quota = cpu_period * cpu_time_percentage # calculate the fraction of cpu time for this container
- # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
+ LOG.debug("cpu_period is %r, cpu_percentage is %r" %
+ (cpu_period, cpu_time_percentage))
+ # calculate the fraction of cpu time for this container
+ cpu_quota = cpu_period * cpu_time_percentage
+ # ATTENTION >= 1000 to avoid a invalid argument system error ... no
+ # idea why
if cpu_quota < 1000:
LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
cpu_quota = 1000
LOG.warning("Increased CPU quota to avoid system error.")
- LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
+ LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
+ (cpu_period, cpu_quota))
return int(cpu_period), int(cpu_quota)
def check_ext_saps(self, intf_list):
# check if the list of interfacs contains an external SAP
- saps_ext = [self.saps[sap]['name'] for sap in self.saps if self.saps[sap]["type"] == "external"]
+ saps_ext = [self.saps[sap]['name']
+ for sap in self.saps if self.saps[sap]["type"] == "external"]
for intf_name in intf_list:
- vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(intf_name)
+ vnf_id, vnf_interface, vnf_sap_docker_name = parse_interface(
+ intf_name)
if vnf_sap_docker_name in saps_ext:
return vnf_sap_docker_name
def check_mgmt_interface(self, intf_list):
- SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"] if 'management' in p.get('type')]
+ SAPs_mgmt = [p.get('id') for p in self.nsd["connection_points"]
+ if 'management' in p.get('type')]
for intf_name in intf_list:
if intf_name in SAPs_mgmt:
return True
+
"""
Some (simple) placement algorithms
"""
"""
Placement: Always use one and the same data center from the GK.dcs dict.
"""
+
def place(self, nsd, vnfds, saps, dcs):
for id, vnfd in vnfds.iteritems():
vnfd["dc"] = list(dcs.itervalues())[0]
"""
Placement: Distribute VNFs across all available DCs in a round robin fashion.
"""
+
def place(self, nsd, vnfds, saps, dcs):
c = 0
dcs_list = list(dcs.itervalues())
vnfd["dc"] = dcs_list[c % len(dcs_list)]
c += 1 # inc. c to use next DC
+
class RoundRobinDcPlacementWithSAPs(object):
"""
Placement: Distribute VNFs across all available DCs in a round robin fashion,
every SAP is instantiated on the same DC as the connected VNF.
"""
+
def place(self, nsd, vnfds, saps, dcs):
# place vnfs
# place SAPs
vlinks = nsd.get("virtual_links", [])
- eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
- elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]
+ eline_fwd_links = [l for l in vlinks if (
+ l["connectivity_type"] == "E-Line")]
+ elan_fwd_links = [l for l in vlinks if (
+ l["connectivity_type"] == "E-LAN")]
- # SAPs on E-Line links are placed on the same DC as the VNF on the E-Line
+ # SAPs on E-Line links are placed on the same DC as the VNF on the
+ # E-Line
for link in eline_fwd_links:
- src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
- dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])
+ src_id, src_if_name, src_sap_id = parse_interface(
+ link["connection_points_reference"][0])
+ dst_id, dst_if_name, dst_sap_id = parse_interface(
+ link["connection_points_reference"][1])
# check if there is a SAP in the link
if src_sap_id in saps:
# find SAP interfaces
intf_id, intf_name, intf_sap_id = parse_interface(intf)
if intf_sap_id in saps:
- dc = dcs_list[randint(0, dc_len-1)]
+ dc = dcs_list[randint(0, dc_len - 1)]
saps[intf_sap_id]['dc'] = dc
-
"""
Resource definitions and API endpoints
"""
elif len(request.data) > 0:
son_file = request.data
else:
- return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
+ return {"service_uuid": None, "size": 0, "sha1": None,
+ "error": "upload failed. file not found."}, 500
# generate a uuid to reference this package
service_uuid = str(uuid.uuid4())
file_hash = hashlib.sha1(str(son_file)).hexdigest()
if AUTO_DELETE:
service_list = copy.copy(GK.services)
for service_uuid in service_list:
- instances_list = copy.copy(GK.services[service_uuid].instances)
+ instances_list = copy.copy(
+ GK.services[service_uuid].instances)
for instance_uuid in instances_list:
# valid service and instance UUID, stop service
- GK.services.get(service_uuid).stop_service(instance_uuid)
- LOG.info("service instance with uuid %r stopped." % instance_uuid)
+ GK.services.get(service_uuid).stop_service(
+ instance_uuid)
+ LOG.info("service instance with uuid %r stopped." %
+ instance_uuid)
# create a service object and register it
s = Service(service_uuid, file_hash, upload_path)
if AUTO_DEPLOY:
# ok, we have a service uuid, lets start the service
reset_subnets()
- service_instance_uuid = GK.services.get(service_uuid).start_service()
+ GK.services.get(service_uuid).start_service()
# generate the JSON result
- return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
- except Exception as ex:
+ return {"service_uuid": service_uuid, "size": size,
+ "sha1": file_hash, "error": None}, 201
+ except BaseException:
LOG.exception("Service package upload failed:")
- return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
+ return {"service_uuid": None, "size": 0,
+ "sha1": None, "error": "upload failed"}, 500
def get(self):
"""
service_uuid = json_data.get("service_uuid")
# lets be a bit fuzzy here to make testing easier
- if (service_uuid is None or service_uuid=="latest") and len(GK.services) > 0:
- # if we don't get a service uuid, we simple start the first service in the list
+ if (service_uuid is None or service_uuid ==
+ "latest") and len(GK.services) > 0:
+ # if we don't get a service uuid, we simple start the first service
+ # in the list
service_uuid = list(GK.services.iterkeys())[0]
if service_uuid in GK.services:
# ok, we have a service uuid, lets start the service
- service_instance_uuid = GK.services.get(service_uuid).start_service()
+ service_instance_uuid = GK.services.get(
+ service_uuid).start_service()
return {"service_instance_uuid": service_instance_uuid}, 201
return "Service not found", 404
# try to be fuzzy
if service_uuid is None and len(GK.services) > 0:
- #if we don't get a service uuid, we simply stop the last service in the list
+ # if we don't get a service uuid, we simply stop the last service
+ # in the list
service_uuid = list(GK.services.iterkeys())[0]
- if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
- instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]
+ if instance_uuid is None and len(
+ GK.services[service_uuid].instances) > 0:
+ instance_uuid = list(
+ GK.services[service_uuid].instances.iterkeys())[0]
if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
# valid service and instance UUID, stop service
GK.services.get(service_uuid).stop_service(instance_uuid)
- return "service instance with uuid %r stopped." % instance_uuid,200
+ return "service instance with uuid %r stopped." % instance_uuid, 200
return "Service not found", 404
+
class Exit(fr.Resource):
def put(self):
GK = Gatekeeper()
-
# create a single, global GK object
GK = None
initialize_GK()
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages', '/api/v2/packages')
-api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
+api.add_resource(Instantiations, '/instantiations',
+ '/api/v2/instantiations', '/api/v2/requests')
api.add_resource(Exit, '/emulator/exit')
-
def start_rest_api(host, port, datacenters=dict()):
GK.dcs = datacenters
GK.net = get_dc_network()
try:
r = yaml.load(f)
except yaml.YAMLError as exc:
- LOG.exception("YAML parse error")
+ LOG.exception("YAML parse error: %r" % str(exc))
r = dict()
return r
return vnf_id, vnf_interface, vnf_sap_docker_name
+
def reset_subnets():
# private subnet definitions for the generated interfaces
# 10.10.xxx.0/24
global ELINE_SUBNETS
ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
+
if __name__ == '__main__':
"""
Lets allow to run the API in standalone mode.
GK_STANDALONE_MODE = True
logging.getLogger("werkzeug").setLevel(logging.INFO)
start_rest_api("0.0.0.0", 8000)
-
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import requests
# set this to localhost for now
# this is correct for son-emu started outside of a container or as a container with net=host
-#TODO if prometheus sdk DB is started outside of emulator, place these globals in an external SDK config file?
+# TODO if prometheus sdk DB is started outside of emulator, place these
+# globals in an external SDK config file?
prometheus_ip = 'localhost'
# when sdk is started with docker-compose, we could use
# prometheus_ip = 'prometheus'
# logging.info('return:{0}'.format(ret))
try:
ret = ret['data']['result'][0]['value']
- except:
+ except BaseException:
ret = None
else:
ret = None
- return ret
\ No newline at end of file
+ return ret
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from requests import get, put, delete
from tabulate import tabulate
import pprint
import argparse
-import json
from subprocess import Popen
pp = pprint.PrettyPrinter(indent=4)
def list(self, args):
- list = get('%s/restapi/compute/%s' % (args.get("endpoint"), args.get('datacenter'))).json()
+ list = get('%s/restapi/compute/%s' %
+ (args.get("endpoint"), args.get('datacenter'))).json()
table = []
for c in list:
if len(c) > 1:
name = c[0]
status = c[1]
- #eth0ip = status.get("docker_network", "-")
- netw_list = [netw_dict['intf_name'] for netw_dict in status.get("network")]
- dc_if_list = [netw_dict['dc_portname'] for netw_dict in status.get("network")]
+ # eth0ip = status.get("docker_network", "-")
+ netw_list = [netw_dict['intf_name']
+ for netw_dict in status.get("network")]
+ dc_if_list = [netw_dict['dc_portname']
+ for netw_dict in status.get("network")]
table.append([status.get("datacenter"),
name,
status.get("image"),
','.join(netw_list),
','.join(dc_if_list)])
- #status.get("state").get("Status")]
+ # status.get("state").get("Status")]
headers = ["Datacenter",
"Container",
Popen(['xterm', '-xrm', 'XTerm.vt100.allowTitleOps: false', '-T', vnf_name,
'-e', "docker exec -it mn.{0} /bin/bash".format(vnf_name)])
+
parser = argparse.ArgumentParser(description="""son-emu-cli compute
-
+
Examples:
- son-emu-cli compute start -d dc2 -n client -i sonatanfv/sonata-iperf3-vnf
- son-emu-cli list
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from requests import get
from tabulate import tabulate
import pprint
pp = pprint.PrettyPrinter(indent=4)
+
class RestApiClient():
def __init__(self):
else:
print("Command not implemented.")
- def list(self,args):
+ def list(self, args):
list = get('%s/restapi/datacenter' % args.get('endpoint')).json()
table = []
for d in list:
# for each dc add a line to the output table
if len(d) > 0:
table.append([d.get("label"),
- d.get("internalname"),
- d.get("switch"),
- d.get("n_running_containers"),
- len(d.get("metadata"))])
+ d.get("internalname"),
+ d.get("switch"),
+ d.get("n_running_containers"),
+ len(d.get("metadata"))])
headers = ["Label",
- "Internal Name",
- "Switch",
- "# Containers",
- "# Metadata Items"]
- print (tabulate(table, headers=headers, tablefmt="grid"))
-
- def status(self,args):
- list = get('%s/restapi/datacenter/%s' % ( args.get("endpoint"), args.get("datacenter"))).json()
+ "Internal Name",
+ "Switch",
+ "# Containers",
+ "# Metadata Items"]
+ print(tabulate(table, headers=headers, tablefmt="grid"))
+
+ def status(self, args):
+ list = get('%s/restapi/datacenter/%s' %
+ (args.get("endpoint"), args.get("datacenter"))).json()
table = []
table.append([list.get('label'),
- list.get('internalname'),
- list.get('switch'),
- list.get('n_running_containers'),
- len(list.get('metadata'))])
+ list.get('internalname'),
+ list.get('switch'),
+ list.get('n_running_containers'),
+ len(list.get('metadata'))])
headers = ["Label",
- "Internal Name",
- "Switch",
- "# Containers",
- "# Metadata Items"]
+ "Internal Name",
+ "Switch",
+ "# Containers",
+ "# Metadata Items"]
- print (tabulate(table, headers=headers, tablefmt="grid"))
+ print(tabulate(table, headers=headers, tablefmt="grid"))
parser = argparse.ArgumentParser(description='son-emu-cli datacenter')
args = vars(parser.parse_args(argv))
c = RestApiClient()
c.execute_command(args)
-
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-from requests import get, put, delete
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from requests import get, put
import pprint
import argparse
from emuvim.cli import prometheus
pp = pprint.PrettyPrinter(indent=4)
+
class RestApiClient():
def __init__(self):
params = self._create_dict(
vnf_name=self._parse_vnf_name(args.get("vnf_name")),
- vnf_interface = self._parse_vnf_interface(args.get("vnf_name")),
- metric = args.get("metric"))
+ vnf_interface=self._parse_vnf_interface(args.get("vnf_name")),
+ metric=args.get("metric"))
url = "{0}/restapi/monitor/interface".format(args.get("endpoint"))
response = put(url, params=params)
# This functions makes it more user-friendly to create the correct prometheus query
# <uuid> is replaced by the correct uuid of the deployed vnf container
vnf_name = self._parse_vnf_name(args.get("vnf_name"))
- vnf_interface = self._parse_vnf_interface(args.get("vnf_name"))
- dc_label = args.get("datacenter")
query = args.get("query")
vnf_status = get("%s/restapi/compute/%s/%s" %
- (args.get("endpoint"),
- args.get("datacenter"),
- vnf_name)).json()
+ (args.get("endpoint"),
+ args.get("datacenter"),
+ vnf_name)).json()
uuid = vnf_status['id']
query = query.replace('<uuid>', uuid)
def _parse_vnf_interface(self, vnf_name_str):
try:
vnf_interface = vnf_name_str.split(':')[1]
- except:
+ except BaseException:
vnf_interface = None
return vnf_interface
def _create_dict(self, **kwargs):
return kwargs
+
parser = argparse.ArgumentParser(description='son-emu-cli monitor')
parser.add_argument(
"command",
- choices=['setup_metric', 'stop_metric', 'setup_flow', 'stop_flow','prometheus'],
+ choices=['setup_metric', 'stop_metric',
+ 'setup_flow', 'stop_flow', 'prometheus'],
help="setup/stop a metric/flow to be monitored or query Prometheus")
parser.add_argument(
"--vnf_name", "-vnf", dest="vnf_name",
default="http://127.0.0.1:5001",
help="REST API endpoint of son-emu (default:http://127.0.0.1:5001)")
+
def main(argv):
args = vars(parser.parse_args(argv))
c = RestApiClient()
- c.execute_command(args)
\ No newline at end of file
+ c.execute_command(args)
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from requests import get,put, delete
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from requests import put, delete
import argparse
def add(self, args):
params = self._create_dict(
vnf_src_name=self._parse_vnf_name(args.get("source")),
- vnf_dst_name = self._parse_vnf_name(args.get("destination")),
+ vnf_dst_name=self._parse_vnf_name(args.get("destination")),
vnf_src_interface=self._parse_vnf_interface(args.get("source")),
- vnf_dst_interface=self._parse_vnf_interface(args.get("destination")),
+ vnf_dst_interface=self._parse_vnf_interface(
+ args.get("destination")),
weight=args.get("weight"),
match=args.get("match"),
bidirectional=args.get("bidirectional"),
def remove(self, args):
params = self._create_dict(
- vnf_src_name = self._parse_vnf_name(args.get("source")),
- vnf_dst_name = self._parse_vnf_name(args.get("destination")),
+ vnf_src_name=self._parse_vnf_name(args.get("source")),
+ vnf_dst_name=self._parse_vnf_name(args.get("destination")),
vnf_src_interface=self._parse_vnf_interface(args.get("source")),
- vnf_dst_interface=self._parse_vnf_interface(args.get("destination")),
+ vnf_dst_interface=self._parse_vnf_interface(
+ args.get("destination")),
weight=args.get("weight"),
match=args.get("match"),
bidirectional=args.get("bidirectional"),
def _parse_vnf_interface(self, vnf_name_str):
try:
vnf_interface = vnf_name_str.split(':')[1]
- except:
+ except BaseException:
vnf_interface = None
return vnf_interface
return kwargs
def _nice_print(self, text):
- # some modules seem to return unicode strings where newlines, other special characters are escaped
+ # some modules seem to return unicode strings where newlines, other
+ # special characters are escaped
text = str(text).replace('\\n', '\n')
text = str(text).replace('\\"', '"')
return text
+
parser = argparse.ArgumentParser(description='son-emu-cli network')
parser.add_argument(
"command",
default="http://127.0.0.1:5001",
help="REST API endpoint of son-emu (default:http://127.0.0.1:5001)")
+
def main(argv):
args = vars(parser.parse_args(argv))
c = RestApiClient()
#!/usr/bin/python
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
- Simple CLI client to interact with a running emulator.
-
- The CLI offers different tools, e.g., compute, network, ...
- Each of these tools is implemented as an independent Python
- module.
-
- cli compute start dc1 my_name flavor_a
- cli network create dc1 11.0.0.0/24
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import sys
from emuvim.cli.rest import compute as restcom
from emuvim.cli.rest import datacenter as restdc
from emuvim.cli.rest import monitor as restmon
from emuvim.cli.rest import network as restnetw
+
def help():
print("Missing arguments.\n")
print("Usage: son-emu-cli compute|datacenter|network|monitor <arguments>\n")
print("\tson-emu-cli monitor --help")
exit(0)
+
def main():
if len(sys.argv) < 2:
help()
-"""
-Copyright (c) 2017 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
\ No newline at end of file
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""\r
-Copyright (c) 2015 SONATA-NFV\r
-ALL RIGHTS RESERVED.\r
-\r
-Licensed under the Apache License, Version 2.0 (the "License");\r
-you may not use this file except in compliance with the License.\r
-You may obtain a copy of the License at\r
-\r
- http://www.apache.org/licenses/LICENSE-2.0\r
-\r
-Unless required by applicable law or agreed to in writing, software\r
-distributed under the License is distributed on an "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
-See the License for the specific language governing permissions and\r
-limitations under the License.\r
-\r
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]\r
-nor the names of its contributors may be used to endorse or promote\r
-products derived from this software without specific prior written\r
-permission.\r
-\r
-This work has been performed in the framework of the SONATA project,\r
-funded by the European Commission under Grant number 671517 through\r
-the Horizon 2020 and 5G-PPP programmes. The authors would like to\r
-acknowledge the contributions of their colleagues of the SONATA\r
-partner consortium (www.sonata-nfv.eu).\r
-"""\r
-\r
+# Copyright (c) 2015 SONATA-NFV and Paderborn University\r
+# ALL RIGHTS RESERVED.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+# http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+#\r
+# Neither the name of the SONATA-NFV, Paderborn University\r
+# nor the names of its contributors may be used to endorse or promote\r
+# products derived from this software without specific prior written\r
+# permission.\r
+#\r
+# This work has been performed in the framework of the SONATA project,\r
+# funded by the European Commission under Grant number 671517 through\r
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to\r
+# acknowledge the contributions of their colleagues of the SONATA\r
+# partner consortium (www.sonata-nfv.eu).\r
import logging\r
-import sys\r
-from mininet.node import OVSSwitch\r
+from mininet.node import OVSSwitch\r
import ast\r
import time\r
-from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \\r
- pushadd_to_gateway, push_to_gateway, delete_from_gateway\r
+from prometheus_client import Gauge, CollectorRegistry, \\r
+ pushadd_to_gateway, delete_from_gateway\r
import threading\r
from subprocess import Popen\r
import os\r
\r
COOKIE_MASK = 0xffffffff\r
\r
+\r
class DCNetworkMonitor():\r
def __init__(self, net):\r
self.net = net\r
self.prom_rx_byte_count = Gauge('sonemu_rx_count_bytes', 'Total number of bytes received',\r
['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)\r
\r
- self.prom_metrics={'tx_packets':self.prom_tx_packet_count, 'rx_packets':self.prom_rx_packet_count,\r
- 'tx_bytes':self.prom_tx_byte_count,'rx_bytes':self.prom_rx_byte_count}\r
+ self.prom_metrics = {'tx_packets': self.prom_tx_packet_count, 'rx_packets': self.prom_rx_packet_count,\r
+ 'tx_bytes': self.prom_tx_byte_count, 'rx_bytes': self.prom_rx_byte_count}\r
\r
# list of installed metrics to monitor\r
# each entry can contain this data\r
self.monitor_thread = threading.Thread(target=self.get_network_metrics)\r
self.monitor_thread.start()\r
\r
- self.monitor_flow_thread = threading.Thread(target=self.get_flow_metrics)\r
+ self.monitor_flow_thread = threading.Thread(\r
+ target=self.get_flow_metrics)\r
self.monitor_flow_thread.start()\r
\r
# helper tools\r
- # cAdvisor, Prometheus pushgateway are started as external container, to gather monitoring metric in son-emu\r
+ # cAdvisor, Prometheus pushgateway are started as external container,\r
+ # to gather monitoring metric in son-emu\r
self.pushgateway_process = self.start_PushGateway()\r
self.cadvisor_process = self.start_cAdvisor()\r
\r
-\r
# first set some parameters, before measurement can start\r
- def setup_flow(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=0):\r
+\r
+ def setup_flow(self, vnf_name, vnf_interface=None,\r
+ metric='tx_packets', cookie=0):\r
\r
flow_metric = {}\r
\r
break\r
\r
if not vnf_switch:\r
- logging.exception("vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface))\r
- return "vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface)\r
+ logging.exception("vnf switch of {0}:{1} not found!".format(\r
+ vnf_name, vnf_interface))\r
+ return "vnf switch of {0}:{1} not found!".format(\r
+ vnf_name, vnf_interface)\r
\r
try:\r
# default port direction to monitor\r
next_node = self.net.getNodeByName(vnf_switch)\r
\r
if not isinstance(next_node, OVSSwitch):\r
- logging.info("vnf: {0} is not connected to switch".format(vnf_name))\r
+ logging.info(\r
+ "vnf: {0} is not connected to switch".format(vnf_name))\r
return\r
\r
flow_metric['previous_measurement'] = 0\r
self.flow_metrics.append(flow_metric)\r
self.monitor_flow_lock.release()\r
\r
- logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))\r
- return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)\r
+ logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric, cookie))\r
+ return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric, cookie)\r
\r
except Exception as ex:\r
logging.exception("setup_metric error.")\r
labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
set(float('nan'))\r
\r
- delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+ delete_from_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller')\r
\r
self.monitor_flow_lock.release()\r
\r
- logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))\r
- return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)\r
-\r
- return 'Error stopping monitoring flow: {0} on {1}:{2}'.format(metric, vnf_name, vnf_interface)\r
+ logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric, cookie))\r
+ return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric, cookie)\r
\r
+ return 'Error stopping monitoring flow: {0} on {1}:{2}'.format(\r
+ metric, vnf_name, vnf_interface)\r
\r
# first set some parameters, before measurement can start\r
+\r
def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):\r
\r
network_metric = {}\r
break\r
\r
if 'mon_port' not in network_metric:\r
- logging.exception("vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface))\r
- return "vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface)\r
+ logging.exception("vnf interface {0}:{1} not found!".format(\r
+ vnf_name, vnf_interface))\r
+ return "vnf interface {0}:{1} not found!".format(\r
+ vnf_name, vnf_interface)\r
\r
try:\r
# default port direction to monitor\r
next_node = self.net.getNodeByName(vnf_switch)\r
\r
if not isinstance(next_node, OVSSwitch):\r
- logging.info("vnf: {0} is not connected to switch".format(vnf_name))\r
+ logging.info(\r
+ "vnf: {0} is not connected to switch".format(vnf_name))\r
return\r
\r
network_metric['previous_measurement'] = 0\r
network_metric['previous_monitor_time'] = 0\r
\r
-\r
network_metric['switch_dpid'] = int(str(next_node.dpid), 16)\r
network_metric['metric_key'] = metric\r
\r
self.network_metrics.append(network_metric)\r
self.monitor_lock.release()\r
\r
-\r
- logging.info('Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))\r
- return 'Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)\r
+ logging.info('Started monitoring: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric))\r
+ return 'Started monitoring: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric)\r
\r
except Exception as ex:\r
logging.exception("setup_metric error.")\r
self.network_metrics.remove(metric_dict)\r
\r
# set values to NaN, prometheus api currently does not support removal of metrics\r
- #self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))\r
+ # self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))\r
self.prom_metrics[metric_dict['metric_key']]. \\r
labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=None). \\r
set(float('nan'))\r
# 1 single monitor job for all metrics of the SDN controller\r
# we can only remove from the pushgateway grouping keys(labels) which we have defined for the add_to_pushgateway\r
# we can not specify labels from the metrics to be removed\r
- # if we need to remove the metrics seperatelty, we need to give them a separate grouping key, and probably a diffferent registry also\r
- delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+ # if we need to remove the metrics seperatelty, we need to give\r
+ # them a separate grouping key, and probably a diffferent\r
+ # registry also\r
+ delete_from_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller')\r
\r
self.monitor_lock.release()\r
\r
- logging.info('Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))\r
- return 'Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)\r
+ logging.info('Stopped monitoring: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric))\r
+ return 'Stopped monitoring: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric)\r
\r
# delete everything from this vnf\r
elif metric_dict['vnf_name'] == vnf_name and vnf_interface is None and metric is None:\r
self.monitor_lock.acquire()\r
self.network_metrics.remove(metric_dict)\r
- logging.info('remove metric from monitor: vnf_name:{0} vnf_interface:{1} mon_port:{2}'.format(metric_dict['vnf_name'], metric_dict['vnf_interface'], metric_dict['mon_port']))\r
+ logging.info('remove metric from monitor: vnf_name:{0} vnf_interface:{1} mon_port:{2}'.format(\r
+ metric_dict['vnf_name'], metric_dict['vnf_interface'], metric_dict['mon_port']))\r
\r
- delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+ delete_from_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller')\r
self.monitor_lock.release()\r
continue\r
\r
logging.info('Stopped monitoring vnf: {0}'.format(vnf_name))\r
return 'Stopped monitoring: {0}'.format(vnf_name)\r
else:\r
- return 'Error stopping monitoring metric: {0} on {1}:{2}'.format(metric, vnf_name, vnf_interface)\r
+ return 'Error stopping monitoring metric: {0} on {1}:{2}'.format(\r
+ metric, vnf_name, vnf_interface)\r
\r
\r
# get all metrics defined in the list and export it to Prometheus\r
+\r
def get_flow_metrics(self):\r
while self.start_monitoring:\r
\r
data['cookie_mask'] = COOKIE_MASK\r
\r
if 'tx' in flow_dict['metric_key']:\r
- data['match'] = {'in_port':flow_dict['mon_port']}\r
+ data['match'] = {'in_port': flow_dict['mon_port']}\r
elif 'rx' in flow_dict['metric_key']:\r
data['out_port'] = flow_dict['mon_port']\r
\r
-\r
# query Ryu\r
- ret = self.net.ryu_REST('stats/flow', dpid=flow_dict['switch_dpid'], data=data)\r
+ ret = self.net.ryu_REST(\r
+ 'stats/flow', dpid=flow_dict['switch_dpid'], data=data)\r
if isinstance(ret, dict):\r
flow_stat_dict = ret\r
elif isinstance(ret, basestring):\r
\r
self.set_flow_metric(flow_dict, flow_stat_dict)\r
\r
-\r
try:\r
if len(self.flow_metrics) > 0:\r
- pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
- except Exception, e:\r
- logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))\r
+ pushadd_to_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
+ except Exception as e:\r
+ logging.warning(\r
+ "Pushgateway not reachable: {0} {1}".format(Exception, e))\r
\r
self.monitor_flow_lock.release()\r
time.sleep(1)\r
self.monitor_lock.acquire()\r
\r
# group metrics by dpid to optimize the rest api calls\r
- dpid_list = [metric_dict['switch_dpid'] for metric_dict in self.network_metrics]\r
+ dpid_list = [metric_dict['switch_dpid']\r
+ for metric_dict in self.network_metrics]\r
dpid_set = set(dpid_list)\r
\r
for dpid in dpid_set:\r
port_stat_dict = None\r
\r
metric_list = [metric_dict for metric_dict in self.network_metrics\r
- if int(metric_dict['switch_dpid'])==int(dpid)]\r
+ if int(metric_dict['switch_dpid']) == int(dpid)]\r
\r
for metric_dict in metric_list:\r
self.set_network_metric(metric_dict, port_stat_dict)\r
\r
try:\r
if len(self.network_metrics) > 0:\r
- pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
- except Exception, e:\r
- logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))\r
+ pushadd_to_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
+ except Exception as e:\r
+ logging.warning(\r
+ "Pushgateway not reachable: {0} {1}".format(Exception, e))\r
\r
self.monitor_lock.release()\r
time.sleep(1)\r
\r
- # add metric to the list to export to Prometheus, parse the Ryu port-stats reply\r
+ # add metric to the list to export to Prometheus, parse the Ryu port-stats\r
+ # reply\r
def set_network_metric(self, metric_dict, port_stat_dict):\r
# vnf tx is the datacenter switch rx and vice-versa\r
metric_key = self.switch_tx_rx(metric_dict['metric_key'])\r
switch_dpid = metric_dict['switch_dpid']\r
vnf_name = metric_dict['vnf_name']\r
vnf_interface = metric_dict['vnf_interface']\r
- previous_measurement = metric_dict['previous_measurement']\r
previous_monitor_time = metric_dict['previous_monitor_time']\r
mon_port = metric_dict['mon_port']\r
for port_stat in port_stat_dict[str(switch_dpid)]:\r
if port_stat['port_no'] == 'LOCAL':\r
continue\r
if int(port_stat['port_no']) == int(mon_port):\r
- port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)\r
+ port_uptime = port_stat['duration_sec'] + \\r
+ port_stat['duration_nsec'] * 10 ** (-9)\r
this_measurement = int(port_stat[metric_key])\r
\r
# set prometheus metric\r
# also the rate is calculated here, but not used for now\r
# (rate can be easily queried from prometheus also)\r
if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:\r
- metric_dict['previous_measurement'] = int(port_stat[metric_key])\r
+ metric_dict['previous_measurement'] = int(\r
+ port_stat[metric_key])\r
metric_dict['previous_monitor_time'] = port_uptime\r
# do first measurement\r
- #time.sleep(1)\r
- #self.monitor_lock.release()\r
+ # time.sleep(1)\r
+ # self.monitor_lock.release()\r
# rate cannot be calculated yet (need a first measurement)\r
- metric_rate = None\r
-\r
- else:\r
- time_delta = (port_uptime - metric_dict['previous_monitor_time'])\r
- #metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)\r
-\r
metric_dict['previous_measurement'] = this_measurement\r
metric_dict['previous_monitor_time'] = port_uptime\r
return\r
\r
- logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))\r
- logging.exception('monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))\r
- logging.exception('monitored network_metrics:{0}'.format(self.network_metrics))\r
+ logging.exception('metric {0} not found on {1}:{2}'.format(\r
+ metric_key, vnf_name, vnf_interface))\r
+ logging.exception(\r
+ 'monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))\r
+ logging.exception(\r
+ 'monitored network_metrics:{0}'.format(self.network_metrics))\r
logging.exception('port dict:{0}'.format(port_stat_dict))\r
- return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)\r
+ return 'metric {0} not found on {1}:{2}'.format(\r
+ metric_key, vnf_name, vnf_interface)\r
\r
def set_flow_metric(self, metric_dict, flow_stat_dict):\r
# vnf tx is the datacenter switch rx and vice-versa\r
switch_dpid = metric_dict['switch_dpid']\r
vnf_name = metric_dict['vnf_name']\r
vnf_interface = metric_dict['vnf_interface']\r
- previous_measurement = metric_dict['previous_measurement']\r
- previous_monitor_time = metric_dict['previous_monitor_time']\r
cookie = metric_dict['cookie']\r
\r
counter = 0\r
counter += flow_stat['packet_count']\r
\r
# flow_uptime disabled for now (can give error)\r
- #flow_stat = flow_stat_dict[str(switch_dpid)][0]\r
- #flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)\r
+ # flow_stat = flow_stat_dict[str(switch_dpid)][0]\r
+ # flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)\r
\r
self.prom_metrics[metric_dict['metric_key']]. \\r
labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
set(counter)\r
\r
def start_Prometheus(self, port=9090):\r
- # prometheus.yml configuration file is located in the same directory as this file\r
+ # prometheus.yml configuration file is located in the same directory as\r
+ # this file\r
cmd = ["docker",\r
"run",\r
"--rm",\r
"-p", "{0}:9090".format(port),\r
- "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(os.path.dirname(os.path.abspath(__file__))),\r
- "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(os.path.dirname(os.path.abspath(__file__))),\r
+ "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(\r
+ os.path.dirname(os.path.abspath(__file__))),\r
+ "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(\r
+ os.path.dirname(os.path.abspath(__file__))),\r
"--name", "prometheus",\r
"prom/prometheus"\r
]\r
"--volume=/var/lib/docker/:/var/lib/docker:ro",\r
"--publish={0}:8080".format(port),\r
"--name=cadvisor",\r
- "--label",'com.containernet=""',\r
+ "--label", 'com.containernet=""',\r
"--detach=true",\r
"google/cadvisor:latest",\r
- #"--storage_duration=1m0s",\r
- #"--allow_dynamic_housekeeping=true",\r
- #"--housekeeping_interval=1s",\r
+ # "--storage_duration=1m0s",\r
+ # "--allow_dynamic_housekeeping=true",\r
+ # "--housekeeping_interval=1s",\r
]\r
logging.info('Start cAdvisor container {0}'.format(cmd))\r
return Popen(cmd)\r
self.monitor_thread.join()\r
self.monitor_flow_thread.join()\r
\r
- # these containers are used for monitoring but are started now outside of son-emu\r
+ # these containers are used for monitoring but are started now outside\r
+ # of son-emu\r
\r
if self.pushgateway_process is not None:\r
logging.info('stopping pushgateway container')\r
logging.info('stopping cadvisor container')\r
self._stop_container('cadvisor')\r
\r
- def switch_tx_rx(self,metric=''):\r
+ def switch_tx_rx(self, metric=''):\r
# when monitoring vnfs, the tx of the datacenter switch is actually the rx of the vnf\r
- # so we need to change the metric name to be consistent with the vnf rx or tx\r
+ # so we need to change the metric name to be consistent with the vnf rx\r
+ # or tx\r
if 'tx' in metric:\r
- metric = metric.replace('tx','rx')\r
+ metric = metric.replace('tx', 'rx')\r
elif 'rx' in metric:\r
- metric = metric.replace('rx','tx')\r
+ metric = metric.replace('rx', 'tx')\r
\r
return metric\r
\r
def _stop_container(self, name):\r
\r
- #container = self.dockercli.containers.get(name)\r
- #container.stop()\r
- #container.remove(force=True)\r
+ # container = self.dockercli.containers.get(name)\r
+ # container.stop()\r
+ # container.remove(force=True)\r
\r
# the only robust way to stop these containers is via Popen, it seems\r
time.sleep(1)\r
cmd = ['docker', 'rm', '-f', name]\r
Popen(cmd)\r
\r
-\r
def update_skewmon(self, vnf_name, resource_name, action):\r
\r
ret = ''\r
configfile = open(config_file_path, 'a+')\r
try:\r
config = json.load(configfile)\r
- except:\r
- #not a valid json file or empty\r
+ except BaseException:\r
+ # not a valid json file or empty\r
config = {}\r
\r
- #initialize config file\r
+ # initialize config file\r
if len(self.skewmon_metrics) == 0:\r
config = {}\r
json.dump(config, configfile)\r
if action == 'start':\r
# add a new vnf to monitor\r
config[key] = dict(VNF_NAME=vnf_name,\r
- VNF_ID=vnf_id,\r
- VNF_METRIC=resource_name)\r
- ret = 'adding to skewness monitor: {0} {1} '.format(vnf_name, resource_name)\r
+ VNF_ID=vnf_id,\r
+ VNF_METRIC=resource_name)\r
+ ret = 'adding to skewness monitor: {0} {1} '.format(\r
+ vnf_name, resource_name)\r
logging.info(ret)\r
elif action == 'stop':\r
# remove vnf to monitor\r
config.pop(key)\r
- ret = 'removing from skewness monitor: {0} {1} '.format(vnf_name, resource_name)\r
+ ret = 'removing from skewness monitor: {0} {1} '.format(\r
+ vnf_name, resource_name)\r
logging.info(ret)\r
\r
self.skewmon_metrics = config\r
# start container if not running\r
ret += 'starting skewness monitor'\r
logging.info('starting skewness monitor')\r
- volumes = {'/sys/fs/cgroup':{'bind':'/sys/fs/cgroup', 'mode':'ro'},\r
- '/tmp/skewmon.cfg':{'bind':'/config.txt', 'mode':'ro'}}\r
+ volumes = {'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'},\r
+ '/tmp/skewmon.cfg': {'bind': '/config.txt', 'mode': 'ro'}}\r
self.dockercli.containers.run('skewmon',\r
detach=True,\r
volumes=volumes,\r
started = False\r
wait_time = 0\r
while not started:\r
- list1 = self.dockercli.containers.list(filters={'status': 'running', 'name': 'prometheus'})\r
+ list1 = self.dockercli.containers.list(\r
+ filters={'status': 'running', 'name': 'prometheus'})\r
if len(list1) >= 1:\r
time.sleep(1)\r
started = True\r
:return:\r
"""\r
\r
-\r
if vnf_list is None:\r
vnf_list = []\r
if not isinstance(vnf_list, list):\r
\r
return self.start_xterm(vnf_list)\r
\r
-\r
# start an xterm for the specfified vnfs\r
+\r
def start_xterm(self, vnf_names):\r
# start xterm for all vnfs\r
for vnf_name in vnf_names:\r
if len(vnf_names) == 0:\r
ret = 'vnf list is empty, no xterms started'\r
return ret\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-\r
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
import site
import json
from mininet.net import Containernet
-from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
+from mininet.node import OVSSwitch, OVSKernelSwitch, Docker, RemoteController
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.clean import cleanup
import networkx as nx
from emuvim.dcemulator.monitoring import DCNetworkMonitor
-from emuvim.dcemulator.node import Datacenter, EmulatorCompute, EmulatorExtSAP
+from emuvim.dcemulator.node import Datacenter, EmulatorCompute
from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
LOG = logging.getLogger("dcemulator.net")
# default cookie number for new flow-rules
DEFAULT_COOKIE = 10
+
class DCNetwork(Containernet):
"""
Wraps the original Mininet/Containernet class and provides
"""
def __init__(self, controller=RemoteController, monitor=False,
- enable_learning=False, # learning switch behavior of the default ovs switches icw Ryu controller can be turned off/on, needed for E-LAN functionality
+ enable_learning=False,
+ # learning switch behavior of the default ovs switches icw Ryu
+ # controller can be turned off/on, needed for E-LAN
+ # functionality
dc_emulation_max_cpu=1.0, # fraction of overall CPU time for emulation
dc_emulation_max_mem=512, # emulation max mem in MB
**kwargs):
# members
self.dcs = {}
self.ryu_process = None
- #list of deployed nsds.E_Lines and E_LANs (uploaded from the dummy gatekeeper)
+ # list of deployed nsds.E_Lines and E_LANs (uploaded from the dummy
+ # gatekeeper)
self.deployed_nsds = []
self.deployed_elines = []
self.deployed_elans = []
self.installed_chains = []
-
# always cleanup environment before we start the emulator
self.killRyu()
cleanup()
# default switch configuration
enable_ryu_learning = False
- if enable_learning :
+ if enable_learning:
self.failMode = 'standalone'
enable_ryu_learning = True
else:
"""
if label in self.dcs:
raise Exception("Data center label already exists: %s" % label)
- dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
+ dc = Datacenter(label, metadata=metadata,
+ resource_log_path=resource_log_path)
dc.net = self # set reference to network
self.dcs[label] = dc
dc.create() # finally create the data center in our Mininet instance
assert node2 is not None
# ensure type of node1
- if isinstance( node1, basestring ):
+ if isinstance(node1, basestring):
if node1 in self.dcs:
node1 = self.dcs[node1].switch
- if isinstance( node1, Datacenter ):
+ if isinstance(node1, Datacenter):
node1 = node1.switch
# ensure type of node2
- if isinstance( node2, basestring ):
+ if isinstance(node2, basestring):
if node2 in self.dcs:
node2 = self.dcs[node2].switch
- if isinstance( node2, Datacenter ):
+ if isinstance(node2, Datacenter):
node2 = node2.switch
# try to give containers a default IP
- if isinstance( node1, Docker ):
+ if isinstance(node1, Docker):
if "params1" not in params:
params["params1"] = {}
if "ip" not in params["params1"]:
params["params1"]["ip"] = self.getNextIp()
- if isinstance( node2, Docker ):
+ if isinstance(node2, Docker):
if "params2" not in params:
params["params2"] = {}
if "ip" not in params["params2"]:
params["params2"]["ip"] = self.getNextIp()
# ensure that we allow TCLinks between data centers
# TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
- # see Containernet issue: https://github.com/mpeuster/containernet/issues/3
+ # see Containernet issue:
+ # https://github.com/mpeuster/containernet/issues/3
if "cls" not in params:
params["cls"] = TCLink
node2_port_id = params["params2"]["id"]
node2_port_name = link.intf2.name
-
# add edge and assigned port number to graph in both directions between node1 and node2
# port_id: id given in descriptor (if available, otherwise same as port)
# port: portnumber assigned by Containernet
attr_number = None
attr_dict[attr] = attr_number
-
attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
'src_port_name': node1_port_name,
- 'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
+ 'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
'dst_port_name': node2_port_name}
attr_dict2.update(attr_dict)
- self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)
+ self.DCNetwork_graph.add_edge(
+ node1.name, node2.name, attr_dict=attr_dict2)
attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
'src_port_name': node2_port_name,
- 'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
+ 'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
'dst_port_name': node1_port_name}
attr_dict2.update(attr_dict)
- self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
+ self.DCNetwork_graph.add_edge(
+ node2.name, node1.name, attr_dict=attr_dict2)
LOG.debug("addLink: n1={0} intf1={1} -- n2={2} intf2={3}".format(
- str(node1),node1_port_name, str(node2), node2_port_name))
+ str(node1), node1_port_name, str(node2), node2_port_name))
return link
# TODO we might decrease the loglevel to debug:
try:
self.DCNetwork_graph.remove_edge(node2.name, node1.name)
- except:
- LOG.warning("%s, %s not found in DCNetwork_graph." % ((node2.name, node1.name)))
+ except BaseException:
+ LOG.warning("%s, %s not found in DCNetwork_graph." %
+ ((node2.name, node1.name)))
try:
self.DCNetwork_graph.remove_edge(node1.name, node2.name)
- except:
- LOG.warning("%s, %s not found in DCNetwork_graph." % ((node1.name, node2.name)))
+ except BaseException:
+ LOG.warning("%s, %s not found in DCNetwork_graph." %
+ ((node1.name, node2.name)))
- def addDocker( self, label, **params ):
+ def addDocker(self, label, **params):
"""
Wrapper for addDocker method to use custom container class.
"""
self.DCNetwork_graph.add_node(label, type=params.get('type', 'docker'))
- return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
+ return Containernet.addDocker(
+ self, label, cls=EmulatorCompute, **params)
- def removeDocker( self, label, **params):
+ def removeDocker(self, label, **params):
"""
Wrapper for removeDocker method to update graph.
"""
Wrapper for addExtSAP method to store SAP also in graph.
"""
# make sure that 'type' is set
- params['type'] = params.get('type','sap_ext')
+ params['type'] = params.get('type', 'sap_ext')
self.DCNetwork_graph.add_node(sap_name, type=params['type'])
return Containernet.addExtSAP(self, sap_name, sap_ip, **params)
self.DCNetwork_graph.remove_node(sap_name)
return Containernet.removeExtSAP(self, sap_name)
- def addSwitch( self, name, add_to_graph=True, **params ):
+ def addSwitch(self, name, add_to_graph=True, **params):
"""
Wrapper for addSwitch method to store switch also in graph.
"""
# add this switch to the global topology overview
if add_to_graph:
- self.DCNetwork_graph.add_node(name, type=params.get('type','switch'))
+ self.DCNetwork_graph.add_node(
+ name, type=params.get('type', 'switch'))
# set the learning switch behavior
- if 'failMode' in params :
+ if 'failMode' in params:
failMode = params['failMode']
- else :
+ else:
failMode = self.failMode
- s = Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)
+ s = Containernet.addSwitch(
+ self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)
return s
# stop Ryu controller
self.killRyu()
-
def CLI(self):
CLI(self)
:return:
"""
src_sw = None
- src_sw_inport_nr = 0
src_sw_inport_name = None
# get a vlan tag for this E-LAN
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_src_interface or
- link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
+                    link_dict[link]['src_port_name'] == vnf_src_interface):  # Fix: we might also get interface names, e.g., from a son-emu-cli call
# found the right link and connected switch
src_sw = connected_sw
- src_sw_inport_nr = link_dict[link]['dst_port_nr']
src_sw_inport_name = link_dict[link]['dst_port_name']
break
# set the tag on the dc switch interface
- LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(vnf_src_name, vnf_src_interface,vlan))
+ LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(
+ vnf_src_name, vnf_src_interface, vlan))
switch_node = self.getNodeByName(src_sw)
self._set_vlan_tag(switch_node, src_sw_inport_name, vlan)
def _addMonitorFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None,
- tag=None, **kwargs):
+ tag=None, **kwargs):
"""
Add a monitoring flow entry that adds a special flowentry/counter at the begin or end of a chain.
So this monitoring flowrule exists on top of a previously defined chain rule and uses the same vlan tag/routing.
LOG.debug("call AddMonitorFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
- #check if port is specified (vnf:port)
+ # check if port is specified (vnf:port)
if vnf_src_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
try:
# returns the first found shortest path
# if all shortest paths are wanted, use: all_shortest_paths
- path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
- except:
+ path = nx.shortest_path(
+ self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+ except BaseException:
LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
vnf_src_name, vnf_dst_name, src_sw, dst_sw))
LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
for e, v in self.DCNetwork_graph.edges():
LOG.debug("%r" % self.DCNetwork_graph[e][v])
- return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+ return "No path could be found between {0} and {1}".format(
+ vnf_src_name, vnf_dst_name)
- LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+ LOG.info("Path between {0} and {1}: {2}".format(
+ vnf_src_name, vnf_dst_name, path))
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
cmd = kwargs.get('cmd')
- #iterate through the path to install the flow-entries
- for i in range(0,len(path)):
+ # iterate through the path to install the flow-entries
+ for i in range(0, len(path)):
current_node = self.getNodeByName(current_hop)
- if path.index(current_hop) < len(path)-1:
- next_hop = path[path.index(current_hop)+1]
+ if path.index(current_hop) < len(path) - 1:
+ next_hop = path[path.index(current_hop) + 1]
else:
- #last switch reached
+ # last switch reached
next_hop = vnf_dst_name
next_node = self.getNodeByName(next_hop)
if next_hop == vnf_dst_name:
switch_outport_nr = dst_sw_outport_nr
LOG.info("end node reached: {0}".format(vnf_dst_name))
- elif not isinstance( next_node, OVSSwitch ):
+ elif not isinstance(next_node, OVSSwitch):
LOG.info("Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
index_edge_out = 0
switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
-
- # set of entry via ovs-ofctl
- if isinstance( current_node, OVSSwitch ):
+ # set of entry via ovs-ofctl
+ if isinstance(current_node, OVSSwitch):
kwargs['vlan'] = tag
kwargs['path'] = path
kwargs['current_hop'] = current_hop
monitor_placement = kwargs.get('monitor_placement').strip()
# put monitor flow at the dst switch
insert_flow = False
- if monitor_placement == 'tx' and path.index(current_hop) == 0: # first node:
+ # first node:
+ if monitor_placement == 'tx' and path.index(current_hop) == 0:
insert_flow = True
# put monitoring flow at the src switch
- elif monitor_placement == 'rx' and path.index(current_hop) == len(path) - 1: # last node:
+ # last node:
+ elif monitor_placement == 'rx' and path.index(current_hop) == len(path) - 1:
insert_flow = True
elif monitor_placement not in ['rx', 'tx']:
- LOG.exception('invalid monitor command: {0}'.format(monitor_placement))
-
+ LOG.exception(
+ 'invalid monitor command: {0}'.format(monitor_placement))
if self.controller == RemoteController and insert_flow:
- ## set flow entry via ryu rest api
- self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ # set flow entry via ryu rest api
+ self._set_flow_entry_ryu_rest(
+ current_node, switch_inport_nr, switch_outport_nr, **kwargs)
break
elif insert_flow:
- ## set flow entry via ovs-ofctl
- self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ # set flow entry via ovs-ofctl
+ self._set_flow_entry_dpctl(
+ current_node, switch_inport_nr, switch_outport_nr, **kwargs)
break
# take first link between switches by default
- if isinstance( next_node, OVSSwitch ):
+ if isinstance(next_node, OVSSwitch):
switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
- return "path {2} between {0} and {1}".format(vnf_src_name, vnf_dst_name, cmd)
-
+ return "path {2} between {0} and {1}".format(
+ vnf_src_name, vnf_dst_name, cmd)
- def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
+ def setChain(self, vnf_src_name, vnf_dst_name,
+ vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
"""
Chain 2 vnf interfaces together by installing the flowrules in the switches along their path.
Currently the path is found using the default networkx shortest path function.
# check if chain already exists
found_chains = [chain_dict for chain_dict in self.installed_chains if
- (chain_dict['vnf_src_name'] == vnf_src_name and chain_dict['vnf_src_interface'] == vnf_src_interface
- and chain_dict['vnf_dst_name'] == vnf_dst_name and chain_dict['vnf_dst_interface'] == vnf_dst_interface)]
+ (chain_dict['vnf_src_name'] == vnf_src_name and
+ chain_dict['vnf_src_interface'] == vnf_src_interface and
+ chain_dict['vnf_dst_name'] == vnf_dst_name and
+ chain_dict['vnf_dst_interface'] == vnf_dst_interface)]
if len(found_chains) > 0:
# this chain exists, so need an extra monitoring flow
# assume only 1 chain per vnf/interface pair
LOG.debug('*** installing monitoring chain on top of pre-defined chain from {0}:{1} -> {2}:{3}'.
- format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
+ format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
tag = found_chains[0]['tag']
ret = self._addMonitorFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface,
- tag=tag, table_id=0, **kwargs)
+ tag=tag, table_id=0, **kwargs)
return ret
else:
# no chain existing (or E-LAN) -> install normal chain
format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
pass
-
cmd = kwargs.get('cmd', 'add-flow')
if cmd == 'add-flow' or cmd == 'del-flows':
- ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
+ ret = self._chainAddFlow(
+ vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
if kwargs.get('bidirectional'):
if kwargs.get('path') is not None:
kwargs['path'] = list(reversed(kwargs.get('path')))
- ret = ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
+ ret = ret + '\n' + \
+ self._chainAddFlow(
+ vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
else:
ret = "Command unknown"
return ret
-
- def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
+ def _chainAddFlow(self, vnf_src_name, vnf_dst_name,
+ vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
src_sw = None
src_sw_inport_nr = 0
LOG.debug("call chainAddFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
- #check if port is specified (vnf:port)
+ # check if port is specified (vnf:port)
if vnf_src_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
try:
# returns the first found shortest path
# if all shortest paths are wanted, use: all_shortest_paths
- path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
- except:
+ path = nx.shortest_path(
+ self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+ except BaseException:
LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
vnf_src_name, vnf_dst_name, src_sw, dst_sw))
LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
for e, v in self.DCNetwork_graph.edges():
LOG.debug("%r" % self.DCNetwork_graph[e][v])
- return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+ return "No path could be found between {0} and {1}".format(
+ vnf_src_name, vnf_dst_name)
- LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+ LOG.info("Path between {0} and {1}: {2}".format(
+ vnf_src_name, vnf_dst_name, path))
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
chain_dict['tag'] = vlan
self.installed_chains.append(chain_dict)
- #iterate through the path to install the flow-entries
- for i in range(0,len(path)):
+ # iterate through the path to install the flow-entries
+ for i in range(0, len(path)):
current_node = self.getNodeByName(current_hop)
if i < len(path) - 1:
if next_hop == vnf_dst_name:
switch_outport_nr = dst_sw_outport_nr
LOG.info("end node reached: {0}".format(vnf_dst_name))
- elif not isinstance( next_node, OVSSwitch ):
+ elif not isinstance(next_node, OVSSwitch):
LOG.info("Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
index_edge_out = 0
switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
-
- # set OpenFlow entry
- if isinstance( current_node, OVSSwitch ):
+ # set OpenFlow entry
+ if isinstance(current_node, OVSSwitch):
kwargs['vlan'] = vlan
kwargs['path'] = path
kwargs['current_hop'] = current_hop
kwargs['pathindex'] = i
if self.controller == RemoteController:
- ## set flow entry via ryu rest api
- self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ # set flow entry via ryu rest api
+ self._set_flow_entry_ryu_rest(
+ current_node, switch_inport_nr, switch_outport_nr, **kwargs)
else:
- ## set flow entry via ovs-ofctl
- self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ # set flow entry via ovs-ofctl
+ self._set_flow_entry_dpctl(
+ current_node, switch_inport_nr, switch_outport_nr, **kwargs)
# take first link between switches by default
- if isinstance( next_node, OVSSwitch ):
+ if isinstance(next_node, OVSSwitch):
switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
flow_options = {
- 'priority':kwargs.get('priority', DEFAULT_PRIORITY),
- 'cookie':kwargs.get('cookie', DEFAULT_COOKIE),
- 'vlan':kwargs['vlan'],
- 'path':kwargs['path'],
- 'match_input':kwargs.get('match')
+ 'priority': kwargs.get('priority', DEFAULT_PRIORITY),
+ 'cookie': kwargs.get('cookie', DEFAULT_COOKIE),
+ 'vlan': kwargs['vlan'],
+ 'path': kwargs['path'],
+ 'match_input': kwargs.get('match')
}
flow_options_str = json.dumps(flow_options, indent=1)
- return "success: {2} between {0} and {1} with options: {3}".format(vnf_src_name, vnf_dst_name, cmd, flow_options_str)
+ return "success: {2} between {0} and {1} with options: {3}".format(
+ vnf_src_name, vnf_dst_name, cmd, flow_options_str)
- def _set_flow_entry_ryu_rest(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
+ def _set_flow_entry_ryu_rest(
+ self, node, switch_inport_nr, switch_outport_nr, **kwargs):
match = 'in_port=%s' % switch_inport_nr
cookie = kwargs.get('cookie')
# http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#add-a-flow-entry
if cmd == 'add-flow':
prefix = 'stats/flowentry/add'
- if vlan != None:
+ if vlan is not None:
if index == 0: # first node
# set vlan tag in ovs instance (to isolate E-LANs)
if not skip_vlan_tag:
# set vlan push action if more than 1 switch in the path
if len(path) > 1:
action = {}
- action['type'] = 'PUSH_VLAN' # Push a new VLAN tag if a input frame is non-VLAN-tagged
- action['ethertype'] = 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
+                            # Push a new VLAN tag if an input frame is
+ # non-VLAN-tagged
+ action['type'] = 'PUSH_VLAN'
+ # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged
+ # frame
+ action['ethertype'] = 33024
flow['actions'].append(action)
action = {}
action['type'] = 'SET_FIELD'
if cookie:
# TODO: add cookie_mask as argument
- flow['cookie_mask'] = int('0xffffffffffffffff', 16) # need full mask to match complete cookie
+ # need full mask to match complete cookie
+ flow['cookie_mask'] = int('0xffffffffffffffff', 16)
action = {}
action['type'] = 'OUTPUT'
self.ryu_REST(prefix, data=flow)
def _set_vlan_tag(self, node, switch_port, tag):
- node.vsctl('set', 'port {0} tag={1}'.format(switch_port,tag))
- LOG.debug("set vlan in switch: {0} in_port: {1} vlan tag: {2}".format(node.name, switch_port, tag))
+ node.vsctl('set', 'port {0} tag={1}'.format(switch_port, tag))
+ LOG.debug("set vlan in switch: {0} in_port: {1} vlan tag: {2}".format(
+ node.name, switch_port, tag))
- def _set_flow_entry_dpctl(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
+ def _set_flow_entry_dpctl(
+ self, node, switch_inport_nr, switch_outport_nr, **kwargs):
match = 'in_port=%s' % switch_inport_nr
match = s.join([match, match_input])
if cmd == 'add-flow':
action = 'action=%s' % switch_outport_nr
- if vlan != None:
- if index == 0: # first node
- action = ('action=mod_vlan_vid:%s' % vlan) + (',output=%s' % switch_outport_nr)
+ if vlan is not None:
+ if index == 0: # first node
+ action = ('action=mod_vlan_vid:%s' % vlan) + \
+ (',output=%s' % switch_outport_nr)
match = '-O OpenFlow13 ' + match
elif index == len(path) - 1: # last node
match += ',dl_vlan=%s' % vlan
node.dpctl(cmd, ofcmd)
LOG.info("{3} in switch: {0} in_port: {1} out_port: {2}".format(node.name, switch_inport_nr,
- switch_outport_nr, cmd))
+ switch_outport_nr, cmd))
# start Ryu Openflow controller as Remote Controller for the DCNetwork
def startRyu(self, learning_switch=True):
# start Ryu controller with rest-API
python_install_path = site.getsitepackages()[0]
# ryu default learning switch
- #ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
- #custom learning switch that installs a default NORMAL action in the ovs switches
+ # ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
+ # custom learning switch that installs a default NORMAL action in the
+ # ovs switches
dir_path = os.path.dirname(os.path.realpath(__file__))
ryu_path = dir_path + '/son_emu_simple_switch_13.py'
ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
ryu_cmd = 'ryu-manager'
FNULL = open("/tmp/ryu.log", 'w')
if learning_switch:
- self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ self.ryu_process = Popen(
+ [ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
LOG.debug('starting ryu-controller with {0}'.format(ryu_path))
LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
else:
# no learning switch, but with rest api
- self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ self.ryu_process = Popen(
+ [ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
time.sleep(1)
else:
req = self.RyuSession.get(url)
-
# do extra logging if status code is not 200 (OK)
if req.status_code is not requests.codes.ok:
logging.info(
req.encoding, req.text,
req.headers, req.history))
LOG.info('url: {0}'.format(str(url)))
- if data: LOG.info('POST: {0}'.format(str(data)))
- LOG.info('status: {0} reason: {1}'.format(req.status_code, req.reason))
-
+ if data:
+ LOG.info('POST: {0}'.format(str(data)))
+ LOG.info('status: {0} reason: {1}'.format(
+ req.status_code, req.reason))
if 'json' in req.headers['content-type']:
ret = req.json()
ret = req.text.rstrip()
return ret
-
# need to respect that some match fields must be integers
# http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#description-of-match-and-actions
+
def _parse_match(self, match):
matches = match.split(',')
dict = {}
if len(match) == 2:
try:
m2 = int(match[1], 0)
- except:
+ except BaseException:
m2 = match[1]
- dict.update({match[0]:m2})
+ dict.update({match[0]: m2})
return dict
- def find_connected_dc_interface(self, vnf_src_name, vnf_src_interface=None):
+ def find_connected_dc_interface(
+ self, vnf_src_name, vnf_src_interface=None):
if vnf_src_interface is None:
# take first interface by default
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_src_interface or
- link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
+ link_dict[link]['src_port_name'] == vnf_src_interface):
+                # Fix: we might also get interface names, e.g., from a son-emu-cli call
# found the right link and connected switch
- src_sw = connected_sw
- src_sw_inport_nr = link_dict[link]['dst_port_nr']
src_sw_inport_name = link_dict[link]['dst_port_name']
return src_sw_inport_name
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from mininet.node import Docker, OVSBridge
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from mininet.node import Docker
from mininet.link import Link
from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable
import logging
DCDPID_BASE = 1000 # start of switch dpid's used for data center switches
EXTSAPDPID_BASE = 2000 # start of switch dpid's used for external SAP switches
+
class EmulatorCompute(Docker):
"""
Emulator specific compute node class.
self, name, dimage, **kwargs):
self.datacenter = kwargs.get("datacenter") # pointer to current DC
self.flavor_name = kwargs.get("flavor_name")
- LOG.debug("Starting compute instance %r in data center %r" % (name, str(self.datacenter)))
+ LOG.debug("Starting compute instance %r in data center %r" %
+ (name, str(self.datacenter)))
# call original Docker.__init__
Docker.__init__(self, name, dimage, **kwargs)
for i in self.intfList():
vnf_name = self.name
vnf_interface = str(i)
- dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
+ dc_port_name = self.datacenter.net.find_connected_dc_interface(
+ vnf_name, vnf_interface)
# format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
- intf_dict = {'intf_name': str(i), 'ip': "{0}/{1}".format(i.IP(), i.prefixLen), 'netmask': i.prefixLen, 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+ intf_dict = {'intf_name': str(i), 'ip': "{0}/{1}".format(i.IP(), i.prefixLen), 'netmask': i.prefixLen,
+ 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
networkStatusList.append(intf_dict)
return networkStatusList
status["state"] = self.dcli.inspect_container(self.dc)["State"]
status["id"] = self.dcli.inspect_container(self.dc)["Id"]
status["short_id"] = self.dcli.inspect_container(self.dc)["Id"][:12]
- status["hostname"] = self.dcli.inspect_container(self.dc)["Config"]['Hostname']
+ status["hostname"] = self.dcli.inspect_container(self.dc)[
+ "Config"]['Hostname']
status["datacenter"] = (None if self.datacenter is None
else self.datacenter.label)
self.net = self.datacenter.net
self.name = sap_name
- LOG.debug("Starting ext SAP instance %r in data center %r" % (sap_name, str(self.datacenter)))
+ LOG.debug("Starting ext SAP instance %r in data center %r" %
+ (sap_name, str(self.datacenter)))
# create SAP as separate OVS switch with an assigned ip address
self.ip = str(sap_net[1]) + '/' + str(sap_net.prefixlen)
self.subnet = sap_net
# allow connection to the external internet through the host
params = dict(NAT=True)
- self.switch = self.net.addExtSAP(sap_name, self.ip, dpid=hex(self._get_next_extSAP_dpid())[2:], **params)
+ self.switch = self.net.addExtSAP(sap_name, self.ip, dpid=hex(
+ self._get_next_extSAP_dpid())[2:], **params)
self.switch.start()
def _get_next_extSAP_dpid(self):
vnf_interface = str(i)
if vnf_interface == 'lo':
continue
- dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
+ dc_port_name = self.datacenter.net.find_connected_dc_interface(
+ vnf_name, vnf_interface)
# format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
- intf_dict = {'intf_name': str(i), 'ip': self.ip, 'netmask': i.prefixLen, 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+ intf_dict = {'intf_name': str(i), 'ip': self.ip, 'netmask': i.prefixLen, 'mac': i.MAC(
+ ), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
networkStatusList.append(intf_dict)
return networkStatusList
"network": self.getNetworkStatus()
}
+
class Datacenter(object):
"""
Represents a logical data center to which compute resources
self.label = label
# dict to store arbitrary metadata (e.g. latitude and longitude)
self.metadata = metadata
- # path to which resource information should be logged (e.g. for experiments). None = no logging
+ # path to which resource information should be logged (e.g. for
+ # experiments). None = no logging
self.resource_log_path = resource_log_path
# first prototype assumes one "bigswitch" per DC
self.switch = None
def start(self):
pass
- def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny", properties=dict(), **params):
+ def startCompute(self, name, image=None, command=None, network=None,
+ flavor_name="tiny", properties=dict(), **params):
"""
Create a new container as compute resource and connect it to this
data center.
if network is None:
network = {} # {"ip": "10.0.0.254/8"}
if isinstance(network, dict):
- network = [network] # if we have only one network, put it in a list
+ # if we have only one network, put it in a list
+ network = [network]
if isinstance(network, list):
if len(network) < 1:
network.append({})
dcmd=command,
datacenter=self,
flavor_name=flavor_name,
- environment = env,
+ environment=env,
**params
)
-
-
# apply resource limits to container if a resource model is defined
if self._resource_model is not None:
try:
self._resource_model.allocate(d)
- self._resource_model.write_allocation_log(d, self.resource_log_path)
+ self._resource_model.write_allocation_log(
+ d, self.resource_log_path)
except NotEnoughResourcesAvailable as ex:
- LOG.warning("Allocation of container %r was blocked by resource model." % name)
+ LOG.warning(
+ "Allocation of container %r was blocked by resource model." % name)
LOG.info(ex.message)
# ensure that we remove the container
self.net.removeDocker(name)
# if no --net option is given, network = [{}], so 1 empty dict in the list
# this results in 1 default interface with a default ip address
for nw in network:
- # clean up network configuration (e.g. RTNETLINK does not allow ':' in intf names
+ # clean up network configuration (e.g. RTNETLINK does not allow ':'
+ # in intf names
if nw.get("id") is not None:
nw["id"] = self._clean_ifname(nw["id"])
- # TODO we cannot use TCLink here (see: https://github.com/mpeuster/containernet/issues/3)
- self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
+ # TODO we cannot use TCLink here (see:
+ # https://github.com/mpeuster/containernet/issues/3)
+ self.net.addLink(d, self.switch, params1=nw,
+ cls=Link, intfName1=nw.get('id'))
# do bookkeeping
self.containers[name] = d
assert name is not None
if name not in self.containers:
raise Exception("Container with name %s not found." % name)
- LOG.debug("Stopping compute instance %r in data center %r" % (name, str(self)))
+ LOG.debug("Stopping compute instance %r in data center %r" %
+ (name, str(self)))
# stop the monitored metrics
if self.net.monitor_agent is not None:
# call resource model and free resources
if self._resource_model is not None:
self._resource_model.free(self.containers[name])
- self._resource_model.write_free_log(self.containers[name], self.resource_log_path)
+ self._resource_model.write_free_log(
+ self.containers[name], self.resource_log_path)
# remove links
self.net.removeLink(
def removeExternalSAP(self, sap_name):
sap_switch = self.extSAPs[sap_name].switch
- #sap_switch = self.net.getNodeByName(sap_name)
+ # sap_switch = self.net.getNodeByName(sap_name)
# remove link of SAP to the DC switch
self.net.removeLink(link=None, node1=sap_switch, node2=self.switch)
self.net.removeExtSAP(sap_name)
"switch": self.switch.name,
"n_running_containers": len(self.containers),
"metadata": self.metadata,
- "vnf_list" : container_list,
- "ext SAP list" : ext_saplist
+ "vnf_list": container_list,
+ "ext SAP list": ext_saplist
}
def assignResourceModel(self, rm):
:return:
"""
if self._resource_model is not None:
- raise Exception("There is already an resource model assigned to this DC.")
+ raise Exception(
+ "There is already an resource model assigned to this DC.")
self._resource_model = rm
self.net.rm_registrar.register(self, rm)
LOG.info("Assigned RM: %r to DC: %r" % (rm, self))
name = name.replace(".", "-")
name = name.replace("_", "-")
return name
-
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Base classes needed for resource models support.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
LOG = logging.getLogger("resourcemodel")
LOG.setLevel(logging.DEBUG)
:return: None
"""
if dc in self._resource_models:
- raise Exception("There is already an resource model assigned to this DC.")
+ raise Exception(
+ "There is already an resource model assigned to this DC.")
self._resource_models[dc] = rm
rm.registrar = self
rm.dcs.append(dc)
Total number of data centers that are connected to a resource model
:return:
"""
- return sum([len(rm.dcs) for rm in list(self._resource_models.itervalues())])
+ return sum([len(rm.dcs)
+ for rm in list(self._resource_models.itervalues())])
class ResourceFlavor(object):
Simple class that represents resource flavors (c.f. OpenStack).
Can contain arbitrary metrics.
"""
+
def __init__(self, name, metrics):
self.name = name
self._metrics = metrics
initialize some default flavours (naming/sizes inspired by OpenStack)
"""
self.addFlavour(ResourceFlavor(
- "tiny", {"compute": 0.5, "memory": 32, "disk": 1}))
+ "tiny", {"compute": 0.5, "memory": 32, "disk": 1}))
self.addFlavour(ResourceFlavor(
- "small", {"compute": 1.0, "memory": 128, "disk": 20}))
+ "small", {"compute": 1.0, "memory": 128, "disk": 20}))
self.addFlavour(ResourceFlavor(
- "medium", {"compute": 4.0, "memory": 256, "disk": 40}))
+ "medium", {"compute": 4.0, "memory": 256, "disk": 40}))
self.addFlavour(ResourceFlavor(
- "large", {"compute": 8.0, "memory": 512, "disk": 80}))
+ "large", {"compute": 8.0, "memory": 512, "disk": 80}))
self.addFlavour(ResourceFlavor(
- "xlarge", {"compute": 16.0, "memory": 1024, "disk": 160}))
+ "xlarge", {"compute": 16.0, "memory": 1024, "disk": 160}))
def addFlavour(self, fl):
"""
This method has to be overwritten by a real resource model.
:param d: Container object
"""
- LOG.warning("Allocating in BaseResourceModel: %r with flavor: %r" % (d.name, d.flavor_name))
+ LOG.warning("Allocating in BaseResourceModel: %r with flavor: %r" % (
+ d.name, d.flavor_name))
self._allocated_compute_instances[d.name] = d.flavor_name
def free(self, d):
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Playground for resource models created by University of Paderborn.
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import time
import json
import logging
fl_cu = self._get_flavor(d).get("compute")
# check for over provisioning
if self.dc_alloc_cu + fl_cu > self.dc_max_cu and self.raise_no_cpu_resources_left:
- raise NotEnoughResourcesAvailable("Not enough compute resources left.")
+ raise NotEnoughResourcesAvailable(
+ "Not enough compute resources left.")
self.dc_alloc_cu += fl_cu
def _allocate_mem(self, d):
fl_mu = self._get_flavor(d).get("memory")
# check for over provisioning
if self.dc_alloc_mu + fl_mu > self.dc_max_mu and self.raise_no_mem_resources_left:
- raise NotEnoughResourcesAvailable("Not enough memory resources left.")
+ raise NotEnoughResourcesAvailable(
+ "Not enough memory resources left.")
self.dc_alloc_mu += fl_mu
def free(self, d):
# calculate cpu time fraction for container with given flavor
cpu_time_percentage = self.single_cu * number_cu
# calculate input values for CFS scheduler bandwidth limitation
- cpu_period, cpu_quota = self._calculate_cpu_cfs_values(cpu_time_percentage)
+ cpu_period, cpu_quota = self._calculate_cpu_cfs_values(
+ cpu_time_percentage)
# apply limits to container if changed
if d.resources['cpu_period'] != cpu_period or d.resources['cpu_quota'] != cpu_quota:
LOG.debug("Setting CPU limit for %r: cpu_quota = cpu_period * limit = %f * %f = %f (op_factor=%f)" % (
d.name, cpu_period, cpu_time_percentage, cpu_quota, self.cpu_op_factor))
- d.updateCpuLimit(cpu_period=int(cpu_period), cpu_quota=int(cpu_quota))
+ d.updateCpuLimit(cpu_period=int(cpu_period),
+ cpu_quota=int(cpu_quota))
def _compute_single_cu(self):
"""
# get cpu time fraction for entire emulation
e_cpu = self.registrar.e_cpu
# calculate
- return float(e_cpu) / sum([rm.dc_max_cu for rm in list(self.registrar.resource_models)])
+ return float(
+ e_cpu) / sum([rm.dc_max_cu for rm in list(self.registrar.resource_models)])
def _calculate_cpu_cfs_values(self, cpu_time_percentage):
"""
# (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
# Attention minimum cpu_quota is 1ms (micro)
cpu_period = CPU_PERIOD # lets consider a fixed period of 1000000 microseconds for now
- cpu_quota = cpu_period * cpu_time_percentage # calculate the fraction of cpu time for this container
- # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
+ # calculate the fraction of cpu time for this container
+ cpu_quota = cpu_period * cpu_time_percentage
+ # ATTENTION >= 1000 to avoid a invalid argument system error ... no
+ # idea why
if cpu_quota < 1000:
cpu_quota = 1000
LOG.warning("Increased CPU quota to avoid system error.")
# get memory amount for entire emulation
e_mem = self.registrar.e_mem
# calculate amount of memory for a single mu
- self.single_mu = float(e_mem) / sum([rm.dc_max_mu for rm in list(self.registrar.resource_models)])
+ self.single_mu = float(
+ e_mem) / sum([rm.dc_max_mu for rm in list(self.registrar.resource_models)])
# calculate mem for given flavor
mem_limit = self.single_mu * number_mu
mem_limit = self._calculate_mem_limit_value(mem_limit)
# apply to container if changed
if d.resources['mem_limit'] != mem_limit:
LOG.debug("Setting MEM limit for %r: mem_limit = %f MB (op_factor=%f)" %
- (d.name, mem_limit/1024/1024, self.mem_op_factor))
+ (d.name, mem_limit / 1024 / 1024, self.mem_op_factor))
d.updateMemoryLimit(mem_limit=mem_limit)
def _calculate_mem_limit_value(self, mem_limit):
mem_limit = 4
LOG.warning("Increased MEM limit because it was less than 4.0 MB.")
# to byte!
- return int(mem_limit*1024*1024)
+ return int(mem_limit * 1024 * 1024)
def get_state_dict(self):
"""
if path is None:
return
# we have a path: write out RM info
- l = dict()
- l["t"] = time.time()
- l["container_state"] = d.getStatus()
- l["action"] = action
- l["rm_state"] = self.get_state_dict()
+ logd = dict()
+ logd["t"] = time.time()
+ logd["container_state"] = d.getStatus()
+ logd["action"] = action
+ logd["rm_state"] = self.get_state_dict()
# append to logfile
with open(path, "a") as f:
- f.write("%s\n" % json.dumps(l))
+ f.write("%s\n" % json.dumps(logd))
class UpbOverprovisioningCloudDcRM(UpbSimpleCloudDcRM):
containers whenever a data-center is over provisioned.
"""
# TODO add parts for memory
+
def __init__(self, *args, **kvargs):
super(UpbOverprovisioningCloudDcRM, self).__init__(*args, **kvargs)
self.raise_no_cpu_resources_left = False
# get cpu time fraction for entire emulation
e_cpu = self.registrar.e_cpu
# calculate over provisioning scale factor
- self.cpu_op_factor = float(self.dc_max_cu) / (max(self.dc_max_cu, self.dc_alloc_cu))
+ self.cpu_op_factor = float(self.dc_max_cu) / \
+ (max(self.dc_max_cu, self.dc_alloc_cu))
# calculate
- return float(e_cpu) / sum([rm.dc_max_cu for rm in list(self.registrar.resource_models)]) * self.cpu_op_factor
+ return float(e_cpu) / sum([rm.dc_max_cu for rm in list(
+ self.registrar.resource_models)]) * self.cpu_op_factor
class UpbDummyRM(UpbSimpleCloudDcRM):
"""
No limits. But log allocations.
"""
+
def __init__(self, *args, **kvargs):
super(UpbDummyRM, self).__init__(*args, **kvargs)
self.raise_no_cpu_resources_left = False
def _apply_limits(self):
# do nothing here
pass
-
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
-from ryu.topology.event import EventSwitchEnter, EventSwitchLeave, EventSwitchReconnected
+from ryu.topology.event import EventSwitchEnter, EventSwitchReconnected
+
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
- #actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
+ # actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
# ofproto.OFPCML_NO_BUFFER)]
actions = [parser.OFPActionOutput(ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
- def add_flow(self, datapath, priority, match, actions, buffer_id=None, table_id=0):
+ def add_flow(self, datapath, priority, match,
+ actions, buffer_id=None, table_id=0):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
actions = [ofp_parser.OFPActionOutput(ofproto_v1_3.OFPP_NORMAL)]
self.add_flow(datapath, 0, None, actions, table_id=0)
-
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
from mininet.log import setLogLevel
from emuvim.dcemulator.net import DCNetwork
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
from mininet.log import setLogLevel
from emuvim.dcemulator.net import DCNetwork
-"""
-Copyright (c) 2017 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
import time
import signal
logging.getLogger('api.openstack.glance').setLevel(logging.DEBUG)
logging.getLogger('api.openstack.helper').setLevel(logging.DEBUG)
+
class DaemonTopology(object):
"""
Topology with two datacenters:
- dc1 <-- 50ms --> dc2
+ dc1 <-- 50ms --> dc2
"""
def __init__(self):
def main():
- t = DaemonTopology()
+ DaemonTopology()
if __name__ == '__main__':
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Helper module that implements helpers for test implementations.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import unittest
import os
import subprocess
from mininet.clean import cleanup
from mininet.node import Controller
+
class SimpleTestTopology(unittest.TestCase):
"""
Helper class to do basic test setups.
# add some switches
# start from s1 because ovs does not like to have dpid = 0
# and switch name-number is being used by mininet to set the dpid
- for i in range(1, nswitches+1):
+ for i in range(1, nswitches + 1):
self.s.append(self.net.addSwitch('s%d' % i))
# if specified, chain all switches
if autolinkswitches:
self.h.append(self.net.addHost('h%d' % i))
# add some dockers
for i in range(0, ndockers):
- self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))
+ self.d.append(self.net.addDocker('d%d' %
+ i, dimage="ubuntu:trusty"))
def startApi(self):
self.api.start()
def stopApi(self):
self.api.stop()
-
+
def startNet(self):
self.net.start()
"""
List the containers managed by containernet
"""
- return self.getDockerCli().containers(filters={"label": "com.containernet"})
+ return self.getDockerCli().containers(
+ filters={"label": "com.containernet"})
@staticmethod
def setUp():
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Helper module that implements helpers for test implementations.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import unittest
import os
import subprocess
from mininet.clean import cleanup
from mininet.node import Controller
+
class ApiBaseOpenStack(unittest.TestCase):
"""
Helper class to do basic test setups.
"""
self.net = DCNetwork(controller=controller, **kwargs)
for i in range(0, ndatacenter):
- self.api.append(OpenstackApiEndpoint("0.0.0.0", 15000+i))
+ self.api.append(OpenstackApiEndpoint("0.0.0.0", 15000 + i))
# add some switches
# start from s1 because ovs does not like to have dpid = 0
# and switch name-number is being used by mininet to set the dpid
- for i in range(1, nswitches+1):
+ for i in range(1, nswitches + 1):
self.s.append(self.net.addSwitch('s%d' % i))
# if specified, chain all switches
if autolinkswitches:
for i in range(0, len(self.s) - 1):
self.net.addLink(self.s[i], self.s[i + 1])
- self.net.addLink(self.s[2], self.s[0]) # link switches s1, s2 and s3
+ # link switches s1, s2 and s3
+ self.net.addLink(self.s[2], self.s[0])
# add some data centers
for i in range(0, ndatacenter):
self.net.addDatacenter(
'dc%d' % i,
metadata={"unittest_dc": i}))
- self.net.addLink(self.dc[0].switch, self.s[0]) # link switches dc0.s1 with s1
+ # link switches dc0.s1 with s1
+ self.net.addLink(self.dc[0].switch, self.s[0])
# connect data centers to the endpoint
for i in range(0, ndatacenter):
self.api[i].connect_datacenter(self.dc[i])
self.h.append(self.net.addHost('h%d' % i))
# add some dockers
for i in range(0, ndockers):
- self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))
+ self.d.append(self.net.addDocker('d%d' %
+ i, dimage="ubuntu:trusty"))
def startApi(self):
for i in self.api:
"""
List the containers managed by containernet
"""
- return self.getDockerCli().containers(filters={"label": "com.containernet"})
+ return self.getDockerCli().containers(
+ filters={"label": "com.containernet"})
@staticmethod
def setUp():
pass
-
def tearDown(self):
time.sleep(2)
print('->>>>>>> tear everything down ->>>>>>>>>>>>>>>')
- self.stopApi() # stop all flask threads
- self.stopNet() # stop some mininet and containernet stuff
+ self.stopApi() # stop all flask threads
+ self.stopNet() # stop some mininet and containernet stuff
cleanup()
# make sure that all pending docker containers are killed
- with open(os.devnull, 'w') as devnull: # kill a possibly running docker process that blocks the open ports
+ # kill a possibly running docker process that blocks the open ports
+ with open(os.devnull, 'w') as devnull:
subprocess.call("kill $(netstat -npl | grep '15000' | grep -o -e'[0-9]\+/docker' | grep -o -e '[0-9]\+')",
- stdout=devnull,
- stderr=devnull,
- shell=True)
+ stdout=devnull,
+ stderr=devnull,
+ shell=True)
with open(os.devnull, 'w') as devnull:
subprocess.call(
stderr=devnull,
shell=True)
time.sleep(2)
-
-
-
-
-
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Helper module that implements helpers for test implementations.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import unittest
import os
import subprocess
from mininet.clean import cleanup
from mininet.node import Controller
+
class SimpleTestTopology(unittest.TestCase):
"""
Helper class to do basic test setups.
# add some switches
# start from s1 because ovs does not like to have dpid = 0
# and switch name-number is being used by mininet to set the dpid
- for i in range(1, nswitches+1):
+ for i in range(1, nswitches + 1):
self.s.append(self.net.addSwitch('s%d' % i))
# if specified, chain all switches
if autolinkswitches:
self.h.append(self.net.addHost('h%d' % i))
# add some dockers
for i in range(0, ndockers):
- self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu:trusty"))
+ self.d.append(self.net.addDocker('d%d' %
+ i, dimage="ubuntu:trusty"))
def startNet(self):
self.net.start()
"""
List the containers managed by containernet
"""
- return self.getDockerCli().containers(filters={"label": "com.containernet"})
+ return self.getDockerCli().containers(
+ filters={"label": "com.containernet"})
@staticmethod
def setUp():
"sudo docker rm -f $(sudo docker ps --filter 'label=com.containernet' -a -q)",
stdout=devnull,
stderr=devnull,
- shell=True)
\ No newline at end of file
+ shell=True)
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Test suite to automatically test emulator functionalities.
-Directly interacts with the emulator through the Mininet-like
-Python API.
-
-Does not test API endpoints. This is done in separated test suites.
-"""
-
-import time
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import unittest
from emuvim.dcemulator.node import EmulatorCompute
from emuvim.test.base import SimpleTestTopology
from mininet.node import RemoteController
-#@unittest.skip("disabled topology tests for development")
-class testEmulatorTopology( SimpleTestTopology ):
+# @unittest.skip("disabled topology tests for development")
+class testEmulatorTopology(SimpleTestTopology):
"""
Tests to check the topology API of the emulator.
"""
# stop Mininet network
self.stopNet()
- #@unittest.skip("disabled to test if CI fails because this is the first test.")
+ # @unittest.skip("disabled to test if CI fails because this is the first test.")
def testMultipleDatacenterDirect(self):
"""
Create a two data centers and interconnect them.
# stop Mininet network
self.stopNet()
-class testEmulatorNetworking( SimpleTestTopology ):
+
+class testEmulatorNetworking(SimpleTestTopology):
def testSDNChainingSingleService_withLearning(self):
"""
self.startNet()
# add compute resources
- vnf1 = self.dc[0].startCompute("vnf1", network=[{'id':'intf1', 'ip':'10.0.10.1/24'}])
- vnf2 = self.dc[1].startCompute("vnf2", network=[{'id':'intf2', 'ip':'10.0.10.2/24'}])
+ vnf1 = self.dc[0].startCompute(
+ "vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
+ vnf2 = self.dc[1].startCompute(
+ "vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 2)
self.assertTrue(len(self.net.hosts) == 2)
# should be connected because learning = True
self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
# setup links
- self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='add-flow')
+ self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
+ bidirectional=True, cmd='add-flow')
# should still be connected
self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
# stop Mininet network
self.startNet()
# add compute resources
- vnf1 = self.dc[0].startCompute("vnf1", network=[{'id':'intf1', 'ip':'10.0.10.1/24'}])
- vnf2 = self.dc[1].startCompute("vnf2", network=[{'id':'intf2', 'ip':'10.0.10.2/24'}])
+ vnf1 = self.dc[0].startCompute(
+ "vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
+ vnf2 = self.dc[1].startCompute(
+ "vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 2)
self.assertTrue(len(self.net.hosts) == 2)
# should be not not yet connected
self.assertTrue(self.net.ping([vnf1, vnf2]) > 0.0)
# setup links
- self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='add-flow')
+ self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
+ bidirectional=True, cmd='add-flow')
# check connectivity by using ping
self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
# stop Mininet network
# start Mininet network
self.startNet()
- ## First Service
+ # First Service
# add compute resources
- vnf1 = self.dc[0].startCompute("vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
- vnf2 = self.dc[1].startCompute("vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
+ vnf1 = self.dc[0].startCompute(
+ "vnf1", network=[{'id': 'intf1', 'ip': '10.0.10.1/24'}])
+ vnf2 = self.dc[1].startCompute(
+ "vnf2", network=[{'id': 'intf2', 'ip': '10.0.10.2/24'}])
# setup links
- self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='add-flow', cookie=1)
+ self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
+ bidirectional=True, cmd='add-flow', cookie=1)
# check connectivity by using ping
self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
- ## Second Service
+ # Second Service
# add compute resources
- vnf11 = self.dc[0].startCompute("vnf11", network=[{'id': 'intf1', 'ip': '10.0.20.1/24'}])
- vnf22 = self.dc[1].startCompute("vnf22", network=[{'id': 'intf2', 'ip': '10.0.20.2/24'}])
+ vnf11 = self.dc[0].startCompute(
+ "vnf11", network=[{'id': 'intf1', 'ip': '10.0.20.1/24'}])
+ vnf22 = self.dc[1].startCompute(
+ "vnf22", network=[{'id': 'intf2', 'ip': '10.0.20.2/24'}])
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 4)
self.assertTrue(len(self.net.switches) == 5)
# setup links
- self.net.setChain('vnf11', 'vnf22', 'intf1', 'intf2', bidirectional=True, cmd='add-flow', cookie=2)
+ self.net.setChain('vnf11', 'vnf22', 'intf1', 'intf2',
+ bidirectional=True, cmd='add-flow', cookie=2)
# check connectivity by using ping
self.assertTrue(self.net.ping([vnf11, vnf22]) <= 0.0)
# check first service cannot ping second service
self.assertTrue(self.net.ping([vnf2, vnf11]) > 0.0)
# delete the first service chain
- self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='del-flows', cookie=1)
+ self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2',
+ bidirectional=True, cmd='del-flows', cookie=1)
# check connectivity of first service is down
self.assertTrue(self.net.ping([vnf1, vnf2]) > 0.0)
- #time.sleep(100)
+ # time.sleep(100)
# check connectivity of second service is still up
self.assertTrue(self.net.ping([vnf11, vnf22]) <= 0.0)
# stop Mininet network
self.stopNet()
-#@unittest.skip("disabled compute tests for development")
-class testEmulatorCompute( SimpleTestTopology ):
+# @unittest.skip("disabled compute tests for development")
+
+
+class testEmulatorCompute(SimpleTestTopology):
"""
Tests to check the emulator's API to add and remove
compute resources at runtime.
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 1)
- self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+ self.assertTrue(isinstance(
+ self.dc[0].listCompute()[0], EmulatorCompute))
self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
# check connectivity by using ping
self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
self.assertTrue(len(self.net.switches) == 1)
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 1)
- self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+ self.assertTrue(isinstance(
+ self.dc[0].listCompute()[0], EmulatorCompute))
self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
# check connectivity by using ping
self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
Test multiple, interleaved add and remove operations and ensure
that always all expected compute instances are reachable.
"""
- # create network
+ # create network
self.createNet(
nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
autolinkswitches=True)
# stop Mininet network
self.stopNet()
+
if __name__ == '__main__':
unittest.main()
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Test suite to automatically test emulator REST API endpoints.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import os
import unittest
import requests
import simplejson as json
import yaml
-import time
from emuvim.test.api_base_openstack import ApiBaseOpenStack
def setUp(self):
# create network
- self.createNet(nswitches=3, ndatacenter=2, nhosts=2, ndockers=0, autolinkswitches=True)
+ self.createNet(nswitches=3, ndatacenter=2, nhosts=2,
+ ndockers=0, autolinkswitches=True)
# setup links
self.net.addLink(self.dc[0], self.h[0])
print(" ")
headers = {'Content-type': 'application/json'}
- test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_template_create_stack.yml")).read()
+ test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(
+ __file__), "templates/test_heatapi_template_create_stack.yml")).read()
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
requests.post(url, data=json.dumps(yaml.load(test_heatapi_template_create_stack)),
headers=headers)
url = "http://0.0.0.0:18774/"
listapiversionnovaresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversionnovaresponse.status_code, 200)
- self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["id"], "v2.1")
- self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["status"], "CURRENT")
- self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["version"], "2.38")
- self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["min_version"], "2.1")
- self.assertEqual(json.loads(listapiversionnovaresponse.content)["versions"][0]["updated"], "2013-07-23T11:33:21Z")
+ self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+ "versions"][0]["id"], "v2.1")
+ self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+ "versions"][0]["status"], "CURRENT")
+ self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+ "versions"][0]["version"], "2.38")
+ self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+ "versions"][0]["min_version"], "2.1")
+ self.assertEqual(json.loads(listapiversionnovaresponse.content)[
+ "versions"][0]["updated"], "2013-07-23T11:33:21Z")
print(" ")
print('->>>>>>> test Nova Version Show ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla"
listapiversion21novaresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversion21novaresponse.status_code, 200)
- self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["id"], "v2.1")
- self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["status"], "CURRENT")
- self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["version"], "2.38")
- self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["min_version"], "2.1")
- self.assertEqual(json.loads(listapiversion21novaresponse.content)["version"]["updated"], "2013-07-23T11:33:21Z")
+ self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+ "version"]["id"], "v2.1")
+ self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+ "version"]["status"], "CURRENT")
+ self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+ "version"]["version"], "2.38")
+ self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+ "version"]["min_version"], "2.1")
+ self.assertEqual(json.loads(listapiversion21novaresponse.content)[
+ "version"]["updated"], "2013-07-23T11:33:21Z")
print(" ")
print('->>>>>>> test Nova Version List Server APIs ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
listserverapisnovaresponse = requests.get(url, headers=headers)
self.assertEqual(listserverapisnovaresponse.status_code, 200)
- self.assertNotEqual(json.loads(listserverapisnovaresponse.content)["servers"][0]["name"], "")
+ self.assertNotEqual(json.loads(listserverapisnovaresponse.content)[
+ "servers"][0]["name"], "")
print(" ")
print('->>>>>>> test Nova Delete Server APIs ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (json.loads(listserverapisnovaresponse.content)["servers"][0]["id"])
+ url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (
+ json.loads(listserverapisnovaresponse.content)["servers"][0]["id"])
deleteserverapisnovaresponse = requests.delete(url, headers=headers)
self.assertEqual(deleteserverapisnovaresponse.status_code, 204)
print(" ")
self.assertEqual(deleteserverapisnovaresponse.status_code, 404)
print(" ")
-
print('->>>>>>> testNovaVersionListServerAPIs_withPortInformation ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers/andPorts"
listserverapisnovaresponse = requests.get(url, headers=headers)
self.assertEqual(listserverapisnovaresponse.status_code, 200)
- self.assertNotEqual(json.loads(listserverapisnovaresponse.content)["servers"][0]["name"], "")
+ self.assertNotEqual(json.loads(listserverapisnovaresponse.content)[
+ "servers"][0]["name"], "")
print(" ")
print('->>>>>>> test Nova List Flavors ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/flavors"
listflavorsresponse = requests.get(url, headers=headers)
self.assertEqual(listflavorsresponse.status_code, 200)
- self.assertIn(json.loads(listflavorsresponse.content)["flavors"][0]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
- self.assertIn(json.loads(listflavorsresponse.content)["flavors"][1]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
- self.assertIn(json.loads(listflavorsresponse.content)["flavors"][2]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+ self.assertIn(json.loads(listflavorsresponse.content)["flavors"][0]["name"], [
+ "m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+ self.assertIn(json.loads(listflavorsresponse.content)["flavors"][1]["name"], [
+ "m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+ self.assertIn(json.loads(listflavorsresponse.content)["flavors"][2]["name"], [
+ "m1.nano", "m1.tiny", "m1.micro", "m1.small"])
print(" ")
print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')
data='{"flavor":{"name": "testFlavor", "vcpus": "test_vcpus", "ram": 1024, "disk": 10}}',
headers=headers)
self.assertEqual(addflavorsresponse.status_code, 200)
- self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["id"])
- self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["links"][0]['href'])
+ self.assertIsNotNone(json.loads(
+ addflavorsresponse.content)["flavor"]["id"])
+ self.assertIsNotNone(json.loads(addflavorsresponse.content)[
+ "flavor"]["links"][0]['href'])
print(" ")
print('->>>>>>> test Nova List Flavors Detail ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/detail"
listflavorsdetailresponse = requests.get(url, headers=headers)
self.assertEqual(listflavorsdetailresponse.status_code, 200)
- self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
- self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][1]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
- self.assertIn(json.loads(listflavorsdetailresponse.content)["flavors"][2]["name"],["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+ self.assertIn(json.loads(listflavorsdetailresponse.content)[
+ "flavors"][0]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+ self.assertIn(json.loads(listflavorsdetailresponse.content)[
+ "flavors"][1]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
+ self.assertIn(json.loads(listflavorsdetailresponse.content)[
+ "flavors"][2]["name"], ["m1.nano", "m1.tiny", "m1.micro", "m1.small"])
print(" ")
print('->>>>>>> testNovaAddFlavors ->>>>>>>>>>>>>>>')
data='{"flavor":{"name": "testFlavor", "vcpus": "test_vcpus", "ram": 1024, "disk": 10}}',
headers=headers)
self.assertEqual(addflavorsresponse.status_code, 200)
- self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["id"])
- self.assertIsNotNone(json.loads(addflavorsresponse.content)["flavor"]["links"][0]['href'])
+ self.assertIsNotNone(json.loads(
+ addflavorsresponse.content)["flavor"]["id"])
+ self.assertIsNotNone(json.loads(addflavorsresponse.content)[
+ "flavor"]["links"][0]['href'])
print(" ")
print('->>>>>>> test Nova List Flavor By Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/%s" % (json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"])
+ url = "http://0.0.0.0:18774/v2.1/id_bla/flavors/%s" % (
+ json.loads(listflavorsdetailresponse.content)["flavors"][0]["name"])
listflavorsbyidresponse = requests.get(url, headers=headers)
self.assertEqual(listflavorsbyidresponse.status_code, 200)
- self.assertEqual(json.loads(listflavorsbyidresponse.content)["flavor"]["id"], json.loads(listflavorsdetailresponse.content)["flavors"][0]["id"])
+ self.assertEqual(json.loads(listflavorsbyidresponse.content)[
+ "flavor"]["id"], json.loads(listflavorsdetailresponse.content)["flavors"][0]["id"])
print(" ")
print('->>>>>>> test Nova List Images ->>>>>>>>>>>>>>>')
listimagesresponse = requests.get(url, headers=headers)
self.assertEqual(listimagesresponse.status_code, 200)
print(listimagesresponse.content)
- # deactivated: highly depends on the environment in which the tests are executed. one cannot make such an assumption.
- #self.assertIn(json.loads(listimagesresponse.content)["images"][0]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
- #self.assertIn(json.loads(listimagesresponse.content)["images"][1]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
- #self.assertIn(json.loads(listimagesresponse.content)["images"][2]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
print(" ")
print('->>>>>>> test Nova List Images Details ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/images/detail"
listimagesdetailsresponse = requests.get(url, headers=headers)
self.assertEqual(listimagesdetailsresponse.status_code, 200)
- # deactivated: highly depends on the environment in which the tests are executed. one cannot make such an assumption.
- #self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][0]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
- #self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][1]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
- #self.assertIn(json.loads(listimagesdetailsresponse.content)["images"][2]["name"],["google/cadvisor:latest", "ubuntu:trusty", "prom/pushgateway:latest"])
- self.assertEqual(json.loads(listimagesdetailsresponse.content)["images"][0]["metadata"]["architecture"],"x86_64")
+ self.assertEqual(json.loads(listimagesdetailsresponse.content)[
+ "images"][0]["metadata"]["architecture"], "x86_64")
print(" ")
print('->>>>>>> test Nova List Image By Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:18774/v2.1/id_bla/images/%s" % (json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
+ url = "http://0.0.0.0:18774/v2.1/id_bla/images/%s" % (
+ json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
listimagebyidresponse = requests.get(url, headers=headers)
self.assertEqual(listimagebyidresponse.status_code, 200)
- self.assertEqual(json.loads(listimagebyidresponse.content)["image"]["id"],json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
+ self.assertEqual(json.loads(listimagebyidresponse.content)[
+ "image"]["id"], json.loads(listimagesdetailsresponse.content)["images"][0]["id"])
print(" ")
print('->>>>>>> test Nova List Image By Non-Existend Id ->>>>>>>>>>>>>>>')
self.assertEqual(listimagebynonexistingidresponse.status_code, 404)
print(" ")
- #find ubuntu id
+ # find ubuntu id
for image in json.loads(listimagesresponse.content)["images"]:
if image["name"] == "ubuntu:trusty":
ubuntu_image_id = image["id"]
print('->>>>>>> test Nova Create Server Instance ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
- data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
+ data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (
+ json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
createserverinstance = requests.post(url, data=data, headers=headers)
self.assertEqual(createserverinstance.status_code, 200)
- self.assertEqual(json.loads(createserverinstance.content)["server"]["image"]["id"], ubuntu_image_id)
+ self.assertEqual(json.loads(createserverinstance.content)[
+ "server"]["image"]["id"], ubuntu_image_id)
print(" ")
print('->>>>>>> test Nova Create Server Instance With Already Existing Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers"
- data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
+ data = '{"server": {"name": "X", "flavorRef": "%s", "imageRef":"%s"}}' % (
+ json.loads(listflavorsresponse.content)["flavors"][0]["id"], ubuntu_image_id)
createserverinstance = requests.post(url, data=data, headers=headers)
self.assertEqual(createserverinstance.status_code, 409)
print(" ")
url = "http://0.0.0.0:18774/v2.1/id_bla/servers/detail"
listserverapisdetailedresponse = requests.get(url, headers=headers)
self.assertEqual(listserverapisdetailedresponse.status_code, 200)
- self.assertEqual(json.loads(listserverapisdetailedresponse.content)["servers"][0]["status"], "ACTIVE")
+ self.assertEqual(json.loads(listserverapisdetailedresponse.content)[
+ "servers"][0]["status"], "ACTIVE")
print(" ")
print('->>>>>>> test Nova Show Server Details ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (json.loads(listserverapisdetailedresponse.content)["servers"][0]["id"])
+ url = "http://0.0.0.0:18774/v2.1/id_bla/servers/%s" % (
+ json.loads(listserverapisdetailedresponse.content)["servers"][0]["id"])
listserverdetailsresponse = requests.get(url, headers=headers)
self.assertEqual(listserverdetailsresponse.status_code, 200)
- self.assertEqual(json.loads(listserverdetailsresponse.content)["server"]["flavor"]["links"][0]["rel"], "bookmark")
+ self.assertEqual(json.loads(listserverdetailsresponse.content)[
+ "server"]["flavor"]["links"][0]["rel"], "bookmark")
print(" ")
print('->>>>>>> test Nova Show Non-Existing Server Details ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18774/v2.1/id_bla/servers/non_existing_server_id"
- listnonexistingserverdetailsresponse = requests.get(url, headers=headers)
+ listnonexistingserverdetailsresponse = requests.get(
+ url, headers=headers)
self.assertEqual(listnonexistingserverdetailsresponse.status_code, 404)
print(" ")
print(" ")
headers = {'Content-type': 'application/json'}
- test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_template_create_stack.yml")).read()
+ test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(
+ __file__), "templates/test_heatapi_template_create_stack.yml")).read()
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
- requests.post(url, data=json.dumps(yaml.load(test_heatapi_template_create_stack)), headers=headers)
+ requests.post(url, data=json.dumps(
+ yaml.load(test_heatapi_template_create_stack)), headers=headers)
# test_heatapi_keystone_get_token = open("test_heatapi_keystone_get_token.json").read()
print('->>>>>>> test Neutron List Versions ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/"
listapiversionstackresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversionstackresponse.status_code, 200)
- self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"][0]["id"], "v2.0")
+ self.assertEqual(json.loads(listapiversionstackresponse.content)[
+ "versions"][0]["id"], "v2.0")
print(" ")
print('->>>>>>> test Neutron Show API v2.0 ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0"
listapiversionv20response = requests.get(url, headers=headers)
self.assertEqual(listapiversionv20response.status_code, 200)
- self.assertEqual(json.loads(listapiversionv20response.content)["resources"][0]["name"], "subnet")
- self.assertEqual(json.loads(listapiversionv20response.content)["resources"][1]["name"], "network")
- self.assertEqual(json.loads(listapiversionv20response.content)["resources"][2]["name"], "ports")
+ self.assertEqual(json.loads(listapiversionv20response.content)[
+ "resources"][0]["name"], "subnet")
+ self.assertEqual(json.loads(listapiversionv20response.content)[
+ "resources"][1]["name"], "network")
+ self.assertEqual(json.loads(listapiversionv20response.content)[
+ "resources"][2]["name"], "ports")
print(" ")
print('->>>>>>> test Neutron List Networks ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks"
listnetworksesponse1 = requests.get(url, headers=headers)
self.assertEqual(listnetworksesponse1.status_code, 200)
- self.assertEqual(json.loads(listnetworksesponse1.content)["networks"][0]["status"], "ACTIVE")
- listNetworksId = json.loads(listnetworksesponse1.content)["networks"][0]["id"]
- listNetworksName = json.loads(listnetworksesponse1.content)["networks"][0]["name"]
- listNetworksId2 = json.loads(listnetworksesponse1.content)["networks"][1]["id"]
+ self.assertEqual(json.loads(listnetworksesponse1.content)[
+ "networks"][0]["status"], "ACTIVE")
+ listNetworksId = json.loads(listnetworksesponse1.content)[
+ "networks"][0]["id"]
+ listNetworksName = json.loads(listnetworksesponse1.content)[
+ "networks"][0]["name"]
+ listNetworksId2 = json.loads(listnetworksesponse1.content)[
+ "networks"][1]["id"]
print(" ")
print('->>>>>>> test Neutron List Non-Existing Networks ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks?name=non_existent_network_name"
- listnetworksesponse2 = requests.get(url,headers=headers)
+ listnetworksesponse2 = requests.get(url, headers=headers)
self.assertEqual(listnetworksesponse2.status_code, 404)
print(" ")
print('->>>>>>> test Neutron List Networks By Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/networks?name=" + listNetworksName #tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+ # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+ url = "http://0.0.0.0:19696/v2.0/networks?name=" + listNetworksName
listnetworksesponse3 = requests.get(url, headers=headers)
self.assertEqual(listnetworksesponse3.status_code, 200)
- self.assertEqual(json.loads(listnetworksesponse3.content)["networks"][0]["name"], listNetworksName)
+ self.assertEqual(json.loads(listnetworksesponse3.content)[
+ "networks"][0]["name"], listNetworksName)
print(" ")
print('->>>>>>> test Neutron List Networks By Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+ # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+ url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId
listnetworksesponse4 = requests.get(url, headers=headers)
self.assertEqual(listnetworksesponse4.status_code, 200)
- self.assertEqual(json.loads(listnetworksesponse4.content)["networks"][0]["id"], listNetworksId)
+ self.assertEqual(json.loads(listnetworksesponse4.content)[
+ "networks"][0]["id"], listNetworksId)
print(" ")
print('->>>>>>> test Neutron List Networks By Multiple Ids ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId + "&id="+ listNetworksId2 # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
+ url = "http://0.0.0.0:19696/v2.0/networks?id=" + listNetworksId + "&id=" + \
+ listNetworksId2 # tcpdump-vnf:input:net:9df6a98f-9e11-4cb7-b3c0-InAdUnitTest
listnetworksesponse5 = requests.get(url, headers=headers)
self.assertEqual(listnetworksesponse5.status_code, 200)
- self.assertEqual(json.loads(listnetworksesponse5.content)["networks"][0]["id"], listNetworksId)
- self.assertEqual(json.loads(listnetworksesponse5.content)["networks"][1]["id"], listNetworksId2)
+ self.assertEqual(json.loads(listnetworksesponse5.content)[
+ "networks"][0]["id"], listNetworksId)
+ self.assertEqual(json.loads(listnetworksesponse5.content)[
+ "networks"][1]["id"], listNetworksId2)
print(" ")
print('->>>>>>> test Neutron Show Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/networks/"+listNetworksId
+ url = "http://0.0.0.0:19696/v2.0/networks/" + listNetworksId
shownetworksesponse = requests.get(url, headers=headers)
self.assertEqual(shownetworksesponse.status_code, 200)
- self.assertEqual(json.loads(shownetworksesponse.content)["network"]["status"], "ACTIVE")
+ self.assertEqual(json.loads(shownetworksesponse.content)[
+ "network"]["status"], "ACTIVE")
print(" ")
print('->>>>>>> test Neutron Show Network Non-ExistendNetwork ->>>>>>>>>>>>>>>')
print('->>>>>>> test Neutron Create Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks"
- createnetworkresponse = requests.post(url, data='{"network": {"name": "sample_network","admin_state_up": true}}', headers=headers)
+ createnetworkresponse = requests.post(
+ url, data='{"network": {"name": "sample_network","admin_state_up": true}}', headers=headers)
self.assertEqual(createnetworkresponse.status_code, 201)
- self.assertEqual(json.loads(createnetworkresponse.content)["network"]["status"], "ACTIVE")
+ self.assertEqual(json.loads(createnetworkresponse.content)[
+ "network"]["status"], "ACTIVE")
print(" ")
print('->>>>>>> test Neutron Create Network With Existing Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks"
- createnetworkresponsefailure = requests.post(url,data='{"network": {"name": "sample_network","admin_state_up": true}}',headers=headers)
+ createnetworkresponsefailure = requests.post(
+ url, data='{"network": {"name": "sample_network","admin_state_up": true}}', headers=headers)
self.assertEqual(createnetworkresponsefailure.status_code, 400)
print(" ")
print('->>>>>>> test Neutron Update Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/networks/%s" % (json.loads(createnetworkresponse.content)["network"]["id"])
- updatenetworkresponse = requests.put(url, data='{"network": {"status": "ACTIVE", "admin_state_up":true, "tenant_id":"abcd123", "name": "sample_network_new_name", "shared":false}}' , headers=headers)
+ url = "http://0.0.0.0:19696/v2.0/networks/%s" % (
+ json.loads(createnetworkresponse.content)["network"]["id"])
+ updatenetworkresponse = requests.put(
+ url, data='{"network": {"status": "ACTIVE", "admin_state_up":true, "tenant_id":"abcd123", "name": "sample_network_new_name", "shared":false}}', headers=headers)
self.assertEqual(updatenetworkresponse.status_code, 200)
- self.assertEqual(json.loads(updatenetworkresponse.content)["network"]["name"], "sample_network_new_name")
- self.assertEqual(json.loads(updatenetworkresponse.content)["network"]["tenant_id"], "abcd123")
+ self.assertEqual(json.loads(updatenetworkresponse.content)[
+ "network"]["name"], "sample_network_new_name")
+ self.assertEqual(json.loads(updatenetworkresponse.content)[
+ "network"]["tenant_id"], "abcd123")
print(" ")
print('->>>>>>> test Neutron Update Non-Existing Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/networks/non-existing-name123"
- updatenetworkresponse = requests.put(url, data='{"network": {"name": "sample_network_new_name"}}', headers=headers)
+ updatenetworkresponse = requests.put(
+ url, data='{"network": {"name": "sample_network_new_name"}}', headers=headers)
self.assertEqual(updatenetworkresponse.status_code, 404)
print(" ")
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets"
listsubnetsresponse = requests.get(url, headers=headers)
- listSubnetName = json.loads(listsubnetsresponse.content)["subnets"][0]["name"]
- listSubnetId = json.loads(listsubnetsresponse.content)["subnets"][0]["id"]
- listSubnetId2 = json.loads(listsubnetsresponse.content)["subnets"][1]["id"]
+ listSubnetName = json.loads(listsubnetsresponse.content)[
+ "subnets"][0]["name"]
+ listSubnetId = json.loads(listsubnetsresponse.content)[
+ "subnets"][0]["id"]
+ listSubnetId2 = json.loads(listsubnetsresponse.content)[
+ "subnets"][1]["id"]
self.assertEqual(listsubnetsresponse.status_code, 200)
self.assertNotIn('None', listSubnetName)
print(" ")
print('->>>>>>> test Neutron List Subnets By Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/subnets?name="+listSubnetName
+ url = "http://0.0.0.0:19696/v2.0/subnets?name=" + listSubnetName
listsubnetByNameresponse = requests.get(url, headers=headers)
self.assertEqual(listsubnetByNameresponse.status_code, 200)
- self.assertNotIn('None', json.loads(listsubnetByNameresponse.content)["subnets"][0]["name"])
+ self.assertNotIn('None', json.loads(
+ listsubnetByNameresponse.content)["subnets"][0]["name"])
print(" ")
print('->>>>>>> test Neutron List Subnets By Id ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets?id=" + listSubnetId
listsubnetsbyidresponse = requests.get(url, headers=headers)
self.assertEqual(listsubnetsbyidresponse.status_code, 200)
- self.assertNotIn("None", json.loads(listsubnetsbyidresponse.content)["subnets"][0]["name"])
+ self.assertNotIn("None", json.loads(
+ listsubnetsbyidresponse.content)["subnets"][0]["name"])
print(" ")
print('->>>>>>> test Neutron List Subnets By Multiple Id ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/subnets?id=" + listSubnetId +"&id="+listSubnetId2
+ url = "http://0.0.0.0:19696/v2.0/subnets?id=" + \
+ listSubnetId + "&id=" + listSubnetId2
listsubnetsbymultipleidsresponse = requests.get(url, headers=headers)
self.assertEqual(listsubnetsbymultipleidsresponse.status_code, 200)
- self.assertNotIn("None", json.loads(listsubnetsbymultipleidsresponse.content)["subnets"][0]["name"])
+ self.assertNotIn("None", json.loads(
+ listsubnetsbymultipleidsresponse.content)["subnets"][0]["name"])
print(" ")
-
-
print('->>>>>>> test Neutron Show Subnet->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(listsubnetsresponse.content)["subnets"][0]["id"])
+ url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (
+ json.loads(listsubnetsresponse.content)["subnets"][0]["id"])
showsubnetsresponse = requests.get(url, headers=headers)
self.assertEqual(showsubnetsresponse.status_code, 200)
- self.assertNotIn("None", json.loads(showsubnetsresponse.content)["subnet"]["name"])
+ self.assertNotIn("None", json.loads(
+ showsubnetsresponse.content)["subnet"]["name"])
print(" ")
print('->>>>>>> test Neutron Show Non-Existing Subnet->>>>>>>>>>>>>>>')
self.assertEqual(showsubnetsresponse.status_code, 404)
print(" ")
-
print('->>>>>>> test Neutron Create Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets"
- createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
- createsubnetresponse = requests.post(url, data=createsubnetdata, headers=headers)
+ createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (
+ json.loads(createnetworkresponse.content)["network"]["id"])
+ createsubnetresponse = requests.post(
+ url, data=createsubnetdata, headers=headers)
self.assertEqual(createsubnetresponse.status_code, 201)
- self.assertEqual(json.loads(createsubnetresponse.content)["subnet"]["name"], "new_subnet")
+ self.assertEqual(json.loads(createsubnetresponse.content)[
+ "subnet"]["name"], "new_subnet")
print(" ")
print('->>>>>>> test Neutron Create Second Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets"
- createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
- createsubnetfailureresponse = requests.post(url, data=createsubnetdata, headers=headers)
+ createsubnetdata = '{"subnet": {"name": "new_subnet", "network_id": "%s","ip_version": 4,"cidr": "10.0.0.1/24"} }' % (
+ json.loads(createnetworkresponse.content)["network"]["id"])
+ createsubnetfailureresponse = requests.post(
+ url, data=createsubnetdata, headers=headers)
self.assertEqual(createsubnetfailureresponse.status_code, 409)
print(" ")
print('->>>>>>> test Neutron Update Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(createsubnetresponse.content)["subnet"]["id"])
+ url = "http://0.0.0.0:19696/v2.0/subnets/%s" % (
+ json.loads(createsubnetresponse.content)["subnet"]["id"])
updatesubnetdata = '{"subnet": {"name": "new_subnet_new_name", "network_id":"some_id", "tenant_id":"new_tenant_id", "allocation_pools":"change_me", "gateway_ip":"192.168.1.120", "ip_version":4, "cidr":"10.0.0.1/24", "id":"some_new_id", "enable_dhcp":true} }'
- updatesubnetresponse = requests.put(url, data=updatesubnetdata, headers=headers)
+ updatesubnetresponse = requests.put(
+ url, data=updatesubnetdata, headers=headers)
self.assertEqual(updatesubnetresponse.status_code, 200)
- self.assertEqual(json.loads(updatesubnetresponse.content)["subnet"]["name"], "new_subnet_new_name")
+ self.assertEqual(json.loads(updatesubnetresponse.content)[
+ "subnet"]["name"], "new_subnet_new_name")
print(" ")
print('->>>>>>> test Neutron Update Non-Existing Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/subnets/non-existing-subnet-12345"
updatenonexistingsubnetdata = '{"subnet": {"name": "new_subnet_new_name"} }'
- updatenonexistingsubnetresponse = requests.put(url, data=updatenonexistingsubnetdata, headers=headers)
+ updatenonexistingsubnetresponse = requests.put(
+ url, data=updatenonexistingsubnetdata, headers=headers)
self.assertEqual(updatenonexistingsubnetresponse.status_code, 404)
print(" ")
url = "http://0.0.0.0:19696/v2.0/ports"
listportsesponse = requests.get(url, headers=headers)
self.assertEqual(listportsesponse.status_code, 200)
- self.assertEqual(json.loads(listportsesponse.content)["ports"][0]["status"], "ACTIVE")
- listPortsName = json.loads(listportsesponse.content)["ports"][0]["name"]
+ self.assertEqual(json.loads(listportsesponse.content)
+ ["ports"][0]["status"], "ACTIVE")
+ listPortsName = json.loads(listportsesponse.content)[
+ "ports"][0]["name"]
listPortsId1 = json.loads(listportsesponse.content)["ports"][0]["id"]
listPortsId2 = json.loads(listportsesponse.content)["ports"][1]["id"]
print(" ")
url = "http://0.0.0.0:19696/v2.0/ports?name=" + listPortsName
listportsbynameesponse = requests.get(url, headers=headers)
self.assertEqual(listportsbynameesponse.status_code, 200)
- self.assertEqual(json.loads(listportsbynameesponse.content)["ports"][0]["name"], listPortsName)
+ self.assertEqual(json.loads(listportsbynameesponse.content)[
+ "ports"][0]["name"], listPortsName)
print(" ")
print('->>>>>>> test Neutron List Ports By Id ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports?id=" + listPortsId1
listportsbyidesponse = requests.get(url, headers=headers)
self.assertEqual(listportsbyidesponse.status_code, 200)
- self.assertEqual(json.loads(listportsbyidesponse.content)["ports"][0]["id"], listPortsId1)
+ self.assertEqual(json.loads(listportsbyidesponse.content)[
+ "ports"][0]["id"], listPortsId1)
print(" ")
print('->>>>>>> test Neutron List Ports By Multiple Ids ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/ports?id=" + listPortsId1 +"&id="+listPortsId2
+ url = "http://0.0.0.0:19696/v2.0/ports?id=" + \
+ listPortsId1 + "&id=" + listPortsId2
listportsbymultipleidsesponse = requests.get(url, headers=headers)
self.assertEqual(listportsbymultipleidsesponse.status_code, 200)
- self.assertEqual(json.loads(listportsbymultipleidsesponse.content)["ports"][0]["id"], listPortsId1)
+ self.assertEqual(json.loads(listportsbymultipleidsesponse.content)[
+ "ports"][0]["id"], listPortsId1)
print(" ")
print('->>>>>>> test Neutron List Non-Existing Ports ->>>>>>>>>>>>>>>')
print('->>>>>>> test Neutron Show Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(listportsesponse.content)["ports"][0]["id"])
+ url = "http://0.0.0.0:19696/v2.0/ports/%s" % (
+ json.loads(listportsesponse.content)["ports"][0]["id"])
showportresponse = requests.get(url, headers=headers)
self.assertEqual(showportresponse.status_code, 200)
- self.assertEqual(json.loads(showportresponse.content)["port"]["status"], "ACTIVE")
+ self.assertEqual(json.loads(showportresponse.content)
+ ["port"]["status"], "ACTIVE")
print(" ")
print('->>>>>>> test Neutron Show Non-Existing Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports"
createnonexistingportdata = '{"port": {"name": "new_port", "network_id": "non-existing-id"} }'
- createnonexistingnetworkportresponse = requests.post(url, data=createnonexistingportdata, headers=headers)
+ createnonexistingnetworkportresponse = requests.post(
+ url, data=createnonexistingportdata, headers=headers)
self.assertEqual(createnonexistingnetworkportresponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron Create Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports"
- createportdata = '{"port": {"name": "new_port", "network_id": "%s", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","id":"new_id1234", "mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
- createportresponse = requests.post(url, data=createportdata, headers=headers)
+ createportdata = '{"port": {"name": "new_port", "network_id": "%s", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","id":"new_id1234", "mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123"} }' % (json.loads(createnetworkresponse.content)[
+ "network"]["id"])
+ createportresponse = requests.post(
+ url, data=createportdata, headers=headers)
self.assertEqual(createportresponse.status_code, 201)
- print (createportresponse.content)
- self.assertEqual(json.loads(createportresponse.content)["port"]["name"], "new_port")
+ print(createportresponse.content)
+ self.assertEqual(json.loads(createportresponse.content)[
+ "port"]["name"], "new_port")
print(" ")
print('->>>>>>> test Neutron Create Port With Existing Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports"
- createportwithexistingnamedata = '{"port": {"name": "new_port", "network_id": "%s"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
- createportwithexistingnameresponse = requests.post(url, data=createportwithexistingnamedata, headers=headers)
+ createportwithexistingnamedata = '{"port": {"name": "new_port", "network_id": "%s"} }' % (
+ json.loads(createnetworkresponse.content)["network"]["id"])
+ createportwithexistingnameresponse = requests.post(
+ url, data=createportwithexistingnamedata, headers=headers)
self.assertEqual(createportwithexistingnameresponse.status_code, 500)
print(" ")
print('->>>>>>> test Neutron Create Port Without Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports"
- createportdatawithoutname = '{"port": {"network_id": "%s"} }' % (json.loads(createnetworkresponse.content)["network"]["id"])
- createportwithoutnameresponse = requests.post(url, data=createportdatawithoutname, headers=headers)
+ createportdatawithoutname = '{"port": {"network_id": "%s"} }' % (
+ json.loads(createnetworkresponse.content)["network"]["id"])
+ createportwithoutnameresponse = requests.post(
+ url, data=createportdatawithoutname, headers=headers)
self.assertEqual(createportwithoutnameresponse.status_code, 201)
- self.assertIn("port:cp", json.loads(createportwithoutnameresponse.content)["port"]["name"])
+ self.assertIn("port:cp", json.loads(
+ createportwithoutnameresponse.content)["port"]["name"])
print(" ")
print('->>>>>>> test Neutron Update Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print(json.loads(createportresponse.content)["port"]["name"])
- url = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(createportresponse.content)["port"]["name"])
+ url = "http://0.0.0.0:19696/v2.0/ports/%s" % (
+ json.loads(createportresponse.content)["port"]["name"])
updateportdata = '{"port": {"name": "new_port_new_name", "admin_state_up":true, "device_id":"device_id123", "device_owner":"device_owner123", "fixed_ips":"change_me","mac_address":"12:34:56:78:90", "status":"change_me", "tenant_id":"tenant_id123", "network_id":"network_id123"} }'
- updateportresponse = requests.put(url, data=updateportdata, headers=headers)
+ updateportresponse = requests.put(
+ url, data=updateportdata, headers=headers)
self.assertEqual(updateportresponse.status_code, 200)
- self.assertEqual(json.loads(updateportresponse.content)["port"]["name"], "new_port_new_name")
+ self.assertEqual(json.loads(updateportresponse.content)[
+ "port"]["name"], "new_port_new_name")
print(" ")
print('->>>>>>> test Neutron Update Non-Existing Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/ports/non-existing-port-ip"
updatenonexistingportdata = '{"port": {"name": "new_port_new_name"} }'
- updatenonexistingportresponse = requests.put(url, data=updatenonexistingportdata, headers=headers)
+ updatenonexistingportresponse = requests.put(
+ url, data=updatenonexistingportdata, headers=headers)
self.assertEqual(updatenonexistingportresponse.status_code, 404)
print(" ")
print('->>>>>>> test Neutron Delete Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- righturl = "http://0.0.0.0:19696/v2.0/ports/%s" % (json.loads(createportresponse.content)["port"]["id"])
+ righturl = "http://0.0.0.0:19696/v2.0/ports/%s" % (
+ json.loads(createportresponse.content)["port"]["id"])
deleterightportresponse = requests.delete(righturl, headers=headers)
self.assertEqual(deleterightportresponse.status_code, 204)
print(" ")
-
print('->>>>>>> test Neutron Delete Non-Existing Port ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
wrongurl = "http://0.0.0.0:19696/v2.0/ports/unknownid"
print('->>>>>>> test Neutron Delete Subnet ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
wrongurl = "http://0.0.0.0:19696/v2.0/subnets/unknownid"
- righturl = "http://0.0.0.0:19696/v2.0/subnets/%s" % (json.loads(updatesubnetresponse.content)["subnet"]["id"])
+ righturl = "http://0.0.0.0:19696/v2.0/subnets/%s" % (
+ json.loads(updatesubnetresponse.content)["subnet"]["id"])
deletewrongsubnetresponse = requests.delete(wrongurl, headers=headers)
deleterightsubnetresponse = requests.delete(righturl, headers=headers)
self.assertEqual(deletewrongsubnetresponse.status_code, 404)
print('->>>>>>> test Neutron Delete Network ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- righturl = "http://0.0.0.0:19696/v2.0/networks/%s" % (json.loads(createnetworkresponse.content)["network"]["id"])
+ righturl = "http://0.0.0.0:19696/v2.0/networks/%s" % (
+ json.loads(createnetworkresponse.content)["network"]["id"])
deleterightnetworkresponse = requests.delete(righturl, headers=headers)
self.assertEqual(deleterightnetworkresponse.status_code, 204)
print(" ")
print(" ")
headers = {'Content-type': 'application/json'}
- test_heatapi_keystone_get_token = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_keystone_get_token.yml")).read()
+ test_heatapi_keystone_get_token = open(os.path.join(os.path.dirname(
+ __file__), "templates/test_heatapi_keystone_get_token.yml")).read()
print('->>>>>>> test Keystone List Versions ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:15000/"
listapiversionstackresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversionstackresponse.status_code, 200)
- self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"]["values"][0]["id"], "v2.0")
+ self.assertEqual(json.loads(listapiversionstackresponse.content)[
+ "versions"]["values"][0]["id"], "v2.0")
print(" ")
print('->>>>>>> test Keystone Show ApiV2 ->>>>>>>>>>>>>>>')
url = "http://0.0.0.0:15000/v2.0"
showapiversionstackresponse = requests.get(url, headers=headers)
self.assertEqual(showapiversionstackresponse.status_code, 200)
- self.assertEqual(json.loads(showapiversionstackresponse.content)["version"]["id"], "v2.0")
+ self.assertEqual(json.loads(showapiversionstackresponse.content)[
+ "version"]["id"], "v2.0")
print(" ")
print('->>>>>>> test Keystone Get Token ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:15000/v2.0/tokens"
- gettokenstackresponse = requests.post(url, data=json.dumps(yaml.load(test_heatapi_keystone_get_token)), headers=headers)
+ gettokenstackresponse = requests.post(url, data=json.dumps(
+ yaml.load(test_heatapi_keystone_get_token)), headers=headers)
self.assertEqual(gettokenstackresponse.status_code, 200)
- self.assertEqual(json.loads(gettokenstackresponse.content)["access"]["user"]["name"], "tenantName")
+ self.assertEqual(json.loads(gettokenstackresponse.content)[
+ "access"]["user"]["name"], "tenantName")
print(" ")
def testHeatDummy(self):
print(" ")
headers = {'Content-type': 'application/json'}
- test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_template_create_stack.yml")).read()
- test_heatapi_template_update_stack = open(os.path.join(os.path.dirname(__file__), "templates/test_heatapi_template_update_stack.yml")).read()
+ test_heatapi_template_create_stack = open(os.path.join(os.path.dirname(
+ __file__), "templates/test_heatapi_template_create_stack.yml")).read()
+ test_heatapi_template_update_stack = open(os.path.join(os.path.dirname(
+ __file__), "templates/test_heatapi_template_update_stack.yml")).read()
print('->>>>>>> test Heat List API Versions Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/"
listapiversionstackresponse = requests.get(url, headers=headers)
self.assertEqual(listapiversionstackresponse.status_code, 200)
- self.assertEqual(json.loads(listapiversionstackresponse.content)["versions"][0]["id"], "v1.0")
+ self.assertEqual(json.loads(listapiversionstackresponse.content)[
+ "versions"][0]["id"], "v1.0")
print(" ")
print('->>>>>>> test Create Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
- createstackresponse = requests.post(url, data=json.dumps(yaml.load(test_heatapi_template_create_stack)), headers=headers)
+ createstackresponse = requests.post(url, data=json.dumps(
+ yaml.load(test_heatapi_template_create_stack)), headers=headers)
self.assertEqual(createstackresponse.status_code, 201)
- self.assertNotEqual(json.loads(createstackresponse.content)["stack"]["id"], "")
+ self.assertNotEqual(json.loads(
+ createstackresponse.content)["stack"]["id"], "")
print(" ")
print('->>>>>>> test Create Stack With Existing Name ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
- createstackwithexistingnameresponse = requests.post(url, data='{"stack_name" : "s1"}', headers=headers)
+ createstackwithexistingnameresponse = requests.post(
+ url, data='{"stack_name" : "s1"}', headers=headers)
self.assertEqual(createstackwithexistingnameresponse.status_code, 409)
print(" ")
print('->>>>>>> test Create Stack With Unsupported Version ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
- createstackwitheunsupportedversionresponse = requests.post(url, data='{"stack_name" : "stackname123", "template" : {"heat_template_version": "2015-04-29"}}', headers=headers)
- self.assertEqual(createstackwitheunsupportedversionresponse.status_code, 400)
+ createstackwitheunsupportedversionresponse = requests.post(
+ url, data='{"stack_name" : "stackname123", "template" : {"heat_template_version": "2015-04-29"}}', headers=headers)
+ self.assertEqual(
+ createstackwitheunsupportedversionresponse.status_code, 400)
print(" ")
-
print('->>>>>>> test List Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123/stacks"
liststackresponse = requests.get(url, headers=headers)
self.assertEqual(liststackresponse.status_code, 200)
- self.assertEqual(json.loads(liststackresponse.content)["stacks"][0]["stack_status"], "CREATE_COMPLETE")
+ self.assertEqual(json.loads(liststackresponse.content)[
+ "stacks"][0]["stack_status"], "CREATE_COMPLETE")
print(" ")
-
print('->>>>>>> test Show Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s"% json.loads(createstackresponse.content)['stack']['id']
+ url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/%s" % json.loads(
+ createstackresponse.content)['stack']['id']
liststackdetailsresponse = requests.get(url, headers=headers)
self.assertEqual(liststackdetailsresponse.status_code, 200)
- self.assertEqual(json.loads(liststackdetailsresponse.content)["stack"]["stack_status"], "CREATE_COMPLETE")
+ self.assertEqual(json.loads(liststackdetailsresponse.content)[
+ "stack"]["stack_status"], "CREATE_COMPLETE")
print(" ")
print('->>>>>>> test Show Non-Exisitng Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123showStack/stacks/non_exisitng_id123"
- listnonexistingstackdetailsresponse = requests.get(url, headers=headers)
+ listnonexistingstackdetailsresponse = requests.get(
+ url, headers=headers)
self.assertEqual(listnonexistingstackdetailsresponse.status_code, 404)
print(" ")
print('->>>>>>> test Update Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/%s"% json.loads(createstackresponse.content)['stack']['id']
+ url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/%s" % json.loads(
+ createstackresponse.content)['stack']['id']
updatestackresponse = requests.put(url, data=json.dumps(yaml.load(test_heatapi_template_update_stack)),
- headers=headers)
+ headers=headers)
self.assertEqual(updatestackresponse.status_code, 202)
liststackdetailsresponse = requests.get(url, headers=headers)
- self.assertEqual(json.loads(liststackdetailsresponse.content)["stack"]["stack_status"], "UPDATE_COMPLETE")
+ self.assertEqual(json.loads(liststackdetailsresponse.content)[
+ "stack"]["stack_status"], "UPDATE_COMPLETE")
print(" ")
print('->>>>>>> test Update Non-Existing Stack ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:18004/v1/tenantabc123updateStack/stacks/non_existing_id_1234"
- updatenonexistingstackresponse = requests.put(url, data={"non": "sense"}, headers=headers)
+ updatenonexistingstackresponse = requests.put(
+ url, data={"non": "sense"}, headers=headers)
self.assertEqual(updatenonexistingstackresponse.status_code, 404)
print(" ")
print('->>>>>>> Create ports p1 - p4 ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
# Get network id
- network_resp = requests.get("http://0.0.0.0:19696/v2.0/networks?name=default", headers=headers)
+ network_resp = requests.get(
+ "http://0.0.0.0:19696/v2.0/networks?name=default", headers=headers)
self.assertEqual(network_resp.status_code, 200)
network_id = json.loads(network_resp.content)["networks"][0]["id"]
url = "http://0.0.0.0:19696/v2.0/ports"
port_request = '{"port": {"name": "%s", "network_id": "%s"}}'
- p1_resp = requests.post(url, data=port_request % ("p1", network_id), headers=headers)
+ p1_resp = requests.post(url, data=port_request %
+ ("p1", network_id), headers=headers)
self.assertEqual(p1_resp.status_code, 201)
- p2_resp = requests.post(url, data=port_request % ("p2", network_id), headers=headers)
+ p2_resp = requests.post(url, data=port_request %
+ ("p2", network_id), headers=headers)
self.assertEqual(p2_resp.status_code, 201)
- p3_resp = requests.post(url, data=port_request % ("p3", network_id), headers=headers)
+ p3_resp = requests.post(url, data=port_request %
+ ("p3", network_id), headers=headers)
self.assertEqual(p3_resp.status_code, 201)
- p4_resp = requests.post(url, data=port_request % ("p4", network_id), headers=headers)
+ p4_resp = requests.post(url, data=port_request %
+ ("p4", network_id), headers=headers)
self.assertEqual(p4_resp.status_code, 201)
p1_id = json.loads(p1_resp.content)["port"]["id"]
print('->>>>>>> test Neutron SFC Port Pair Create ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs"
- pp1_resp = requests.post(url, data='{"port_pair": {"name": "pp1", "ingress": "%s", "egress": "%s"}}' % (p1_id, p2_id), headers=headers)
+ pp1_resp = requests.post(url, data='{"port_pair": {"name": "pp1", "ingress": "%s", "egress": "%s"}}' % (
+ p1_id, p2_id), headers=headers)
self.assertEqual(pp1_resp.status_code, 201)
- pp2_resp = requests.post(url, data='{"port_pair": {"name": "pp2", "ingress": "%s", "egress": "%s"}}' % (p3_id, p4_id), headers=headers)
+ pp2_resp = requests.post(url, data='{"port_pair": {"name": "pp2", "ingress": "%s", "egress": "%s"}}' % (
+ p3_id, p4_id), headers=headers)
self.assertEqual(pp2_resp.status_code, 201)
- pp3_resp = requests.post(url, data='{"port_pair": {"name": "pp3", "ingress": "%s", "egress": "%s"}}' % (p3_id, p4_id), headers=headers)
+ pp3_resp = requests.post(url, data='{"port_pair": {"name": "pp3", "ingress": "%s", "egress": "%s"}}' % (
+ p3_id, p4_id), headers=headers)
self.assertEqual(pp3_resp.status_code, 201)
pp1_id = json.loads(pp1_resp.content)["port_pair"]["id"]
print('->>>>>>> test Neutron SFC Port Pair Update ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s" % pp3_id
- pp3_update_resp = requests.put(url, data='{"port_pair": {"description": "port_pair_update"}}', headers=headers)
+ pp3_update_resp = requests.put(
+ url, data='{"port_pair": {"description": "port_pair_update"}}', headers=headers)
self.assertEqual(pp3_update_resp.status_code, 200)
- self.assertEqual(json.loads(pp3_update_resp.content)["port_pair"]["description"], "port_pair_update")
+ self.assertEqual(json.loads(pp3_update_resp.content)[
+ "port_pair"]["description"], "port_pair_update")
print('->>>>>>> test Neutron SFC Port Pair Delete ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs"
pp_list_resp = requests.get(url, headers=headers)
self.assertEqual(pp_list_resp.status_code, 200)
- self.assertEqual(len(json.loads(pp_list_resp.content)["port_pairs"]), 2) # only pp1 and pp2 should be left
+ # only pp1 and pp2 should be left
+ self.assertEqual(
+ len(json.loads(pp_list_resp.content)["port_pairs"]), 2)
print('->>>>>>> test Neutron SFC Port Pair Show ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s" % pp2_id
pp2_show_resp = requests.get(url, headers=headers)
self.assertEqual(pp2_show_resp.status_code, 200)
- self.assertEqual(json.loads(pp2_show_resp.content)["port_pair"]["name"], "pp2")
-
+ self.assertEqual(json.loads(pp2_show_resp.content)
+ ["port_pair"]["name"], "pp2")
print('->>>>>>> test Neutron SFC Port Pair Group Create ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups"
- ppg1_resp = requests.post(url, data='{"port_pair_group": {"name": "ppg1", "port_pairs": ["%s"]}}' % (pp1_id), headers=headers)
+ ppg1_resp = requests.post(
+ url, data='{"port_pair_group": {"name": "ppg1", "port_pairs": ["%s"]}}' % (pp1_id), headers=headers)
self.assertEqual(ppg1_resp.status_code, 201)
- ppg2_resp = requests.post(url, data='{"port_pair_group": {"name": "ppg2", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
+ ppg2_resp = requests.post(
+ url, data='{"port_pair_group": {"name": "ppg2", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
self.assertEqual(ppg2_resp.status_code, 201)
- ppg3_resp = requests.post(url, data='{"port_pair_group": {"name": "ppg3", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
+ ppg3_resp = requests.post(
+ url, data='{"port_pair_group": {"name": "ppg3", "port_pairs": ["%s"]}}' % (pp2_id), headers=headers)
self.assertEqual(ppg3_resp.status_code, 201)
ppg1_id = json.loads(ppg1_resp.content)["port_pair_group"]["id"]
print('->>>>>>> test Neutron SFC Port Pair Group Update ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s" % ppg3_id
- ppg3_update_resp = requests.put(url, data='{"port_pair_group": {"description": "port_pair_group_update"}}', headers=headers)
+ ppg3_update_resp = requests.put(
+ url, data='{"port_pair_group": {"description": "port_pair_group_update"}}', headers=headers)
self.assertEqual(ppg3_update_resp.status_code, 200)
- self.assertEqual(json.loads(ppg3_update_resp.content)["port_pair_group"]["description"], "port_pair_group_update")
+ self.assertEqual(json.loads(ppg3_update_resp.content)[
+ "port_pair_group"]["description"], "port_pair_group_update")
print('->>>>>>> test Neutron SFC Port Pair Group Delete ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups"
ppg_list_resp = requests.get(url, headers=headers)
self.assertEqual(ppg_list_resp.status_code, 200)
- self.assertEqual(len(json.loads(ppg_list_resp.content)["port_pair_groups"]), 2) # only ppg1 and ppg2 should be left
+ # only ppg1 and ppg2 should be left
+ self.assertEqual(
+ len(json.loads(ppg_list_resp.content)["port_pair_groups"]), 2)
print('->>>>>>> test Neutron SFC Port Pair Group Show ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s" % ppg2_id
ppg2_show_resp = requests.get(url, headers=headers)
self.assertEqual(ppg2_show_resp.status_code, 200)
- self.assertEqual(json.loads(ppg2_show_resp.content)["port_pair_group"]["name"], "ppg2")
+ self.assertEqual(json.loads(ppg2_show_resp.content)[
+ "port_pair_group"]["name"], "ppg2")
print('->>>>>>> test Neutron SFC Flow Classifier Create ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers"
- fc1_resp = requests.post(url, data='{"flow_classifier": {"name": "fc1", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
+ fc1_resp = requests.post(
+ url, data='{"flow_classifier": {"name": "fc1", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
self.assertEqual(fc1_resp.status_code, 201)
- fc2_resp = requests.post(url, data='{"flow_classifier": {"name": "fc2", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
+ fc2_resp = requests.post(
+ url, data='{"flow_classifier": {"name": "fc2", "source_port_range_min": 22, "source_port_range_max": 4000}}', headers=headers)
self.assertEqual(fc2_resp.status_code, 201)
fc1_id = json.loads(fc1_resp.content)["flow_classifier"]["id"]
print('->>>>>>> test Neutron SFC Flow Classifier Update ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s" % fc2_id
- fc2_update_resp = requests.put(url, data='{"flow_classifier": {"description": "flow_classifier_update"}}', headers=headers)
+ fc2_update_resp = requests.put(
+ url, data='{"flow_classifier": {"description": "flow_classifier_update"}}', headers=headers)
self.assertEqual(fc2_update_resp.status_code, 200)
- self.assertEqual(json.loads(fc2_update_resp.content)["flow_classifier"]["description"], "flow_classifier_update")
+ self.assertEqual(json.loads(fc2_update_resp.content)[
+ "flow_classifier"]["description"], "flow_classifier_update")
print('->>>>>>> test Neutron SFC Flow Classifier Delete ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers"
fc_list_resp = requests.get(url, headers=headers)
self.assertEqual(fc_list_resp.status_code, 200)
- self.assertEqual(len(json.loads(fc_list_resp.content)["flow_classifiers"]), 1) # only fc1
+ self.assertEqual(len(json.loads(fc_list_resp.content)
+ ["flow_classifiers"]), 1) # only fc1
print('->>>>>>> test Neutron SFC Flow Classifier Show ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s" % fc1_id
fc1_show_resp = requests.get(url, headers=headers)
self.assertEqual(fc1_show_resp.status_code, 200)
- self.assertEqual(json.loads(fc1_show_resp.content)["flow_classifier"]["name"], "fc1")
-
+ self.assertEqual(json.loads(fc1_show_resp.content)[
+ "flow_classifier"]["name"], "fc1")
print('->>>>>>> test Neutron SFC Port Chain Create ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_chains"
- pc1_resp = requests.post(url, data='{"port_chain": {"name": "pc1", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (ppg1_id, fc1_id), headers=headers)
+ pc1_resp = requests.post(url, data='{"port_chain": {"name": "pc1", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (
+ ppg1_id, fc1_id), headers=headers)
self.assertEqual(pc1_resp.status_code, 201)
- pc2_resp = requests.post(url, data='{"port_chain": {"name": "pc2", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (ppg1_id, fc1_id), headers=headers)
+ pc2_resp = requests.post(url, data='{"port_chain": {"name": "pc2", "port_pair_groups": ["%s"], "flow_classifiers": ["%s"]}}' % (
+ ppg1_id, fc1_id), headers=headers)
self.assertEqual(pc2_resp.status_code, 201)
pc1_id = json.loads(pc1_resp.content)["port_chain"]["id"]
print('->>>>>>> test Neutron SFC Port Chain Update ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_chains/%s" % pc2_id
- pc2_update_resp = requests.put(url, data='{"port_chain": {"description": "port_chain_update"}}', headers=headers)
+ pc2_update_resp = requests.put(
+ url, data='{"port_chain": {"description": "port_chain_update"}}', headers=headers)
self.assertEqual(pc2_update_resp.status_code, 200)
- self.assertEqual(json.loads(pc2_update_resp.content)["port_chain"]["description"], "port_chain_update")
+ self.assertEqual(json.loads(pc2_update_resp.content)[
+ "port_chain"]["description"], "port_chain_update")
print('->>>>>>> test Neutron SFC Port Chain Delete ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_chains"
pc_list_resp = requests.get(url, headers=headers)
self.assertEqual(pc_list_resp.status_code, 200)
- self.assertEqual(len(json.loads(pc_list_resp.content)["port_chains"]), 1) # only pc1
+ self.assertEqual(len(json.loads(pc_list_resp.content)
+ ["port_chains"]), 1) # only pc1
print('->>>>>>> test Neutron SFC Port Chain Show ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
url = "http://0.0.0.0:19696/v2.0/sfc/port_chains/%s" % pc1_id
pc1_show_resp = requests.get(url, headers=headers)
self.assertEqual(pc1_show_resp.status_code, 200)
- self.assertEqual(json.loads(pc1_show_resp.content)["port_chain"]["name"], "pc1")
+ self.assertEqual(json.loads(pc1_show_resp.content)
+ ["port_chain"]["name"], "pc1")
+
if __name__ == '__main__':
unittest.main()
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import time
import os
import unittest
from emuvim.dcemulator.resourcemodel.upb.simple import UpbSimpleCloudDcRM, UpbOverprovisioningCloudDcRM, UpbDummyRM
-
class testResourceModel(SimpleTestTopology):
"""
Test the general resource model API and functionality.
def __init__(self):
        # take default values from son-emu
self.resources = dict(
- cpu_period = -1,
- cpu_quota = -1,
- mem_limit = -1,
- memswap_limit = -1
+ cpu_period=-1,
+ cpu_quota=-1,
+ mem_limit=-1,
+ memswap_limit=-1
)
- #self.cpu_period = self.resources['cpu_period']
- #self.cpu_quota = self.resources['cpu_quota']
- #self.mem_limit = self.resources['mem_limit']
- #self.memswap_limit = self.resources['memswap_limit']
+ # self.cpu_period = self.resources['cpu_period']
+ # self.cpu_quota = self.resources['cpu_quota']
+ # self.mem_limit = self.resources['mem_limit']
+ # self.memswap_limit = self.resources['memswap_limit']
def updateCpuLimit(self, cpu_period, cpu_quota):
self.resources['cpu_period'] = cpu_period
return d
-
-
class testUpbSimpleCloudDcRM(SimpleTestTopology):
"""
Test the UpbSimpleCloudDc resource model.
E_MEM = 512
MAX_MU = 2048
# create dummy resource model environment
- reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+ reg = ResourceModelRegistrar(
+ dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
c1 = createDummyContainerObject("c1", flavor="tiny")
rm.allocate(c1) # calculate allocation
- self.assertEqual(float(c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 0.5) # validate compute result
- self.assertEqual(float(c1.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 32) # validate memory result
+ # validate compute result
+ self.assertEqual(float(
+ c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 0.5)
+ # validate memory result
+ self.assertEqual(
+ float(c1.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 32)
c2 = createDummyContainerObject("c2", flavor="small")
rm.allocate(c2) # calculate allocation
- self.assertEqual(float(c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1) # validate compute result
- self.assertEqual(float(c2.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128) # validate memory result
+ # validate compute result
+ self.assertEqual(float(
+ c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1)
+ # validate memory result
+ self.assertEqual(
+ float(c2.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
c3 = createDummyContainerObject("c3", flavor="medium")
rm.allocate(c3) # calculate allocation
- self.assertEqual(float(c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 4) # validate compute result
- self.assertEqual(float(c3.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 256) # validate memory result
+ # validate compute result
+ self.assertEqual(float(
+ c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 4)
+ # validate memory result
+ self.assertEqual(
+ float(c3.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 256)
c4 = createDummyContainerObject("c4", flavor="large")
rm.allocate(c4) # calculate allocation
- self.assertEqual(float(c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * 8) # validate compute result
- self.assertEqual(float(c4.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 512) # validate memory result
+ # validate compute result
+ self.assertEqual(float(
+ c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * 8)
+ # validate memory result
+ self.assertEqual(
+ float(c4.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 512)
c5 = createDummyContainerObject("c5", flavor="xlarge")
rm.allocate(c5) # calculate allocation
- self.assertEqual(float(c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * 16) # validate compute result
- self.assertEqual(float(c5.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 1024) # validate memory result
-
+ # validate compute result
+ self.assertEqual(float(
+ c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * 16)
+ # validate memory result
+ self.assertEqual(
+ float(c5.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 1024)
def testAllocationCpuLimit(self):
"""
E_MEM = 512
MAX_MU = 4096
# create dummy resource model environment
- reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+ reg = ResourceModelRegistrar(
+ dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
E_MEM = 512
MAX_MU = 2048
# create dummy resource model environment
- reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+ reg = ResourceModelRegistrar(
+ dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
Test the free procedure.
:return:
"""
- # config
- E_CPU = 1.0
- MAX_CU = 100
# create dummy resource model environment
- reg = ResourceModelRegistrar(dc_emulation_max_cpu=1.0, dc_emulation_max_mem=512)
+ reg = ResourceModelRegistrar(
+ dc_emulation_max_cpu=1.0, dc_emulation_max_mem=512)
rm = UpbSimpleCloudDcRM(max_cu=100, max_mu=100)
reg.register("test_dc", rm)
c1 = createDummyContainerObject("c6", flavor="tiny")
self.assertTrue(len(r._allocated_compute_instances) == 1)
# check if there is a real limitation set for containers cgroup
- # deactivated for now, seems not to work in docker-in-docker setup used in CI
- self.assertEqual(float(tc1.resources['cpu_quota'])/tc1.resources['cpu_period'], 0.005)
+ # deactivated for now, seems not to work in docker-in-docker setup used
+ # in CI
+ self.assertEqual(
+ float(tc1.resources['cpu_quota']) / tc1.resources['cpu_period'], 0.005)
# check if free was called during stopCompute
self.dc[0].stopCompute("tc1")
E_MEM = 512
MAX_MU = 2048
# create dummy resource model environment
- reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+ reg = ResourceModelRegistrar(
+ dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbOverprovisioningCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
c1 = createDummyContainerObject("c1", flavor="small")
rm.allocate(c1) # calculate allocation
- self.assertAlmostEqual(float(c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
- self.assertAlmostEqual(float(c1.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
+ self.assertAlmostEqual(float(
+ c1.resources['cpu_quota']) / c1.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
+ self.assertAlmostEqual(
+ float(c1.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
self.assertAlmostEqual(rm.cpu_op_factor, 1.0)
c2 = createDummyContainerObject("c2", flavor="small")
rm.allocate(c2) # calculate allocation
- self.assertAlmostEqual(float(c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
- self.assertAlmostEqual(float(c2.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
+ self.assertAlmostEqual(float(
+ c2.resources['cpu_quota']) / c2.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
+ self.assertAlmostEqual(
+ float(c2.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
self.assertAlmostEqual(rm.cpu_op_factor, 1.0)
c3 = createDummyContainerObject("c3", flavor="small")
rm.allocate(c3) # calculate allocation
- self.assertAlmostEqual(float(c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
- self.assertAlmostEqual(float(c3.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
+ self.assertAlmostEqual(float(
+ c3.resources['cpu_quota']) / c3.resources['cpu_period'], E_CPU / MAX_CU * 1.0, places=5)
+ self.assertAlmostEqual(
+ float(c3.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
self.assertAlmostEqual(rm.cpu_op_factor, 1.0)
# from this container onwards, we should go to over provisioning mode:
c4 = createDummyContainerObject("c4", flavor="small")
rm.allocate(c4) # calculate allocation
- self.assertAlmostEqual(float(c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 4), places=5)
- self.assertAlmostEqual(float(c4.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128, places=5)
+ self.assertAlmostEqual(float(
+ c4.resources['cpu_quota']) / c4.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 4), places=5)
+ self.assertAlmostEqual(float(
+ c4.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128, places=5)
self.assertAlmostEqual(rm.cpu_op_factor, 0.75)
c5 = createDummyContainerObject("c5", flavor="small")
rm.allocate(c5) # calculate allocation
- self.assertAlmostEqual(float(c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 5), places=5)
- self.assertAlmostEqual(float(c5.resources['mem_limit']/1024/1024), float(E_MEM) / MAX_MU * 128)
+ self.assertAlmostEqual(float(
+ c5.resources['cpu_quota']) / c5.resources['cpu_period'], E_CPU / MAX_CU * (float(3) / 5), places=5)
+ self.assertAlmostEqual(
+ float(c5.resources['mem_limit'] / 1024 / 1024), float(E_MEM) / MAX_MU * 128)
self.assertAlmostEqual(rm.cpu_op_factor, 0.6)
E_MEM = 512
MAX_MU = 2048
# create dummy resource model environment
- reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+ reg = ResourceModelRegistrar(
+ dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
rm = UpbDummyRM(max_cu=MAX_CU, max_mu=MAX_MU)
reg.register("test_dc", rm)
c2 = createDummyContainerObject("c2", flavor="small")
rm.allocate(c2) # calculate allocation
self.assertEqual(len(rm._allocated_compute_instances), 2)
-
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
-"""
-Test suite to automatically test emulator REST API endpoints.
-"""
-
-import time
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import unittest
from emuvim.test.api_base import SimpleTestTopology
import subprocess
print('->>>>>>> vim-emu compute start -d datacenter0 -n vnf1 ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- subprocess.call("vim-emu compute start -d datacenter0 -n vnf1", shell=True)
+ subprocess.call(
+ "vim-emu compute start -d datacenter0 -n vnf1", shell=True)
print('->>>>>>> vim-emu compute start -d datacenter0 -n vnf2 ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- subprocess.call("vim-emu compute start -d datacenter0 -n vnf2", shell=True)
+ subprocess.call(
+ "vim-emu compute start -d datacenter0 -n vnf2", shell=True)
print('->>>>>>> vim-emu compute start -d datacenter0 -n vnf3 ->>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- subprocess.call("vim-emu compute start -d datacenter1 -n vnf3", shell=True)
+ subprocess.call(
+ "vim-emu compute start -d datacenter1 -n vnf3", shell=True)
subprocess.call("vim-emu compute list", shell=True)
print('->>>>>>> checking running nodes, compute list, and connectivity >>>>>>>>>>')
# check compute list result
self.assertTrue(len(self.dc[0].listCompute()) == 2)
self.assertTrue(len(self.dc[1].listCompute()) == 1)
- self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
- self.assertTrue(isinstance(self.dc[0].listCompute()[1], EmulatorCompute))
- self.assertTrue(isinstance(self.dc[1].listCompute()[0], EmulatorCompute))
+ self.assertTrue(isinstance(
+ self.dc[0].listCompute()[0], EmulatorCompute))
+ self.assertTrue(isinstance(
+ self.dc[0].listCompute()[1], EmulatorCompute))
+ self.assertTrue(isinstance(
+ self.dc[1].listCompute()[0], EmulatorCompute))
self.assertTrue(self.dc[0].listCompute()[1].name == "vnf1")
self.assertTrue(self.dc[0].listCompute()[0].name == "vnf2")
self.assertTrue(self.dc[1].listCompute()[0].name == "vnf3")
# check connectivity by using ping
- self.assertTrue(self.net.ping([self.dc[0].listCompute()[1], self.dc[0].listCompute()[0]]) <= 0.0)
- self.assertTrue(self.net.ping([self.dc[0].listCompute()[0], self.dc[1].listCompute()[0]]) <= 0.0)
- self.assertTrue(self.net.ping([self.dc[1].listCompute()[0], self.dc[0].listCompute()[1]]) <= 0.0)
+ self.assertTrue(self.net.ping(
+ [self.dc[0].listCompute()[1], self.dc[0].listCompute()[0]]) <= 0.0)
+ self.assertTrue(self.net.ping(
+ [self.dc[0].listCompute()[0], self.dc[1].listCompute()[0]]) <= 0.0)
+ self.assertTrue(self.net.ping(
+ [self.dc[1].listCompute()[0], self.dc[0].listCompute()[1]]) <= 0.0)
print('network add vnf1 vnf2->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- output = subprocess.check_output("vim-emu network add -src vnf1 -dst vnf2 -b -c 10", shell=True)
+ output = subprocess.check_output(
+ "vim-emu network add -src vnf1 -dst vnf2 -b -c 10", shell=True)
self.assertTrue("add-flow" in output)
self.assertTrue("success" in output)
print('network remove vnf1 vnf2->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- output = subprocess.check_output("vim-emu network remove -src vnf1 -dst vnf2 -b", shell=True)
+ output = subprocess.check_output(
+ "vim-emu network remove -src vnf1 -dst vnf2 -b", shell=True)
self.assertTrue("del-flows" in output)
self.assertTrue("success" in output)
print('>>>>> checking --> vim-emu compute stop -d datacenter0 -n vnf2 ->>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- output = subprocess.check_output("vim-emu compute stop -d datacenter0 -n vnf2", shell=True)
+ output = subprocess.check_output(
+ "vim-emu compute stop -d datacenter0 -n vnf2", shell=True)
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 2)
print('>>>>> checking --> vim-emu compute status -d datacenter0 -n vnf1 ->>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- output = subprocess.check_output("vim-emu compute status -d datacenter0 -n vnf1", shell=True)
+ output = subprocess.check_output(
+ "vim-emu compute status -d datacenter0 -n vnf1", shell=True)
output = ast.literal_eval(output)
# check compute status result
print('->>>>> checking --> vim-emu datacenter status -d datacenter0 ->>>>>>>>')
print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')
- output = subprocess.check_output("vim-emu datacenter status -d datacenter0", shell=True)
+ output = subprocess.check_output(
+ "vim-emu datacenter status -d datacenter0", shell=True)
# check datacenter status result
self.assertTrue("datacenter0" in output)
self.stopApi()
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import time
import requests
import json
-import os
import unittest
from emuvim.test.base import SimpleTestTopology
from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
from emuvim.api.sonata.dummygatekeeper import initialize_GK, parse_interface
-import mininet.clean
from ipaddress import ip_network
PACKAGE_PATH = "misc/sonata-demo-service.son"
class testSonataDummyGatekeeper(SimpleTestTopology):
-# @unittest.skip("disabled")
+ # @unittest.skip("disabled")
def test_GK_Api_start_service(self):
# create network
- self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0, enable_learning=True)
+ self.createNet(nswitches=0, ndatacenter=2, nhosts=2,
+ ndockers=0, enable_learning=True)
# setup links
self.net.addLink(self.dc[0], self.h[0])
self.net.addLink(self.dc[0], self.dc[1])
# instantiate service
self.service_uuid = json.loads(r.text).get("service_uuid")
- r2 = requests.post("http://127.0.0.1:55000/instantiations", data=json.dumps({"service_uuid": self.service_uuid}))
+ r2 = requests.post("http://127.0.0.1:55000/instantiations",
+ data=json.dumps({"service_uuid": self.service_uuid}))
self.assertEqual(r2.status_code, 201)
# give the emulator some time to instantiate everything
r3 = requests.get("http://127.0.0.1:55000/packages")
self.assertEqual(len(json.loads(r3.text).get("service_uuid_list")), 1)
r4 = requests.get("http://127.0.0.1:55000/instantiations")
- self.assertEqual(len(json.loads(r4.text).get("service_instantiations_list")), 1)
+ self.assertEqual(len(json.loads(r4.text).get(
+ "service_instantiations_list")), 1)
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 3)
# check compute list result
self.assertEqual(len(self.dc[0].listCompute()), 2)
# check connectivity by using ping
- ELAN_list=[]
+ ELAN_list = []
# check E-Line connection, by checking the IP addresses
for link in self.net.deployed_elines:
- vnf_src, intf_src, vnf_sap_docker_name = parse_interface(link['connection_points_reference'][0])
+ vnf_src, intf_src, vnf_sap_docker_name = parse_interface(
+ link['connection_points_reference'][0])
print vnf_src, intf_src
src = self.net.getNodeByName(vnf_src)
if not src:
continue
network_list = src.getNetworkStatus()
- src_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == intf_src][0]
- src_mask = [intf['netmask'] for intf in network_list if intf['intf_name'] == intf_src][0]
+ src_ip = [intf['ip']
+ for intf in network_list if intf['intf_name'] == intf_src][0]
+ src_mask = [intf['netmask']
+ for intf in network_list if intf['intf_name'] == intf_src][0]
- vnf_dst, intf_dst, vnf_sap_docker_name = parse_interface(link['connection_points_reference'][1])
+ vnf_dst, intf_dst, vnf_sap_docker_name = parse_interface(
+ link['connection_points_reference'][1])
dst = self.net.getNodeByName(vnf_dst)
if not dst:
continue
network_list = dst.getNetworkStatus()
- dst_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == intf_dst][0]
- dst_mask = [intf['netmask'] for intf in network_list if intf['intf_name'] == intf_dst][0]
+ dst_ip = [intf['ip']
+ for intf in network_list if intf['intf_name'] == intf_dst][0]
+ dst_mask = [intf['netmask']
+ for intf in network_list if intf['intf_name'] == intf_dst][0]
- print "src = {0}:{1} ip={2} ".format(vnf_src, intf_src, src_ip, src_mask)
- print "dst = {0}:{1} ip={2} ".format(vnf_dst, intf_dst, dst_ip, dst_mask)
+ print "src = {0}:{1} ip={2} ".format(
+ vnf_src, intf_src, src_ip, src_mask)
+ print "dst = {0}:{1} ip={2} ".format(
+ vnf_dst, intf_dst, dst_ip, dst_mask)
# check if the E-Line IP's are in the same subnet
ret = ip_network(u'{0}'.format(src_ip, src_mask), strict=False)\
- .compare_networks(ip_network(u'{0}'.format(dst_ip, dst_mask),strict=False))
+ .compare_networks(ip_network(u'{0}'.format(dst_ip, dst_mask), strict=False))
self.assertTrue(ret == 0)
-
for vnf in self.dc[0].listCompute():
# check E LAN connection
network_list = vnf.getNetworkStatus()
- mgmt_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == 'mgmt']
+ mgmt_ip = [intf['ip']
+ for intf in network_list if intf['intf_name'] == 'mgmt']
self.assertTrue(len(mgmt_ip) > 0)
ip_address = mgmt_ip[0]
ELAN_list.append(ip_address)
print ip_address
- # check ELAN connection by ping over the mgmt network (needs to be configured as ELAN in the test service)
+ # check ELAN connection by ping over the mgmt network (needs to be
+ # configured as ELAN in the test service)
for vnf in self.dc[0].listCompute():
network_list = vnf.getNetworkStatus()
- mgmt_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == 'mgmt']
+ mgmt_ip = [intf['ip']
+ for intf in network_list if intf['intf_name'] == 'mgmt']
self.assertTrue(len(mgmt_ip) > 0)
ip_address = mgmt_ip[0]
print ELAN_list
test_ip_list.remove(ip_address)
for ip in test_ip_list:
# only take ip address, without netmask
- p = self.net.ping([vnf],manualdestip=ip.split('/')[0])
+ p = self.net.ping([vnf], manualdestip=ip.split('/')[0])
print p
self.assertTrue(p <= 0.0)
self.stopNet()
initialize_GK()
- #@unittest.skip("disabled")
+ # @unittest.skip("disabled")
def test_GK_Api_stop_service(self):
# create network
self.createNet(ndatacenter=2, nhosts=2)
# instantiate service
self.service_uuid = json.loads(r.text).get("service_uuid")
- r2 = requests.post("http://127.0.0.1:55001/instantiations", data=json.dumps({"service_uuid": self.service_uuid}))
+ r2 = requests.post("http://127.0.0.1:55001/instantiations",
+ data=json.dumps({"service_uuid": self.service_uuid}))
self.assertEqual(r2.status_code, 201)
# give the emulator some time to instantiate everything
r3 = requests.get("http://127.0.0.1:55001/packages")
self.assertEqual(len(json.loads(r3.text).get("service_uuid_list")), 1)
r4 = requests.get("http://127.0.0.1:55001/instantiations")
- self.assertEqual(len(json.loads(r4.text).get("service_instantiations_list")), 1)
+ self.assertEqual(len(json.loads(r4.text).get(
+ "service_instantiations_list")), 1)
# check number of running nodes
self.assertTrue(len(self.getContainernetContainers()) == 3)
self.assertEqual(len(self.dc[0].listCompute()), 2)
# stop the service
- service_instance_uuid = json.loads(r2.text).get("service_instance_uuid")
+ service_instance_uuid = json.loads(
+ r2.text).get("service_instance_uuid")
self.assertTrue(service_instance_uuid is not None)
- requests.delete("http://127.0.0.1:55001/instantiations", data=json.dumps({"service_uuid": self.service_uuid, "service_instance_uuid":service_instance_uuid}))
+ requests.delete("http://127.0.0.1:55001/instantiations", data=json.dumps(
+ {"service_uuid": self.service_uuid, "service_instance_uuid": service_instance_uuid}))
r5 = requests.get("http://127.0.0.1:55001/instantiations")
- self.assertTrue(len(json.loads(r5.text).get("service_instantiations_list")), 0) # note that there was 1 instance before
+ # note that there was 1 instance before
+ self.assertTrue(len(json.loads(r5.text).get(
+ "service_instantiations_list")), 0)
# stop Mininet network
self.stopNet()
initialize_GK()
-
@unittest.skip("disabled")
def test_GK_stress_service(self):
# create network
# instantiate service
self.service_uuid = json.loads(r.text).get("service_uuid")
- r2 = requests.post("http://127.0.0.1:55002/instantiations", data=json.dumps({"service_uuid": self.service_uuid}))
+ r2 = requests.post("http://127.0.0.1:55002/instantiations",
+ data=json.dumps({"service_uuid": self.service_uuid}))
self.assertEqual(r2.status_code, 201)
# give the emulator some time to instantiate everything
r3 = requests.get("http://127.0.0.1:55002/packages")
self.assertEqual(len(json.loads(r3.text).get("service_uuid_list")), 1)
r4 = requests.get("http://127.0.0.1:55002/instantiations")
- self.assertEqual(len(json.loads(r4.text).get("service_instantiations_list")), 1)
+ self.assertEqual(len(json.loads(r4.text).get(
+ "service_instantiations_list")), 1)
# stop the service
- service_instance_uuid = json.loads(r2.text).get("service_instance_uuid")
+ service_instance_uuid = json.loads(
+ r2.text).get("service_instance_uuid")
self.assertTrue(service_instance_uuid is not None)
- requests.delete("http://127.0.0.1:55002/instantiations", data=json.dumps({"service_uuid": self.service_uuid, "service_instance_uuid":service_instance_uuid}))
+ requests.delete("http://127.0.0.1:55002/instantiations", data=json.dumps(
+ {"service_uuid": self.service_uuid, "service_instance_uuid": service_instance_uuid}))
r5 = requests.get("http://127.0.0.1:55002/instantiations")
- self.assertTrue(len(json.loads(r5.text).get("service_instantiations_list")), 0) # note that there was 1 instance before
+ # note that there was 1 instance before
+ self.assertTrue(len(json.loads(r5.text).get(
+ "service_instantiations_list")), 0)
# stop Mininet network
self.stopNet()
initialize_GK()
-
-