Refactoring: Made the complete codebase PEP8 compliant.
Only PEP8 rule E501 (line too long) is not yet reflected
by this change.
The patch also adds automated code style checks to the
CI test stage using flake8.
It will make the tests fail if there is a code style
violation.
Change-Id: I90956dd424a46691546ef720351757d3c43451a7
Signed-off-by: peusterm <manuel.peuster@uni-paderborn.de>
diff --git a/src/emuvim/dcemulator/__init__.py b/src/emuvim/dcemulator/__init__.py
index 395c0ce..d888119 100755
--- a/src/emuvim/dcemulator/__init__.py
+++ b/src/emuvim/dcemulator/__init__.py
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
diff --git a/src/emuvim/dcemulator/link.py b/src/emuvim/dcemulator/link.py
index 395c0ce..d888119 100755
--- a/src/emuvim/dcemulator/link.py
+++ b/src/emuvim/dcemulator/link.py
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
diff --git a/src/emuvim/dcemulator/monitoring.py b/src/emuvim/dcemulator/monitoring.py
index 21985cb..6ed87cb 100755
--- a/src/emuvim/dcemulator/monitoring.py
+++ b/src/emuvim/dcemulator/monitoring.py
@@ -1,38 +1,34 @@
-"""
-Copyright (c) 2015 SONATA-NFV
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
-import sys
-from mininet.node import OVSSwitch
+from mininet.node import OVSSwitch
import ast
import time
-from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \
- pushadd_to_gateway, push_to_gateway, delete_from_gateway
+from prometheus_client import Gauge, CollectorRegistry, \
+ pushadd_to_gateway, delete_from_gateway
import threading
from subprocess import Popen
import os
@@ -52,6 +48,7 @@
COOKIE_MASK = 0xffffffff
+
class DCNetworkMonitor():
def __init__(self, net):
self.net = net
@@ -71,8 +68,8 @@
self.prom_rx_byte_count = Gauge('sonemu_rx_count_bytes', 'Total number of bytes received',
['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
- self.prom_metrics={'tx_packets':self.prom_tx_packet_count, 'rx_packets':self.prom_rx_packet_count,
- 'tx_bytes':self.prom_tx_byte_count,'rx_bytes':self.prom_rx_byte_count}
+ self.prom_metrics = {'tx_packets': self.prom_tx_packet_count, 'rx_packets': self.prom_rx_packet_count,
+ 'tx_bytes': self.prom_tx_byte_count, 'rx_bytes': self.prom_rx_byte_count}
# list of installed metrics to monitor
# each entry can contain this data
@@ -98,17 +95,20 @@
self.monitor_thread = threading.Thread(target=self.get_network_metrics)
self.monitor_thread.start()
- self.monitor_flow_thread = threading.Thread(target=self.get_flow_metrics)
+ self.monitor_flow_thread = threading.Thread(
+ target=self.get_flow_metrics)
self.monitor_flow_thread.start()
# helper tools
- # cAdvisor, Prometheus pushgateway are started as external container, to gather monitoring metric in son-emu
+        # cAdvisor and the Prometheus pushgateway are started as external
+        # containers to gather monitoring metrics in son-emu
self.pushgateway_process = self.start_PushGateway()
self.cadvisor_process = self.start_cAdvisor()
-
# first set some parameters, before measurement can start
- def setup_flow(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=0):
+
+ def setup_flow(self, vnf_name, vnf_interface=None,
+ metric='tx_packets', cookie=0):
flow_metric = {}
@@ -133,8 +133,10 @@
break
if not vnf_switch:
- logging.exception("vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface))
- return "vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface)
+ logging.exception("vnf switch of {0}:{1} not found!".format(
+ vnf_name, vnf_interface))
+ return "vnf switch of {0}:{1} not found!".format(
+ vnf_name, vnf_interface)
try:
# default port direction to monitor
@@ -144,7 +146,8 @@
next_node = self.net.getNodeByName(vnf_switch)
if not isinstance(next_node, OVSSwitch):
- logging.info("vnf: {0} is not connected to switch".format(vnf_name))
+ logging.info(
+ "vnf: {0} is not connected to switch".format(vnf_name))
return
flow_metric['previous_measurement'] = 0
@@ -158,8 +161,10 @@
self.flow_metrics.append(flow_metric)
self.monitor_flow_lock.release()
- logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))
- return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)
+ logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(
+ vnf_name, vnf_interface, metric, cookie))
+ return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(
+ vnf_name, vnf_interface, metric, cookie)
except Exception as ex:
logging.exception("setup_metric error.")
@@ -187,17 +192,21 @@
labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \
set(float('nan'))
- delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')
+ delete_from_gateway(
+ self.pushgateway, job='sonemu-SDNcontroller')
self.monitor_flow_lock.release()
- logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))
- return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)
+ logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(
+ vnf_name, vnf_interface, metric, cookie))
+ return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(
+ vnf_name, vnf_interface, metric, cookie)
- return 'Error stopping monitoring flow: {0} on {1}:{2}'.format(metric, vnf_name, vnf_interface)
-
+ return 'Error stopping monitoring flow: {0} on {1}:{2}'.format(
+ metric, vnf_name, vnf_interface)
# first set some parameters, before measurement can start
+
def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):
network_metric = {}
@@ -221,8 +230,10 @@
break
if 'mon_port' not in network_metric:
- logging.exception("vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface))
- return "vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface)
+ logging.exception("vnf interface {0}:{1} not found!".format(
+ vnf_name, vnf_interface))
+ return "vnf interface {0}:{1} not found!".format(
+ vnf_name, vnf_interface)
try:
# default port direction to monitor
@@ -242,13 +253,13 @@
next_node = self.net.getNodeByName(vnf_switch)
if not isinstance(next_node, OVSSwitch):
- logging.info("vnf: {0} is not connected to switch".format(vnf_name))
+ logging.info(
+ "vnf: {0} is not connected to switch".format(vnf_name))
return
network_metric['previous_measurement'] = 0
network_metric['previous_monitor_time'] = 0
-
network_metric['switch_dpid'] = int(str(next_node.dpid), 16)
network_metric['metric_key'] = metric
@@ -256,9 +267,10 @@
self.network_metrics.append(network_metric)
self.monitor_lock.release()
-
- logging.info('Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))
- return 'Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)
+ logging.info('Started monitoring: {2} on {0}:{1}'.format(
+ vnf_name, vnf_interface, metric))
+ return 'Started monitoring: {2} on {0}:{1}'.format(
+ vnf_name, vnf_interface, metric)
except Exception as ex:
logging.exception("setup_metric error.")
@@ -282,7 +294,7 @@
self.network_metrics.remove(metric_dict)
# set values to NaN, prometheus api currently does not support removal of metrics
- #self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))
+ # self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))
self.prom_metrics[metric_dict['metric_key']]. \
labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=None). \
set(float('nan'))
@@ -291,21 +303,28 @@
# 1 single monitor job for all metrics of the SDN controller
# we can only remove from the pushgateway grouping keys(labels) which we have defined for the add_to_pushgateway
# we can not specify labels from the metrics to be removed
- # if we need to remove the metrics seperatelty, we need to give them a separate grouping key, and probably a diffferent registry also
- delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')
+                # if we need to remove the metrics separately, we need to give
+                # them a separate grouping key, and probably a different
+ # registry also
+ delete_from_gateway(
+ self.pushgateway, job='sonemu-SDNcontroller')
self.monitor_lock.release()
- logging.info('Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))
- return 'Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)
+ logging.info('Stopped monitoring: {2} on {0}:{1}'.format(
+ vnf_name, vnf_interface, metric))
+ return 'Stopped monitoring: {2} on {0}:{1}'.format(
+ vnf_name, vnf_interface, metric)
# delete everything from this vnf
elif metric_dict['vnf_name'] == vnf_name and vnf_interface is None and metric is None:
self.monitor_lock.acquire()
self.network_metrics.remove(metric_dict)
- logging.info('remove metric from monitor: vnf_name:{0} vnf_interface:{1} mon_port:{2}'.format(metric_dict['vnf_name'], metric_dict['vnf_interface'], metric_dict['mon_port']))
+ logging.info('remove metric from monitor: vnf_name:{0} vnf_interface:{1} mon_port:{2}'.format(
+ metric_dict['vnf_name'], metric_dict['vnf_interface'], metric_dict['mon_port']))
- delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')
+ delete_from_gateway(
+ self.pushgateway, job='sonemu-SDNcontroller')
self.monitor_lock.release()
continue
@@ -313,10 +332,12 @@
logging.info('Stopped monitoring vnf: {0}'.format(vnf_name))
return 'Stopped monitoring: {0}'.format(vnf_name)
else:
- return 'Error stopping monitoring metric: {0} on {1}:{2}'.format(metric, vnf_name, vnf_interface)
+ return 'Error stopping monitoring metric: {0} on {1}:{2}'.format(
+ metric, vnf_name, vnf_interface)
# get all metrics defined in the list and export it to Prometheus
+
def get_flow_metrics(self):
while self.start_monitoring:
@@ -329,13 +350,13 @@
data['cookie_mask'] = COOKIE_MASK
if 'tx' in flow_dict['metric_key']:
- data['match'] = {'in_port':flow_dict['mon_port']}
+ data['match'] = {'in_port': flow_dict['mon_port']}
elif 'rx' in flow_dict['metric_key']:
data['out_port'] = flow_dict['mon_port']
-
# query Ryu
- ret = self.net.ryu_REST('stats/flow', dpid=flow_dict['switch_dpid'], data=data)
+ ret = self.net.ryu_REST(
+ 'stats/flow', dpid=flow_dict['switch_dpid'], data=data)
if isinstance(ret, dict):
flow_stat_dict = ret
elif isinstance(ret, basestring):
@@ -347,12 +368,13 @@
self.set_flow_metric(flow_dict, flow_stat_dict)
-
try:
if len(self.flow_metrics) > 0:
- pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
- except Exception, e:
- logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))
+ pushadd_to_gateway(
+ self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
+ except Exception as e:
+ logging.warning(
+ "Pushgateway not reachable: {0} {1}".format(Exception, e))
self.monitor_flow_lock.release()
time.sleep(1)
@@ -363,7 +385,8 @@
self.monitor_lock.acquire()
# group metrics by dpid to optimize the rest api calls
- dpid_list = [metric_dict['switch_dpid'] for metric_dict in self.network_metrics]
+ dpid_list = [metric_dict['switch_dpid']
+ for metric_dict in self.network_metrics]
dpid_set = set(dpid_list)
for dpid in dpid_set:
@@ -378,28 +401,30 @@
port_stat_dict = None
metric_list = [metric_dict for metric_dict in self.network_metrics
- if int(metric_dict['switch_dpid'])==int(dpid)]
+ if int(metric_dict['switch_dpid']) == int(dpid)]
for metric_dict in metric_list:
self.set_network_metric(metric_dict, port_stat_dict)
try:
if len(self.network_metrics) > 0:
- pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
- except Exception, e:
- logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))
+ pushadd_to_gateway(
+ self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
+ except Exception as e:
+ logging.warning(
+ "Pushgateway not reachable: {0} {1}".format(Exception, e))
self.monitor_lock.release()
time.sleep(1)
- # add metric to the list to export to Prometheus, parse the Ryu port-stats reply
+ # add metric to the list to export to Prometheus, parse the Ryu port-stats
+ # reply
def set_network_metric(self, metric_dict, port_stat_dict):
# vnf tx is the datacenter switch rx and vice-versa
metric_key = self.switch_tx_rx(metric_dict['metric_key'])
switch_dpid = metric_dict['switch_dpid']
vnf_name = metric_dict['vnf_name']
vnf_interface = metric_dict['vnf_interface']
- previous_measurement = metric_dict['previous_measurement']
previous_monitor_time = metric_dict['previous_monitor_time']
mon_port = metric_dict['mon_port']
for port_stat in port_stat_dict[str(switch_dpid)]:
@@ -407,7 +432,8 @@
if port_stat['port_no'] == 'LOCAL':
continue
if int(port_stat['port_no']) == int(mon_port):
- port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)
+ port_uptime = port_stat['duration_sec'] + \
+ port_stat['duration_nsec'] * 10 ** (-9)
this_measurement = int(port_stat[metric_key])
# set prometheus metric
@@ -418,27 +444,26 @@
# also the rate is calculated here, but not used for now
# (rate can be easily queried from prometheus also)
if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:
- metric_dict['previous_measurement'] = int(port_stat[metric_key])
+ metric_dict['previous_measurement'] = int(
+ port_stat[metric_key])
metric_dict['previous_monitor_time'] = port_uptime
# do first measurement
- #time.sleep(1)
- #self.monitor_lock.release()
+ # time.sleep(1)
+ # self.monitor_lock.release()
# rate cannot be calculated yet (need a first measurement)
- metric_rate = None
-
- else:
- time_delta = (port_uptime - metric_dict['previous_monitor_time'])
- #metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)
-
metric_dict['previous_measurement'] = this_measurement
metric_dict['previous_monitor_time'] = port_uptime
return
- logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
- logging.exception('monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))
- logging.exception('monitored network_metrics:{0}'.format(self.network_metrics))
+ logging.exception('metric {0} not found on {1}:{2}'.format(
+ metric_key, vnf_name, vnf_interface))
+ logging.exception(
+ 'monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))
+ logging.exception(
+ 'monitored network_metrics:{0}'.format(self.network_metrics))
logging.exception('port dict:{0}'.format(port_stat_dict))
- return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)
+ return 'metric {0} not found on {1}:{2}'.format(
+ metric_key, vnf_name, vnf_interface)
def set_flow_metric(self, metric_dict, flow_stat_dict):
# vnf tx is the datacenter switch rx and vice-versa
@@ -446,8 +471,6 @@
switch_dpid = metric_dict['switch_dpid']
vnf_name = metric_dict['vnf_name']
vnf_interface = metric_dict['vnf_interface']
- previous_measurement = metric_dict['previous_measurement']
- previous_monitor_time = metric_dict['previous_monitor_time']
cookie = metric_dict['cookie']
counter = 0
@@ -458,21 +481,24 @@
counter += flow_stat['packet_count']
# flow_uptime disabled for now (can give error)
- #flow_stat = flow_stat_dict[str(switch_dpid)][0]
- #flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)
+ # flow_stat = flow_stat_dict[str(switch_dpid)][0]
+ # flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)
self.prom_metrics[metric_dict['metric_key']]. \
labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \
set(counter)
def start_Prometheus(self, port=9090):
- # prometheus.yml configuration file is located in the same directory as this file
+ # prometheus.yml configuration file is located in the same directory as
+ # this file
cmd = ["docker",
"run",
"--rm",
"-p", "{0}:9090".format(port),
- "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(os.path.dirname(os.path.abspath(__file__))),
- "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(os.path.dirname(os.path.abspath(__file__))),
+ "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(
+ os.path.dirname(os.path.abspath(__file__))),
+ "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(
+ os.path.dirname(os.path.abspath(__file__))),
"--name", "prometheus",
"prom/prometheus"
]
@@ -502,12 +528,12 @@
"--volume=/var/lib/docker/:/var/lib/docker:ro",
"--publish={0}:8080".format(port),
"--name=cadvisor",
- "--label",'com.containernet=""',
+ "--label", 'com.containernet=""',
"--detach=true",
"google/cadvisor:latest",
- #"--storage_duration=1m0s",
- #"--allow_dynamic_housekeeping=true",
- #"--housekeeping_interval=1s",
+ # "--storage_duration=1m0s",
+ # "--allow_dynamic_housekeeping=true",
+ # "--housekeeping_interval=1s",
]
logging.info('Start cAdvisor container {0}'.format(cmd))
return Popen(cmd)
@@ -518,7 +544,8 @@
self.monitor_thread.join()
self.monitor_flow_thread.join()
- # these containers are used for monitoring but are started now outside of son-emu
+ # these containers are used for monitoring but are started now outside
+ # of son-emu
if self.pushgateway_process is not None:
logging.info('stopping pushgateway container')
@@ -528,28 +555,28 @@
logging.info('stopping cadvisor container')
self._stop_container('cadvisor')
- def switch_tx_rx(self,metric=''):
+ def switch_tx_rx(self, metric=''):
# when monitoring vnfs, the tx of the datacenter switch is actually the rx of the vnf
- # so we need to change the metric name to be consistent with the vnf rx or tx
+ # so we need to change the metric name to be consistent with the vnf rx
+ # or tx
if 'tx' in metric:
- metric = metric.replace('tx','rx')
+ metric = metric.replace('tx', 'rx')
elif 'rx' in metric:
- metric = metric.replace('rx','tx')
+ metric = metric.replace('rx', 'tx')
return metric
def _stop_container(self, name):
- #container = self.dockercli.containers.get(name)
- #container.stop()
- #container.remove(force=True)
+ # container = self.dockercli.containers.get(name)
+ # container.stop()
+ # container.remove(force=True)
# the only robust way to stop these containers is via Popen, it seems
time.sleep(1)
cmd = ['docker', 'rm', '-f', name]
Popen(cmd)
-
def update_skewmon(self, vnf_name, resource_name, action):
ret = ''
@@ -558,11 +585,11 @@
configfile = open(config_file_path, 'a+')
try:
config = json.load(configfile)
- except:
- #not a valid json file or empty
+ except BaseException:
+ # not a valid json file or empty
config = {}
- #initialize config file
+ # initialize config file
if len(self.skewmon_metrics) == 0:
config = {}
json.dump(config, configfile)
@@ -576,14 +603,16 @@
if action == 'start':
# add a new vnf to monitor
config[key] = dict(VNF_NAME=vnf_name,
- VNF_ID=vnf_id,
- VNF_METRIC=resource_name)
- ret = 'adding to skewness monitor: {0} {1} '.format(vnf_name, resource_name)
+ VNF_ID=vnf_id,
+ VNF_METRIC=resource_name)
+ ret = 'adding to skewness monitor: {0} {1} '.format(
+ vnf_name, resource_name)
logging.info(ret)
elif action == 'stop':
# remove vnf to monitor
config.pop(key)
- ret = 'removing from skewness monitor: {0} {1} '.format(vnf_name, resource_name)
+ ret = 'removing from skewness monitor: {0} {1} '.format(
+ vnf_name, resource_name)
logging.info(ret)
self.skewmon_metrics = config
@@ -604,8 +633,8 @@
# start container if not running
ret += 'starting skewness monitor'
logging.info('starting skewness monitor')
- volumes = {'/sys/fs/cgroup':{'bind':'/sys/fs/cgroup', 'mode':'ro'},
- '/tmp/skewmon.cfg':{'bind':'/config.txt', 'mode':'ro'}}
+ volumes = {'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'},
+ '/tmp/skewmon.cfg': {'bind': '/config.txt', 'mode': 'ro'}}
self.dockercli.containers.run('skewmon',
detach=True,
volumes=volumes,
@@ -616,7 +645,8 @@
started = False
wait_time = 0
while not started:
- list1 = self.dockercli.containers.list(filters={'status': 'running', 'name': 'prometheus'})
+ list1 = self.dockercli.containers.list(
+ filters={'status': 'running', 'name': 'prometheus'})
if len(list1) >= 1:
time.sleep(1)
started = True
@@ -634,7 +664,6 @@
:return:
"""
-
if vnf_list is None:
vnf_list = []
if not isinstance(vnf_list, list):
@@ -644,8 +673,8 @@
return self.start_xterm(vnf_list)
-
# start an xterm for the specfified vnfs
+
def start_xterm(self, vnf_names):
# start xterm for all vnfs
for vnf_name in vnf_names:
@@ -660,13 +689,3 @@
if len(vnf_names) == 0:
ret = 'vnf list is empty, no xterms started'
return ret
-
-
-
-
-
-
-
-
-
-
diff --git a/src/emuvim/dcemulator/net.py b/src/emuvim/dcemulator/net.py
index ea9fd1c..005e272 100755
--- a/src/emuvim/dcemulator/net.py
+++ b/src/emuvim/dcemulator/net.py
@@ -1,30 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
import site
@@ -36,13 +34,13 @@
import json
from mininet.net import Containernet
-from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
+from mininet.node import OVSSwitch, OVSKernelSwitch, Docker, RemoteController
from mininet.cli import CLI
from mininet.link import TCLink
from mininet.clean import cleanup
import networkx as nx
from emuvim.dcemulator.monitoring import DCNetworkMonitor
-from emuvim.dcemulator.node import Datacenter, EmulatorCompute, EmulatorExtSAP
+from emuvim.dcemulator.node import Datacenter, EmulatorCompute
from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
LOG = logging.getLogger("dcemulator.net")
@@ -56,6 +54,7 @@
# default cookie number for new flow-rules
DEFAULT_COOKIE = 10
+
class DCNetwork(Containernet):
"""
Wraps the original Mininet/Containernet class and provides
@@ -65,7 +64,10 @@
"""
def __init__(self, controller=RemoteController, monitor=False,
- enable_learning=False, # learning switch behavior of the default ovs switches icw Ryu controller can be turned off/on, needed for E-LAN functionality
+ enable_learning=False,
+ # learning switch behavior of the default ovs switches icw Ryu
+ # controller can be turned off/on, needed for E-LAN
+ # functionality
dc_emulation_max_cpu=1.0, # fraction of overall CPU time for emulation
dc_emulation_max_mem=512, # emulation max mem in MB
**kwargs):
@@ -78,13 +80,13 @@
# members
self.dcs = {}
self.ryu_process = None
- #list of deployed nsds.E_Lines and E_LANs (uploaded from the dummy gatekeeper)
+ # list of deployed nsds.E_Lines and E_LANs (uploaded from the dummy
+ # gatekeeper)
self.deployed_nsds = []
self.deployed_elines = []
self.deployed_elans = []
self.installed_chains = []
-
# always cleanup environment before we start the emulator
self.killRyu()
cleanup()
@@ -95,7 +97,7 @@
# default switch configuration
enable_ryu_learning = False
- if enable_learning :
+ if enable_learning:
self.failMode = 'standalone'
enable_ryu_learning = True
else:
@@ -138,7 +140,8 @@
"""
if label in self.dcs:
raise Exception("Data center label already exists: %s" % label)
- dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
+ dc = Datacenter(label, metadata=metadata,
+ resource_log_path=resource_log_path)
dc.net = self # set reference to network
self.dcs[label] = dc
dc.create() # finally create the data center in our Mininet instance
@@ -154,31 +157,32 @@
assert node2 is not None
# ensure type of node1
- if isinstance( node1, basestring ):
+ if isinstance(node1, basestring):
if node1 in self.dcs:
node1 = self.dcs[node1].switch
- if isinstance( node1, Datacenter ):
+ if isinstance(node1, Datacenter):
node1 = node1.switch
# ensure type of node2
- if isinstance( node2, basestring ):
+ if isinstance(node2, basestring):
if node2 in self.dcs:
node2 = self.dcs[node2].switch
- if isinstance( node2, Datacenter ):
+ if isinstance(node2, Datacenter):
node2 = node2.switch
# try to give containers a default IP
- if isinstance( node1, Docker ):
+ if isinstance(node1, Docker):
if "params1" not in params:
params["params1"] = {}
if "ip" not in params["params1"]:
params["params1"]["ip"] = self.getNextIp()
- if isinstance( node2, Docker ):
+ if isinstance(node2, Docker):
if "params2" not in params:
params["params2"] = {}
if "ip" not in params["params2"]:
params["params2"]["ip"] = self.getNextIp()
# ensure that we allow TCLinks between data centers
# TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
- # see Containernet issue: https://github.com/mpeuster/containernet/issues/3
+ # see Containernet issue:
+ # https://github.com/mpeuster/containernet/issues/3
if "cls" not in params:
params["cls"] = TCLink
@@ -197,7 +201,6 @@
node2_port_id = params["params2"]["id"]
node2_port_name = link.intf2.name
-
# add edge and assigned port number to graph in both directions between node1 and node2
# port_id: id given in descriptor (if available, otherwise same as port)
# port: portnumber assigned by Containernet
@@ -215,23 +218,24 @@
attr_number = None
attr_dict[attr] = attr_number
-
attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
'src_port_name': node1_port_name,
- 'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
+ 'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
'dst_port_name': node2_port_name}
attr_dict2.update(attr_dict)
- self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)
+ self.DCNetwork_graph.add_edge(
+ node1.name, node2.name, attr_dict=attr_dict2)
attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
'src_port_name': node2_port_name,
- 'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
+ 'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
'dst_port_name': node1_port_name}
attr_dict2.update(attr_dict)
- self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
+ self.DCNetwork_graph.add_edge(
+ node2.name, node1.name, attr_dict=attr_dict2)
LOG.debug("addLink: n1={0} intf1={1} -- n2={2} intf2={3}".format(
- str(node1),node1_port_name, str(node2), node2_port_name))
+ str(node1), node1_port_name, str(node2), node2_port_name))
return link
@@ -248,21 +252,24 @@
# TODO we might decrease the loglevel to debug:
try:
self.DCNetwork_graph.remove_edge(node2.name, node1.name)
- except:
- LOG.warning("%s, %s not found in DCNetwork_graph." % ((node2.name, node1.name)))
+ except BaseException:
+ LOG.warning("%s, %s not found in DCNetwork_graph." %
+ ((node2.name, node1.name)))
try:
self.DCNetwork_graph.remove_edge(node1.name, node2.name)
- except:
- LOG.warning("%s, %s not found in DCNetwork_graph." % ((node1.name, node2.name)))
+ except BaseException:
+ LOG.warning("%s, %s not found in DCNetwork_graph." %
+ ((node1.name, node2.name)))
- def addDocker( self, label, **params ):
+ def addDocker(self, label, **params):
"""
Wrapper for addDocker method to use custom container class.
"""
self.DCNetwork_graph.add_node(label, type=params.get('type', 'docker'))
- return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
+ return Containernet.addDocker(
+ self, label, cls=EmulatorCompute, **params)
- def removeDocker( self, label, **params):
+ def removeDocker(self, label, **params):
"""
Wrapper for removeDocker method to update graph.
"""
@@ -274,7 +281,7 @@
Wrapper for addExtSAP method to store SAP also in graph.
"""
# make sure that 'type' is set
- params['type'] = params.get('type','sap_ext')
+ params['type'] = params.get('type', 'sap_ext')
self.DCNetwork_graph.add_node(sap_name, type=params['type'])
return Containernet.addExtSAP(self, sap_name, sap_ip, **params)
@@ -285,22 +292,24 @@
self.DCNetwork_graph.remove_node(sap_name)
return Containernet.removeExtSAP(self, sap_name)
- def addSwitch( self, name, add_to_graph=True, **params ):
+ def addSwitch(self, name, add_to_graph=True, **params):
"""
Wrapper for addSwitch method to store switch also in graph.
"""
# add this switch to the global topology overview
if add_to_graph:
- self.DCNetwork_graph.add_node(name, type=params.get('type','switch'))
+ self.DCNetwork_graph.add_node(
+ name, type=params.get('type', 'switch'))
# set the learning switch behavior
- if 'failMode' in params :
+ if 'failMode' in params:
failMode = params['failMode']
- else :
+ else:
failMode = self.failMode
- s = Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)
+ s = Containernet.addSwitch(
+ self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)
return s
@@ -331,7 +340,6 @@
# stop Ryu controller
self.killRyu()
-
def CLI(self):
CLI(self)
@@ -343,7 +351,6 @@
:return:
"""
src_sw = None
- src_sw_inport_nr = 0
src_sw_inport_name = None
# get a vlan tag for this E-LAN
@@ -364,20 +371,20 @@
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_src_interface or
- link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
+ link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
# found the right link and connected switch
src_sw = connected_sw
- src_sw_inport_nr = link_dict[link]['dst_port_nr']
src_sw_inport_name = link_dict[link]['dst_port_name']
break
# set the tag on the dc switch interface
- LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(vnf_src_name, vnf_src_interface,vlan))
+ LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(
+ vnf_src_name, vnf_src_interface, vlan))
switch_node = self.getNodeByName(src_sw)
self._set_vlan_tag(switch_node, src_sw_inport_name, vlan)
def _addMonitorFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None,
- tag=None, **kwargs):
+ tag=None, **kwargs):
"""
Add a monitoring flow entry that adds a special flowentry/counter at the begin or end of a chain.
So this monitoring flowrule exists on top of a previously defined chain rule and uses the same vlan tag/routing.
@@ -400,7 +407,7 @@
LOG.debug("call AddMonitorFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
- #check if port is specified (vnf:port)
+ # check if port is specified (vnf:port)
if vnf_src_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
@@ -443,31 +450,34 @@
try:
# returns the first found shortest path
# if all shortest paths are wanted, use: all_shortest_paths
- path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
- except:
+ path = nx.shortest_path(
+ self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+ except BaseException:
LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
vnf_src_name, vnf_dst_name, src_sw, dst_sw))
LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
for e, v in self.DCNetwork_graph.edges():
LOG.debug("%r" % self.DCNetwork_graph[e][v])
- return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+ return "No path could be found between {0} and {1}".format(
+ vnf_src_name, vnf_dst_name)
- LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+ LOG.info("Path between {0} and {1}: {2}".format(
+ vnf_src_name, vnf_dst_name, path))
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
cmd = kwargs.get('cmd')
- #iterate through the path to install the flow-entries
- for i in range(0,len(path)):
+ # iterate through the path to install the flow-entries
+ for i in range(0, len(path)):
current_node = self.getNodeByName(current_hop)
- if path.index(current_hop) < len(path)-1:
- next_hop = path[path.index(current_hop)+1]
+ if path.index(current_hop) < len(path) - 1:
+ next_hop = path[path.index(current_hop) + 1]
else:
- #last switch reached
+ # last switch reached
next_hop = vnf_dst_name
next_node = self.getNodeByName(next_hop)
@@ -475,7 +485,7 @@
if next_hop == vnf_dst_name:
switch_outport_nr = dst_sw_outport_nr
LOG.info("end node reached: {0}".format(vnf_dst_name))
- elif not isinstance( next_node, OVSSwitch ):
+ elif not isinstance(next_node, OVSSwitch):
LOG.info("Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
@@ -483,9 +493,8 @@
index_edge_out = 0
switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
-
- # set of entry via ovs-ofctl
- if isinstance( current_node, OVSSwitch ):
+ # set of entry via ovs-ofctl
+ if isinstance(current_node, OVSSwitch):
kwargs['vlan'] = tag
kwargs['path'] = path
kwargs['current_hop'] = current_hop
@@ -497,33 +506,38 @@
monitor_placement = kwargs.get('monitor_placement').strip()
# put monitor flow at the dst switch
insert_flow = False
- if monitor_placement == 'tx' and path.index(current_hop) == 0: # first node:
+ # first node:
+ if monitor_placement == 'tx' and path.index(current_hop) == 0:
insert_flow = True
# put monitoring flow at the src switch
- elif monitor_placement == 'rx' and path.index(current_hop) == len(path) - 1: # last node:
+ # last node:
+ elif monitor_placement == 'rx' and path.index(current_hop) == len(path) - 1:
insert_flow = True
elif monitor_placement not in ['rx', 'tx']:
- LOG.exception('invalid monitor command: {0}'.format(monitor_placement))
-
+ LOG.exception(
+ 'invalid monitor command: {0}'.format(monitor_placement))
if self.controller == RemoteController and insert_flow:
- ## set flow entry via ryu rest api
- self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ # set flow entry via ryu rest api
+ self._set_flow_entry_ryu_rest(
+ current_node, switch_inport_nr, switch_outport_nr, **kwargs)
break
elif insert_flow:
- ## set flow entry via ovs-ofctl
- self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ # set flow entry via ovs-ofctl
+ self._set_flow_entry_dpctl(
+ current_node, switch_inport_nr, switch_outport_nr, **kwargs)
break
# take first link between switches by default
- if isinstance( next_node, OVSSwitch ):
+ if isinstance(next_node, OVSSwitch):
switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
- return "path {2} between {0} and {1}".format(vnf_src_name, vnf_dst_name, cmd)
+ return "path {2} between {0} and {1}".format(
+ vnf_src_name, vnf_dst_name, cmd)
-
- def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
+ def setChain(self, vnf_src_name, vnf_dst_name,
+ vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
"""
Chain 2 vnf interfaces together by installing the flowrules in the switches along their path.
Currently the path is found using the default networkx shortest path function.
@@ -549,17 +563,19 @@
# check if chain already exists
found_chains = [chain_dict for chain_dict in self.installed_chains if
- (chain_dict['vnf_src_name'] == vnf_src_name and chain_dict['vnf_src_interface'] == vnf_src_interface
- and chain_dict['vnf_dst_name'] == vnf_dst_name and chain_dict['vnf_dst_interface'] == vnf_dst_interface)]
+ (chain_dict['vnf_src_name'] == vnf_src_name and
+ chain_dict['vnf_src_interface'] == vnf_src_interface and
+ chain_dict['vnf_dst_name'] == vnf_dst_name and
+ chain_dict['vnf_dst_interface'] == vnf_dst_interface)]
if len(found_chains) > 0:
# this chain exists, so need an extra monitoring flow
# assume only 1 chain per vnf/interface pair
LOG.debug('*** installing monitoring chain on top of pre-defined chain from {0}:{1} -> {2}:{3}'.
- format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
+ format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
tag = found_chains[0]['tag']
ret = self._addMonitorFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface,
- tag=tag, table_id=0, **kwargs)
+ tag=tag, table_id=0, **kwargs)
return ret
else:
# no chain existing (or E-LAN) -> install normal chain
@@ -567,22 +583,24 @@
format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
pass
-
cmd = kwargs.get('cmd', 'add-flow')
if cmd == 'add-flow' or cmd == 'del-flows':
- ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
+ ret = self._chainAddFlow(
+ vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
if kwargs.get('bidirectional'):
if kwargs.get('path') is not None:
kwargs['path'] = list(reversed(kwargs.get('path')))
- ret = ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
+ ret = ret + '\n' + \
+ self._chainAddFlow(
+ vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
else:
ret = "Command unknown"
return ret
-
- def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
+ def _chainAddFlow(self, vnf_src_name, vnf_dst_name,
+ vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
src_sw = None
src_sw_inport_nr = 0
@@ -594,7 +612,7 @@
LOG.debug("call chainAddFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
- #check if port is specified (vnf:port)
+ # check if port is specified (vnf:port)
if vnf_src_interface is None:
# take first interface by default
connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
@@ -636,17 +654,20 @@
try:
# returns the first found shortest path
# if all shortest paths are wanted, use: all_shortest_paths
- path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
- except:
+ path = nx.shortest_path(
+ self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+ except BaseException:
LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
vnf_src_name, vnf_dst_name, src_sw, dst_sw))
LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
for e, v in self.DCNetwork_graph.edges():
LOG.debug("%r" % self.DCNetwork_graph[e][v])
- return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+ return "No path could be found between {0} and {1}".format(
+ vnf_src_name, vnf_dst_name)
- LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+ LOG.info("Path between {0} and {1}: {2}".format(
+ vnf_src_name, vnf_dst_name, path))
current_hop = src_sw
switch_inport_nr = src_sw_inport_nr
@@ -671,8 +692,8 @@
chain_dict['tag'] = vlan
self.installed_chains.append(chain_dict)
- #iterate through the path to install the flow-entries
- for i in range(0,len(path)):
+ # iterate through the path to install the flow-entries
+ for i in range(0, len(path)):
current_node = self.getNodeByName(current_hop)
if i < len(path) - 1:
@@ -686,7 +707,7 @@
if next_hop == vnf_dst_name:
switch_outport_nr = dst_sw_outport_nr
LOG.info("end node reached: {0}".format(vnf_dst_name))
- elif not isinstance( next_node, OVSSwitch ):
+ elif not isinstance(next_node, OVSSwitch):
LOG.info("Next node: {0} is not a switch".format(next_hop))
return "Next node: {0} is not a switch".format(next_hop)
else:
@@ -694,9 +715,8 @@
index_edge_out = 0
switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
-
- # set OpenFlow entry
- if isinstance( current_node, OVSSwitch ):
+ # set OpenFlow entry
+ if isinstance(current_node, OVSSwitch):
kwargs['vlan'] = vlan
kwargs['path'] = path
kwargs['current_hop'] = current_hop
@@ -705,28 +725,32 @@
kwargs['pathindex'] = i
if self.controller == RemoteController:
- ## set flow entry via ryu rest api
- self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ # set flow entry via ryu rest api
+ self._set_flow_entry_ryu_rest(
+ current_node, switch_inport_nr, switch_outport_nr, **kwargs)
else:
- ## set flow entry via ovs-ofctl
- self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ # set flow entry via ovs-ofctl
+ self._set_flow_entry_dpctl(
+ current_node, switch_inport_nr, switch_outport_nr, **kwargs)
# take first link between switches by default
- if isinstance( next_node, OVSSwitch ):
+ if isinstance(next_node, OVSSwitch):
switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
current_hop = next_hop
flow_options = {
- 'priority':kwargs.get('priority', DEFAULT_PRIORITY),
- 'cookie':kwargs.get('cookie', DEFAULT_COOKIE),
- 'vlan':kwargs['vlan'],
- 'path':kwargs['path'],
- 'match_input':kwargs.get('match')
+ 'priority': kwargs.get('priority', DEFAULT_PRIORITY),
+ 'cookie': kwargs.get('cookie', DEFAULT_COOKIE),
+ 'vlan': kwargs['vlan'],
+ 'path': kwargs['path'],
+ 'match_input': kwargs.get('match')
}
flow_options_str = json.dumps(flow_options, indent=1)
- return "success: {2} between {0} and {1} with options: {3}".format(vnf_src_name, vnf_dst_name, cmd, flow_options_str)
+ return "success: {2} between {0} and {1} with options: {3}".format(
+ vnf_src_name, vnf_dst_name, cmd, flow_options_str)
- def _set_flow_entry_ryu_rest(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
+ def _set_flow_entry_ryu_rest(
+ self, node, switch_inport_nr, switch_outport_nr, **kwargs):
match = 'in_port=%s' % switch_inport_nr
cookie = kwargs.get('cookie')
@@ -764,7 +788,7 @@
# http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#add-a-flow-entry
if cmd == 'add-flow':
prefix = 'stats/flowentry/add'
- if vlan != None:
+ if vlan is not None:
if index == 0: # first node
# set vlan tag in ovs instance (to isolate E-LANs)
if not skip_vlan_tag:
@@ -773,8 +797,12 @@
# set vlan push action if more than 1 switch in the path
if len(path) > 1:
action = {}
- action['type'] = 'PUSH_VLAN' # Push a new VLAN tag if a input frame is non-VLAN-tagged
- action['ethertype'] = 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
+ # Push a new VLAN tag if a input frame is
+ # non-VLAN-tagged
+ action['type'] = 'PUSH_VLAN'
+ # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged
+ # frame
+ action['ethertype'] = 33024
flow['actions'].append(action)
action = {}
action['type'] = 'SET_FIELD'
@@ -809,7 +837,8 @@
if cookie:
# TODO: add cookie_mask as argument
- flow['cookie_mask'] = int('0xffffffffffffffff', 16) # need full mask to match complete cookie
+ # need full mask to match complete cookie
+ flow['cookie_mask'] = int('0xffffffffffffffff', 16)
action = {}
action['type'] = 'OUTPUT'
@@ -820,10 +849,12 @@
self.ryu_REST(prefix, data=flow)
def _set_vlan_tag(self, node, switch_port, tag):
- node.vsctl('set', 'port {0} tag={1}'.format(switch_port,tag))
- LOG.debug("set vlan in switch: {0} in_port: {1} vlan tag: {2}".format(node.name, switch_port, tag))
+ node.vsctl('set', 'port {0} tag={1}'.format(switch_port, tag))
+ LOG.debug("set vlan in switch: {0} in_port: {1} vlan tag: {2}".format(
+ node.name, switch_port, tag))
- def _set_flow_entry_dpctl(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
+ def _set_flow_entry_dpctl(
+ self, node, switch_inport_nr, switch_outport_nr, **kwargs):
match = 'in_port=%s' % switch_inport_nr
@@ -842,9 +873,10 @@
match = s.join([match, match_input])
if cmd == 'add-flow':
action = 'action=%s' % switch_outport_nr
- if vlan != None:
- if index == 0: # first node
- action = ('action=mod_vlan_vid:%s' % vlan) + (',output=%s' % switch_outport_nr)
+ if vlan is not None:
+ if index == 0: # first node
+ action = ('action=mod_vlan_vid:%s' % vlan) + \
+ (',output=%s' % switch_outport_nr)
match = '-O OpenFlow13 ' + match
elif index == len(path) - 1: # last node
match += ',dl_vlan=%s' % vlan
@@ -859,15 +891,16 @@
node.dpctl(cmd, ofcmd)
LOG.info("{3} in switch: {0} in_port: {1} out_port: {2}".format(node.name, switch_inport_nr,
- switch_outport_nr, cmd))
+ switch_outport_nr, cmd))
# start Ryu Openflow controller as Remote Controller for the DCNetwork
def startRyu(self, learning_switch=True):
# start Ryu controller with rest-API
python_install_path = site.getsitepackages()[0]
# ryu default learning switch
- #ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
- #custom learning switch that installs a default NORMAL action in the ovs switches
+ # ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
+ # custom learning switch that installs a default NORMAL action in the
+ # ovs switches
dir_path = os.path.dirname(os.path.realpath(__file__))
ryu_path = dir_path + '/son_emu_simple_switch_13.py'
ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
@@ -878,12 +911,14 @@
ryu_cmd = 'ryu-manager'
FNULL = open("/tmp/ryu.log", 'w')
if learning_switch:
- self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ self.ryu_process = Popen(
+ [ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
LOG.debug('starting ryu-controller with {0}'.format(ryu_path))
LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
else:
# no learning switch, but with rest api
- self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ self.ryu_process = Popen(
+ [ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
time.sleep(1)
@@ -910,7 +945,6 @@
else:
req = self.RyuSession.get(url)
-
# do extra logging if status code is not 200 (OK)
if req.status_code is not requests.codes.ok:
logging.info(
@@ -918,9 +952,10 @@
req.encoding, req.text,
req.headers, req.history))
LOG.info('url: {0}'.format(str(url)))
- if data: LOG.info('POST: {0}'.format(str(data)))
- LOG.info('status: {0} reason: {1}'.format(req.status_code, req.reason))
-
+ if data:
+ LOG.info('POST: {0}'.format(str(data)))
+ LOG.info('status: {0} reason: {1}'.format(
+ req.status_code, req.reason))
if 'json' in req.headers['content-type']:
ret = req.json()
@@ -929,9 +964,9 @@
ret = req.text.rstrip()
return ret
-
# need to respect that some match fields must be integers
# http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#description-of-match-and-actions
+
def _parse_match(self, match):
matches = match.split(',')
dict = {}
@@ -940,13 +975,14 @@
if len(match) == 2:
try:
m2 = int(match[1], 0)
- except:
+ except BaseException:
m2 = match[1]
- dict.update({match[0]:m2})
+ dict.update({match[0]: m2})
return dict
- def find_connected_dc_interface(self, vnf_src_name, vnf_src_interface=None):
+ def find_connected_dc_interface(
+ self, vnf_src_name, vnf_src_interface=None):
if vnf_src_interface is None:
# take first interface by default
@@ -958,9 +994,8 @@
link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
for link in link_dict:
if (link_dict[link]['src_port_id'] == vnf_src_interface or
- link_dict[link]['src_port_name'] == vnf_src_interface): # Fix: we might also get interface names, e.g, from a son-emu-cli call
+ link_dict[link]['src_port_name'] == vnf_src_interface):
+ # Fix: we might also get interface names, e.g, from a son-emu-cli call
# found the right link and connected switch
- src_sw = connected_sw
- src_sw_inport_nr = link_dict[link]['dst_port_nr']
src_sw_inport_name = link_dict[link]['dst_port_name']
return src_sw_inport_name
diff --git a/src/emuvim/dcemulator/node.py b/src/emuvim/dcemulator/node.py
index 77a71a0..5a9c048 100755
--- a/src/emuvim/dcemulator/node.py
+++ b/src/emuvim/dcemulator/node.py
@@ -1,31 +1,29 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-from mininet.node import Docker, OVSBridge
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
+from mininet.node import Docker
from mininet.link import Link
from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable
import logging
@@ -38,6 +36,7 @@
DCDPID_BASE = 1000 # start of switch dpid's used for data center switches
EXTSAPDPID_BASE = 2000 # start of switch dpid's used for external SAP switches
+
class EmulatorCompute(Docker):
"""
Emulator specific compute node class.
@@ -51,7 +50,8 @@
self, name, dimage, **kwargs):
self.datacenter = kwargs.get("datacenter") # pointer to current DC
self.flavor_name = kwargs.get("flavor_name")
- LOG.debug("Starting compute instance %r in data center %r" % (name, str(self.datacenter)))
+ LOG.debug("Starting compute instance %r in data center %r" %
+ (name, str(self.datacenter)))
# call original Docker.__init__
Docker.__init__(self, name, dimage, **kwargs)
@@ -65,9 +65,11 @@
for i in self.intfList():
vnf_name = self.name
vnf_interface = str(i)
- dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
+ dc_port_name = self.datacenter.net.find_connected_dc_interface(
+ vnf_name, vnf_interface)
# format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
- intf_dict = {'intf_name': str(i), 'ip': "{0}/{1}".format(i.IP(), i.prefixLen), 'netmask': i.prefixLen, 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+ intf_dict = {'intf_name': str(i), 'ip': "{0}/{1}".format(i.IP(), i.prefixLen), 'netmask': i.prefixLen,
+ 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
networkStatusList.append(intf_dict)
return networkStatusList
@@ -91,7 +93,8 @@
status["state"] = self.dcli.inspect_container(self.dc)["State"]
status["id"] = self.dcli.inspect_container(self.dc)["Id"]
status["short_id"] = self.dcli.inspect_container(self.dc)["Id"][:12]
- status["hostname"] = self.dcli.inspect_container(self.dc)["Config"]['Hostname']
+ status["hostname"] = self.dcli.inspect_container(self.dc)[
+ "Config"]['Hostname']
status["datacenter"] = (None if self.datacenter is None
else self.datacenter.label)
@@ -113,14 +116,16 @@
self.net = self.datacenter.net
self.name = sap_name
- LOG.debug("Starting ext SAP instance %r in data center %r" % (sap_name, str(self.datacenter)))
+ LOG.debug("Starting ext SAP instance %r in data center %r" %
+ (sap_name, str(self.datacenter)))
# create SAP as separate OVS switch with an assigned ip address
self.ip = str(sap_net[1]) + '/' + str(sap_net.prefixlen)
self.subnet = sap_net
# allow connection to the external internet through the host
params = dict(NAT=True)
- self.switch = self.net.addExtSAP(sap_name, self.ip, dpid=hex(self._get_next_extSAP_dpid())[2:], **params)
+ self.switch = self.net.addExtSAP(sap_name, self.ip, dpid=hex(
+ self._get_next_extSAP_dpid())[2:], **params)
self.switch.start()
def _get_next_extSAP_dpid(self):
@@ -140,9 +145,11 @@
vnf_interface = str(i)
if vnf_interface == 'lo':
continue
- dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
+ dc_port_name = self.datacenter.net.find_connected_dc_interface(
+ vnf_name, vnf_interface)
# format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
- intf_dict = {'intf_name': str(i), 'ip': self.ip, 'netmask': i.prefixLen, 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+ intf_dict = {'intf_name': str(i), 'ip': self.ip, 'netmask': i.prefixLen, 'mac': i.MAC(
+ ), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
networkStatusList.append(intf_dict)
return networkStatusList
@@ -154,6 +161,7 @@
"network": self.getNetworkStatus()
}
+
class Datacenter(object):
"""
Represents a logical data center to which compute resources
@@ -174,7 +182,8 @@
self.label = label
# dict to store arbitrary metadata (e.g. latitude and longitude)
self.metadata = metadata
- # path to which resource information should be logged (e.g. for experiments). None = no logging
+ # path to which resource information should be logged (e.g. for
+ # experiments). None = no logging
self.resource_log_path = resource_log_path
# first prototype assumes one "bigswitch" per DC
self.switch = None
@@ -208,7 +217,8 @@
def start(self):
pass
- def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny", properties=dict(), **params):
+ def startCompute(self, name, image=None, command=None, network=None,
+ flavor_name="tiny", properties=dict(), **params):
"""
Create a new container as compute resource and connect it to this
data center.
@@ -230,7 +240,8 @@
if network is None:
network = {} # {"ip": "10.0.0.254/8"}
if isinstance(network, dict):
- network = [network] # if we have only one network, put it in a list
+ # if we have only one network, put it in a list
+ network = [network]
if isinstance(network, list):
if len(network) < 1:
network.append({})
@@ -250,19 +261,19 @@
dcmd=command,
datacenter=self,
flavor_name=flavor_name,
- environment = env,
+ environment=env,
**params
)
-
-
# apply resource limits to container if a resource model is defined
if self._resource_model is not None:
try:
self._resource_model.allocate(d)
- self._resource_model.write_allocation_log(d, self.resource_log_path)
+ self._resource_model.write_allocation_log(
+ d, self.resource_log_path)
except NotEnoughResourcesAvailable as ex:
- LOG.warning("Allocation of container %r was blocked by resource model." % name)
+ LOG.warning(
+ "Allocation of container %r was blocked by resource model." % name)
LOG.info(ex.message)
# ensure that we remove the container
self.net.removeDocker(name)
@@ -272,11 +283,14 @@
# if no --net option is given, network = [{}], so 1 empty dict in the list
# this results in 1 default interface with a default ip address
for nw in network:
- # clean up network configuration (e.g. RTNETLINK does not allow ':' in intf names
+ # clean up network configuration (e.g. RTNETLINK does not allow ':'
+ # in intf names
if nw.get("id") is not None:
nw["id"] = self._clean_ifname(nw["id"])
- # TODO we cannot use TCLink here (see: https://github.com/mpeuster/containernet/issues/3)
- self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
+ # TODO we cannot use TCLink here (see:
+ # https://github.com/mpeuster/containernet/issues/3)
+ self.net.addLink(d, self.switch, params1=nw,
+ cls=Link, intfName1=nw.get('id'))
# do bookkeeping
self.containers[name] = d
@@ -289,7 +303,8 @@
assert name is not None
if name not in self.containers:
raise Exception("Container with name %s not found." % name)
- LOG.debug("Stopping compute instance %r in data center %r" % (name, str(self)))
+ LOG.debug("Stopping compute instance %r in data center %r" %
+ (name, str(self)))
# stop the monitored metrics
if self.net.monitor_agent is not None:
@@ -298,7 +313,8 @@
# call resource model and free resources
if self._resource_model is not None:
self._resource_model.free(self.containers[name])
- self._resource_model.write_free_log(self.containers[name], self.resource_log_path)
+ self._resource_model.write_free_log(
+ self.containers[name], self.resource_log_path)
# remove links
self.net.removeLink(
@@ -318,7 +334,7 @@
def removeExternalSAP(self, sap_name):
sap_switch = self.extSAPs[sap_name].switch
- #sap_switch = self.net.getNodeByName(sap_name)
+ # sap_switch = self.net.getNodeByName(sap_name)
# remove link of SAP to the DC switch
self.net.removeLink(link=None, node1=sap_switch, node2=self.switch)
self.net.removeExtSAP(sap_name)
@@ -350,8 +366,8 @@
"switch": self.switch.name,
"n_running_containers": len(self.containers),
"metadata": self.metadata,
- "vnf_list" : container_list,
- "ext SAP list" : ext_saplist
+ "vnf_list": container_list,
+ "ext SAP list": ext_saplist
}
def assignResourceModel(self, rm):
@@ -361,7 +377,8 @@
:return:
"""
if self._resource_model is not None:
- raise Exception("There is already an resource model assigned to this DC.")
+ raise Exception(
+ "There is already an resource model assigned to this DC.")
self._resource_model = rm
self.net.rm_registrar.register(self, rm)
LOG.info("Assigned RM: %r to DC: %r" % (rm, self))
@@ -381,4 +398,3 @@
name = name.replace(".", "-")
name = name.replace("_", "-")
return name
-
diff --git a/src/emuvim/dcemulator/resourcemodel/__init__.py b/src/emuvim/dcemulator/resourcemodel/__init__.py
index 869eb1d..de100f7 100755
--- a/src/emuvim/dcemulator/resourcemodel/__init__.py
+++ b/src/emuvim/dcemulator/resourcemodel/__init__.py
@@ -1,34 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Base classes needed for resource models support.
-"""
-
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import logging
LOG = logging.getLogger("resourcemodel")
LOG.setLevel(logging.DEBUG)
@@ -55,7 +49,8 @@
:return: None
"""
if dc in self._resource_models:
- raise Exception("There is already an resource model assigned to this DC.")
+ raise Exception(
+ "There is already an resource model assigned to this DC.")
self._resource_models[dc] = rm
rm.registrar = self
rm.dcs.append(dc)
@@ -75,7 +70,8 @@
Total number of data centers that are connected to a resource model
:return:
"""
- return sum([len(rm.dcs) for rm in list(self._resource_models.itervalues())])
+ return sum([len(rm.dcs)
+ for rm in list(self._resource_models.itervalues())])
class ResourceFlavor(object):
@@ -83,6 +79,7 @@
Simple class that represents resource flavors (c.f. OpenStack).
Can contain arbitrary metrics.
"""
+
def __init__(self, name, metrics):
self.name = name
self._metrics = metrics
@@ -114,15 +111,15 @@
initialize some default flavours (naming/sizes inspired by OpenStack)
"""
self.addFlavour(ResourceFlavor(
- "tiny", {"compute": 0.5, "memory": 32, "disk": 1}))
+ "tiny", {"compute": 0.5, "memory": 32, "disk": 1}))
self.addFlavour(ResourceFlavor(
- "small", {"compute": 1.0, "memory": 128, "disk": 20}))
+ "small", {"compute": 1.0, "memory": 128, "disk": 20}))
self.addFlavour(ResourceFlavor(
- "medium", {"compute": 4.0, "memory": 256, "disk": 40}))
+ "medium", {"compute": 4.0, "memory": 256, "disk": 40}))
self.addFlavour(ResourceFlavor(
- "large", {"compute": 8.0, "memory": 512, "disk": 80}))
+ "large", {"compute": 8.0, "memory": 512, "disk": 80}))
self.addFlavour(ResourceFlavor(
- "xlarge", {"compute": 16.0, "memory": 1024, "disk": 160}))
+ "xlarge", {"compute": 16.0, "memory": 1024, "disk": 160}))
def addFlavour(self, fl):
"""
@@ -139,7 +136,8 @@
This method has to be overwritten by a real resource model.
:param d: Container object
"""
- LOG.warning("Allocating in BaseResourceModel: %r with flavor: %r" % (d.name, d.flavor_name))
+ LOG.warning("Allocating in BaseResourceModel: %r with flavor: %r" % (
+ d.name, d.flavor_name))
self._allocated_compute_instances[d.name] = d.flavor_name
def free(self, d):
diff --git a/src/emuvim/dcemulator/resourcemodel/upb/__init__.py b/src/emuvim/dcemulator/resourcemodel/upb/__init__.py
index 395c0ce..d888119 100755
--- a/src/emuvim/dcemulator/resourcemodel/upb/__init__.py
+++ b/src/emuvim/dcemulator/resourcemodel/upb/__init__.py
@@ -1,27 +1,25 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
diff --git a/src/emuvim/dcemulator/resourcemodel/upb/simple.py b/src/emuvim/dcemulator/resourcemodel/upb/simple.py
index b812aad..01231fd 100755
--- a/src/emuvim/dcemulator/resourcemodel/upb/simple.py
+++ b/src/emuvim/dcemulator/resourcemodel/upb/simple.py
@@ -1,33 +1,28 @@
-"""
-Copyright (c) 2015 SONATA-NFV and Paderborn University
-ALL RIGHTS RESERVED.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Neither the name of the SONATA-NFV, Paderborn University
-nor the names of its contributors may be used to endorse or promote
-products derived from this software without specific prior written
-permission.
-
-This work has been performed in the framework of the SONATA project,
-funded by the European Commission under Grant number 671517 through
-the Horizon 2020 and 5G-PPP programmes. The authors would like to
-acknowledge the contributions of their colleagues of the SONATA
-partner consortium (www.sonata-nfv.eu).
-"""
-"""
-Playground for resource models created by University of Paderborn.
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
import time
import json
import logging
@@ -93,7 +88,8 @@
fl_cu = self._get_flavor(d).get("compute")
# check for over provisioning
if self.dc_alloc_cu + fl_cu > self.dc_max_cu and self.raise_no_cpu_resources_left:
- raise NotEnoughResourcesAvailable("Not enough compute resources left.")
+ raise NotEnoughResourcesAvailable(
+ "Not enough compute resources left.")
self.dc_alloc_cu += fl_cu
def _allocate_mem(self, d):
@@ -105,7 +101,8 @@
fl_mu = self._get_flavor(d).get("memory")
# check for over provisioning
if self.dc_alloc_mu + fl_mu > self.dc_max_mu and self.raise_no_mem_resources_left:
- raise NotEnoughResourcesAvailable("Not enough memory resources left.")
+ raise NotEnoughResourcesAvailable(
+ "Not enough memory resources left.")
self.dc_alloc_mu += fl_mu
def free(self, d):
@@ -162,12 +159,14 @@
# calculate cpu time fraction for container with given flavor
cpu_time_percentage = self.single_cu * number_cu
# calculate input values for CFS scheduler bandwidth limitation
- cpu_period, cpu_quota = self._calculate_cpu_cfs_values(cpu_time_percentage)
+ cpu_period, cpu_quota = self._calculate_cpu_cfs_values(
+ cpu_time_percentage)
# apply limits to container if changed
if d.resources['cpu_period'] != cpu_period or d.resources['cpu_quota'] != cpu_quota:
LOG.debug("Setting CPU limit for %r: cpu_quota = cpu_period * limit = %f * %f = %f (op_factor=%f)" % (
d.name, cpu_period, cpu_time_percentage, cpu_quota, self.cpu_op_factor))
- d.updateCpuLimit(cpu_period=int(cpu_period), cpu_quota=int(cpu_quota))
+ d.updateCpuLimit(cpu_period=int(cpu_period),
+ cpu_quota=int(cpu_quota))
def _compute_single_cu(self):
"""
@@ -177,7 +176,8 @@
# get cpu time fraction for entire emulation
e_cpu = self.registrar.e_cpu
# calculate
- return float(e_cpu) / sum([rm.dc_max_cu for rm in list(self.registrar.resource_models)])
+ return float(
+ e_cpu) / sum([rm.dc_max_cu for rm in list(self.registrar.resource_models)])
def _calculate_cpu_cfs_values(self, cpu_time_percentage):
"""
@@ -188,8 +188,10 @@
# (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
# Attention minimum cpu_quota is 1ms (micro)
cpu_period = CPU_PERIOD # lets consider a fixed period of 1000000 microseconds for now
- cpu_quota = cpu_period * cpu_time_percentage # calculate the fraction of cpu time for this container
- # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
+ # calculate the fraction of cpu time for this container
+ cpu_quota = cpu_period * cpu_time_percentage
+ # ATTENTION >= 1000 to avoid a invalid argument system error ... no
+ # idea why
if cpu_quota < 1000:
cpu_quota = 1000
LOG.warning("Increased CPU quota to avoid system error.")
@@ -205,14 +207,15 @@
# get memory amount for entire emulation
e_mem = self.registrar.e_mem
# calculate amount of memory for a single mu
- self.single_mu = float(e_mem) / sum([rm.dc_max_mu for rm in list(self.registrar.resource_models)])
+ self.single_mu = float(
+ e_mem) / sum([rm.dc_max_mu for rm in list(self.registrar.resource_models)])
# calculate mem for given flavor
mem_limit = self.single_mu * number_mu
mem_limit = self._calculate_mem_limit_value(mem_limit)
# apply to container if changed
if d.resources['mem_limit'] != mem_limit:
LOG.debug("Setting MEM limit for %r: mem_limit = %f MB (op_factor=%f)" %
- (d.name, mem_limit/1024/1024, self.mem_op_factor))
+ (d.name, mem_limit / 1024 / 1024, self.mem_op_factor))
d.updateMemoryLimit(mem_limit=mem_limit)
def _calculate_mem_limit_value(self, mem_limit):
@@ -226,7 +229,7 @@
mem_limit = 4
LOG.warning("Increased MEM limit because it was less than 4.0 MB.")
# to byte!
- return int(mem_limit*1024*1024)
+ return int(mem_limit * 1024 * 1024)
def get_state_dict(self):
"""
@@ -281,14 +284,14 @@
if path is None:
return
# we have a path: write out RM info
- l = dict()
- l["t"] = time.time()
- l["container_state"] = d.getStatus()
- l["action"] = action
- l["rm_state"] = self.get_state_dict()
+ logd = dict()
+ logd["t"] = time.time()
+ logd["container_state"] = d.getStatus()
+ logd["action"] = action
+ logd["rm_state"] = self.get_state_dict()
# append to logfile
with open(path, "a") as f:
- f.write("%s\n" % json.dumps(l))
+ f.write("%s\n" % json.dumps(logd))
class UpbOverprovisioningCloudDcRM(UpbSimpleCloudDcRM):
@@ -299,6 +302,7 @@
containers whenever a data-center is over provisioned.
"""
# TODO add parts for memory
+
def __init__(self, *args, **kvargs):
super(UpbOverprovisioningCloudDcRM, self).__init__(*args, **kvargs)
self.raise_no_cpu_resources_left = False
@@ -312,15 +316,18 @@
# get cpu time fraction for entire emulation
e_cpu = self.registrar.e_cpu
# calculate over provisioning scale factor
- self.cpu_op_factor = float(self.dc_max_cu) / (max(self.dc_max_cu, self.dc_alloc_cu))
+ self.cpu_op_factor = float(self.dc_max_cu) / \
+ (max(self.dc_max_cu, self.dc_alloc_cu))
# calculate
- return float(e_cpu) / sum([rm.dc_max_cu for rm in list(self.registrar.resource_models)]) * self.cpu_op_factor
+ return float(e_cpu) / sum([rm.dc_max_cu for rm in list(
+ self.registrar.resource_models)]) * self.cpu_op_factor
class UpbDummyRM(UpbSimpleCloudDcRM):
"""
No limits. But log allocations.
"""
+
def __init__(self, *args, **kvargs):
super(UpbDummyRM, self).__init__(*args, **kvargs)
self.raise_no_cpu_resources_left = False
@@ -328,4 +335,3 @@
def _apply_limits(self):
# do nothing here
pass
-
diff --git a/src/emuvim/dcemulator/son_emu_simple_switch_13.py b/src/emuvim/dcemulator/son_emu_simple_switch_13.py
index 53d1a2e..2adaf80 100755
--- a/src/emuvim/dcemulator/son_emu_simple_switch_13.py
+++ b/src/emuvim/dcemulator/son_emu_simple_switch_13.py
@@ -21,7 +21,8 @@
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
-from ryu.topology.event import EventSwitchEnter, EventSwitchLeave, EventSwitchReconnected
+from ryu.topology.event import EventSwitchEnter, EventSwitchReconnected
+
class SimpleSwitch13(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
@@ -44,12 +45,13 @@
# truncated packet data. In that case, we cannot output packets
# correctly. The bug has been fixed in OVS v2.1.0.
match = parser.OFPMatch()
- #actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
+ # actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
# ofproto.OFPCML_NO_BUFFER)]
actions = [parser.OFPActionOutput(ofproto.OFPCML_NO_BUFFER)]
self.add_flow(datapath, 0, match, actions)
- def add_flow(self, datapath, priority, match, actions, buffer_id=None, table_id=0):
+ def add_flow(self, datapath, priority, match,
+ actions, buffer_id=None, table_id=0):
ofproto = datapath.ofproto
parser = datapath.ofproto_parser
@@ -77,7 +79,6 @@
actions = [ofp_parser.OFPActionOutput(ofproto_v1_3.OFPP_NORMAL)]
self.add_flow(datapath, 0, None, actions, table_id=0)
-
@set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
def _packet_in_handler(self, ev):
# If you hit this you might want to increase