-"""\r
-Copyright (c) 2015 SONATA-NFV\r
-ALL RIGHTS RESERVED.\r
-\r
-Licensed under the Apache License, Version 2.0 (the "License");\r
-you may not use this file except in compliance with the License.\r
-You may obtain a copy of the License at\r
-\r
- http://www.apache.org/licenses/LICENSE-2.0\r
-\r
-Unless required by applicable law or agreed to in writing, software\r
-distributed under the License is distributed on an "AS IS" BASIS,\r
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
-See the License for the specific language governing permissions and\r
-limitations under the License.\r
-\r
-Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]\r
-nor the names of its contributors may be used to endorse or promote\r
-products derived from this software without specific prior written\r
-permission.\r
-\r
-This work has been performed in the framework of the SONATA project,\r
-funded by the European Commission under Grant number 671517 through\r
-the Horizon 2020 and 5G-PPP programmes. The authors would like to\r
-acknowledge the contributions of their colleagues of the SONATA\r
-partner consortium (www.sonata-nfv.eu).\r
-"""\r
-\r
+# Copyright (c) 2015 SONATA-NFV and Paderborn University\r
+# ALL RIGHTS RESERVED.\r
+#\r
+# Licensed under the Apache License, Version 2.0 (the "License");\r
+# you may not use this file except in compliance with the License.\r
+# You may obtain a copy of the License at\r
+#\r
+# http://www.apache.org/licenses/LICENSE-2.0\r
+#\r
+# Unless required by applicable law or agreed to in writing, software\r
+# distributed under the License is distributed on an "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r
+# See the License for the specific language governing permissions and\r
+# limitations under the License.\r
+#\r
+# Neither the name of the SONATA-NFV, Paderborn University\r
+# nor the names of its contributors may be used to endorse or promote\r
+# products derived from this software without specific prior written\r
+# permission.\r
+#\r
+# This work has been performed in the framework of the SONATA project,\r
+# funded by the European Commission under Grant number 671517 through\r
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to\r
+# acknowledge the contributions of their colleagues of the SONATA\r
+# partner consortium (www.sonata-nfv.eu).\r
import logging\r
-import sys\r
-from mininet.node import OVSSwitch\r
+from mininet.node import OVSSwitch\r
import ast\r
import time\r
-from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \\r
- pushadd_to_gateway, push_to_gateway, delete_from_gateway\r
+from prometheus_client import Gauge, CollectorRegistry, \\r
+ pushadd_to_gateway, delete_from_gateway\r
import threading\r
from subprocess import Popen\r
import os\r
import docker\r
import json\r
+from copy import deepcopy\r
\r
-logging.basicConfig(level=logging.INFO)\r
+logging.basicConfig()\r
\r
"""\r
class to read openflow stats from the Ryu controller of the DCNetwork\r
\r
COOKIE_MASK = 0xffffffff\r
\r
+\r
class DCNetworkMonitor():\r
def __init__(self, net):\r
self.net = net\r
self.prom_rx_byte_count = Gauge('sonemu_rx_count_bytes', 'Total number of bytes received',\r
['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)\r
\r
- self.prom_metrics={'tx_packets':self.prom_tx_packet_count, 'rx_packets':self.prom_rx_packet_count,\r
- 'tx_bytes':self.prom_tx_byte_count,'rx_bytes':self.prom_rx_byte_count}\r
+ self.prom_metrics = {'tx_packets': self.prom_tx_packet_count, 'rx_packets': self.prom_rx_packet_count,\r
+ 'tx_bytes': self.prom_tx_byte_count, 'rx_bytes': self.prom_rx_byte_count}\r
\r
# list of installed metrics to monitor\r
# each entry can contain this data\r
self.monitor_thread = threading.Thread(target=self.get_network_metrics)\r
self.monitor_thread.start()\r
\r
- self.monitor_flow_thread = threading.Thread(target=self.get_flow_metrics)\r
+ self.monitor_flow_thread = threading.Thread(\r
+ target=self.get_flow_metrics)\r
self.monitor_flow_thread.start()\r
\r
# helper tools\r
- # cAdvisor, Prometheus pushgateway are started as external container, to gather monitoring metric in son-emu\r
+    # cAdvisor, Prometheus pushgateway are started as external containers,
+    # to gather monitoring metrics in son-emu
self.pushgateway_process = self.start_PushGateway()\r
self.cadvisor_process = self.start_cAdvisor()\r
\r
-\r
# first set some parameters, before measurement can start\r
- def setup_flow(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=0):\r
+\r
+ def setup_flow(self, vnf_name, vnf_interface=None,\r
+ metric='tx_packets', cookie=0):\r
\r
flow_metric = {}\r
\r
break\r
\r
if not vnf_switch:\r
- logging.exception("vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface))\r
- return "vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface)\r
+ logging.exception("vnf switch of {0}:{1} not found!".format(\r
+ vnf_name, vnf_interface))\r
+ return "vnf switch of {0}:{1} not found!".format(\r
+ vnf_name, vnf_interface)\r
\r
try:\r
# default port direction to monitor\r
next_node = self.net.getNodeByName(vnf_switch)\r
\r
if not isinstance(next_node, OVSSwitch):\r
- logging.info("vnf: {0} is not connected to switch".format(vnf_name))\r
+ logging.info(\r
+ "vnf: {0} is not connected to switch".format(vnf_name))\r
return\r
\r
flow_metric['previous_measurement'] = 0\r
self.flow_metrics.append(flow_metric)\r
self.monitor_flow_lock.release()\r
\r
- logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))\r
- return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)\r
+ logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric, cookie))\r
+ return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric, cookie)\r
\r
except Exception as ex:\r
logging.exception("setup_metric error.")\r
labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
set(float('nan'))\r
\r
- delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+ delete_from_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller')\r
\r
self.monitor_flow_lock.release()\r
\r
- logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))\r
- return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)\r
-\r
- return 'Error stopping monitoring flow: {0} on {1}:{2}'.format(metric, vnf_name, vnf_interface)\r
+ logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric, cookie))\r
+ return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric, cookie)\r
\r
+ return 'Error stopping monitoring flow: {0} on {1}:{2}'.format(\r
+ metric, vnf_name, vnf_interface)\r
\r
# first set some parameters, before measurement can start\r
+\r
def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):\r
\r
network_metric = {}\r
\r
# check if port is specified (vnf:port)\r
- if vnf_interface is None:\r
+ if vnf_interface is None or vnf_interface == '':\r
# take first interface by default\r
connected_sw = self.net.DCNetwork_graph.neighbors(vnf_name)[0]\r
link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]\r
break\r
\r
if 'mon_port' not in network_metric:\r
- logging.exception("vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface))\r
- return "vnf interface {0}:{1} not found!".format(vnf_name,vnf_interface)\r
+ logging.exception("vnf interface {0}:{1} not found!".format(\r
+ vnf_name, vnf_interface))\r
+ return "vnf interface {0}:{1} not found!".format(\r
+ vnf_name, vnf_interface)\r
\r
try:\r
# default port direction to monitor\r
next_node = self.net.getNodeByName(vnf_switch)\r
\r
if not isinstance(next_node, OVSSwitch):\r
- logging.info("vnf: {0} is not connected to switch".format(vnf_name))\r
+ logging.info(\r
+ "vnf: {0} is not connected to switch".format(vnf_name))\r
return\r
\r
network_metric['previous_measurement'] = 0\r
network_metric['previous_monitor_time'] = 0\r
\r
-\r
network_metric['switch_dpid'] = int(str(next_node.dpid), 16)\r
network_metric['metric_key'] = metric\r
\r
self.network_metrics.append(network_metric)\r
self.monitor_lock.release()\r
\r
-\r
- logging.info('Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))\r
- return 'Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)\r
+ logging.info('Started monitoring: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric))\r
+ return 'Started monitoring: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric)\r
\r
except Exception as ex:\r
logging.exception("setup_metric error.")\r
link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]\r
vnf_interface = link_dict[0]['src_port_id']\r
\r
- for metric_dict in self.network_metrics:\r
+ for metric_dict in deepcopy(self.network_metrics):\r
if metric_dict['vnf_name'] == vnf_name and metric_dict['vnf_interface'] == vnf_interface \\r
and metric_dict['metric_key'] == metric:\r
\r
self.network_metrics.remove(metric_dict)\r
\r
# set values to NaN, prometheus api currently does not support removal of metrics\r
- #self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))\r
+ # self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))\r
self.prom_metrics[metric_dict['metric_key']]. \\r
labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=None). \\r
set(float('nan'))\r
# 1 single monitor job for all metrics of the SDN controller\r
# we can only remove from the pushgateway grouping keys(labels) which we have defined for the add_to_pushgateway\r
# we can not specify labels from the metrics to be removed\r
- # if we need to remove the metrics seperatelty, we need to give them a separate grouping key, and probably a diffferent registry also\r
- delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+    # if we need to remove the metrics separately, we need to give
+    # them a separate grouping key, and probably a different
+    # registry also
+ delete_from_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller')\r
\r
self.monitor_lock.release()\r
\r
- logging.info('Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))\r
- return 'Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)\r
+ logging.info('Stopped monitoring: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric))\r
+ return 'Stopped monitoring: {2} on {0}:{1}'.format(\r
+ vnf_name, vnf_interface, metric)\r
\r
# delete everything from this vnf\r
elif metric_dict['vnf_name'] == vnf_name and vnf_interface is None and metric is None:\r
self.monitor_lock.acquire()\r
self.network_metrics.remove(metric_dict)\r
- for collector in self.registry._collectors:\r
- collector_dict = collector._metrics.copy()\r
- for name, interface, id in collector_dict:\r
- if name == vnf_name:\r
- logging.info('3 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,\r
- collector._metrics))\r
- collector.remove(name, interface, 'None')\r
-\r
- delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
- self.monitor_lock.release()\r
- logging.info('Stopped monitoring vnf: {0}'.format(vnf_name))\r
- return 'Stopped monitoring: {0}'.format(vnf_name)\r
+ logging.info('remove metric from monitor: vnf_name:{0} vnf_interface:{1} mon_port:{2}'.format(\r
+ metric_dict['vnf_name'], metric_dict['vnf_interface'], metric_dict['mon_port']))\r
\r
- return 'Error stopping monitoring metric: {0} on {1}:{2}'.format(metric, vnf_name, vnf_interface)\r
+ delete_from_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller')\r
+ self.monitor_lock.release()\r
+ continue\r
\r
+ if vnf_interface is None and metric is None:\r
+ logging.info('Stopped monitoring vnf: {0}'.format(vnf_name))\r
+ return 'Stopped monitoring: {0}'.format(vnf_name)\r
+ else:\r
+ return 'Error stopping monitoring metric: {0} on {1}:{2}'.format(\r
+ metric, vnf_name, vnf_interface)\r
\r
-# get all metrics defined in the list and export it to Prometheus\r
def get_flow_metrics(self):\r
+ """\r
+ Get all metrics defined in the list and export it to Prometheus.\r
+ """\r
while self.start_monitoring:\r
\r
self.monitor_flow_lock.acquire()\r
data['cookie_mask'] = COOKIE_MASK\r
\r
if 'tx' in flow_dict['metric_key']:\r
- data['match'] = {'in_port':flow_dict['mon_port']}\r
+ data['match'] = {'in_port': flow_dict['mon_port']}\r
elif 'rx' in flow_dict['metric_key']:\r
data['out_port'] = flow_dict['mon_port']\r
\r
-\r
# query Ryu\r
- ret = self.net.ryu_REST('stats/flow', dpid=flow_dict['switch_dpid'], data=data)\r
+ ret = self.net.ryu_REST(\r
+ 'stats/flow', dpid=flow_dict['switch_dpid'], data=data)\r
if isinstance(ret, dict):\r
flow_stat_dict = ret\r
elif isinstance(ret, basestring):\r
\r
self.set_flow_metric(flow_dict, flow_stat_dict)\r
\r
-\r
try:\r
if len(self.flow_metrics) > 0:\r
- pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
- except Exception, e:\r
- logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))\r
+ pushadd_to_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
+ except Exception as e:\r
+ logging.warning(\r
+ "Pushgateway not reachable: {0} {1}".format(Exception, e))\r
\r
self.monitor_flow_lock.release()\r
time.sleep(1)\r
self.monitor_lock.acquire()\r
\r
# group metrics by dpid to optimize the rest api calls\r
- dpid_list = [metric_dict['switch_dpid'] for metric_dict in self.network_metrics]\r
+ dpid_list = [metric_dict['switch_dpid']\r
+ for metric_dict in self.network_metrics]\r
dpid_set = set(dpid_list)\r
\r
for dpid in dpid_set:\r
port_stat_dict = None\r
\r
metric_list = [metric_dict for metric_dict in self.network_metrics\r
- if int(metric_dict['switch_dpid'])==int(dpid)]\r
+ if int(metric_dict['switch_dpid']) == int(dpid)]\r
\r
for metric_dict in metric_list:\r
self.set_network_metric(metric_dict, port_stat_dict)\r
\r
try:\r
if len(self.network_metrics) > 0:\r
- pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
- except Exception, e:\r
- logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))\r
+ pushadd_to_gateway(\r
+ self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
+ except Exception as e:\r
+ logging.warning(\r
+ "Pushgateway not reachable: {0} {1}".format(Exception, e))\r
\r
self.monitor_lock.release()\r
time.sleep(1)\r
\r
- # add metric to the list to export to Prometheus, parse the Ryu port-stats reply\r
+ # add metric to the list to export to Prometheus, parse the Ryu port-stats\r
+ # reply\r
def set_network_metric(self, metric_dict, port_stat_dict):\r
# vnf tx is the datacenter switch rx and vice-versa\r
metric_key = self.switch_tx_rx(metric_dict['metric_key'])\r
switch_dpid = metric_dict['switch_dpid']\r
vnf_name = metric_dict['vnf_name']\r
vnf_interface = metric_dict['vnf_interface']\r
- previous_measurement = metric_dict['previous_measurement']\r
previous_monitor_time = metric_dict['previous_monitor_time']\r
mon_port = metric_dict['mon_port']\r
-\r
for port_stat in port_stat_dict[str(switch_dpid)]:\r
+ # ovs output also gives back 'LOCAL' port\r
+ if port_stat['port_no'] == 'LOCAL':\r
+ continue\r
if int(port_stat['port_no']) == int(mon_port):\r
- port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)\r
+ port_uptime = port_stat['duration_sec'] + \\r
+ port_stat['duration_nsec'] * 10 ** (-9)\r
this_measurement = int(port_stat[metric_key])\r
\r
# set prometheus metric\r
# also the rate is calculated here, but not used for now\r
# (rate can be easily queried from prometheus also)\r
if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:\r
- metric_dict['previous_measurement'] = int(port_stat[metric_key])\r
+ metric_dict['previous_measurement'] = int(\r
+ port_stat[metric_key])\r
metric_dict['previous_monitor_time'] = port_uptime\r
# do first measurement\r
- #time.sleep(1)\r
- #self.monitor_lock.release()\r
+ # time.sleep(1)\r
+ # self.monitor_lock.release()\r
# rate cannot be calculated yet (need a first measurement)\r
- metric_rate = None\r
-\r
- else:\r
- time_delta = (port_uptime - metric_dict['previous_monitor_time'])\r
- metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)\r
-\r
metric_dict['previous_measurement'] = this_measurement\r
metric_dict['previous_monitor_time'] = port_uptime\r
return\r
\r
- logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))\r
- logging.exception('monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))\r
+ logging.exception('metric {0} not found on {1}:{2}'.format(\r
+ metric_key, vnf_name, vnf_interface))\r
+ logging.exception(\r
+ 'monport:{0}, dpid:{1}'.format(mon_port, switch_dpid))\r
+ logging.exception(\r
+ 'monitored network_metrics:{0}'.format(self.network_metrics))\r
logging.exception('port dict:{0}'.format(port_stat_dict))\r
- return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)\r
+ return 'metric {0} not found on {1}:{2}'.format(\r
+ metric_key, vnf_name, vnf_interface)\r
\r
def set_flow_metric(self, metric_dict, flow_stat_dict):\r
# vnf tx is the datacenter switch rx and vice-versa\r
switch_dpid = metric_dict['switch_dpid']\r
vnf_name = metric_dict['vnf_name']\r
vnf_interface = metric_dict['vnf_interface']\r
- previous_measurement = metric_dict['previous_measurement']\r
- previous_monitor_time = metric_dict['previous_monitor_time']\r
cookie = metric_dict['cookie']\r
\r
counter = 0\r
counter += flow_stat['packet_count']\r
\r
# flow_uptime disabled for now (can give error)\r
- #flow_stat = flow_stat_dict[str(switch_dpid)][0]\r
- #flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)\r
+ # flow_stat = flow_stat_dict[str(switch_dpid)][0]\r
+ # flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)\r
\r
self.prom_metrics[metric_dict['metric_key']]. \\r
labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
set(counter)\r
\r
def start_Prometheus(self, port=9090):\r
- # prometheus.yml configuration file is located in the same directory as this file\r
+ # prometheus.yml configuration file is located in the same directory as\r
+ # this file\r
cmd = ["docker",\r
"run",\r
"--rm",\r
"-p", "{0}:9090".format(port),\r
- "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(os.path.dirname(os.path.abspath(__file__))),\r
- "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(os.path.dirname(os.path.abspath(__file__))),\r
+ "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(\r
+ os.path.dirname(os.path.abspath(__file__))),\r
+ "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(\r
+ os.path.dirname(os.path.abspath(__file__))),\r
"--name", "prometheus",\r
"prom/prometheus"\r
]\r
"--volume=/var/lib/docker/:/var/lib/docker:ro",\r
"--publish={0}:8080".format(port),\r
"--name=cadvisor",\r
- "--label",'com.containernet=""',\r
- "google/cadvisor:latest"\r
+ "--label", 'com.containernet=""',\r
+ "--detach=true",\r
+ "google/cadvisor:latest",\r
+ # "--storage_duration=1m0s",\r
+ # "--allow_dynamic_housekeeping=true",\r
+ # "--housekeeping_interval=1s",\r
]\r
logging.info('Start cAdvisor container {0}'.format(cmd))\r
return Popen(cmd)\r
self.monitor_thread.join()\r
self.monitor_flow_thread.join()\r
\r
- # these containers are used for monitoring but are started now outside of son-emu\r
+ # these containers are used for monitoring but are started now outside\r
+ # of son-emu\r
\r
if self.pushgateway_process is not None:\r
logging.info('stopping pushgateway container')\r
logging.info('stopping cadvisor container')\r
self._stop_container('cadvisor')\r
\r
- def switch_tx_rx(self,metric=''):\r
+ def switch_tx_rx(self, metric=''):\r
# when monitoring vnfs, the tx of the datacenter switch is actually the rx of the vnf\r
- # so we need to change the metric name to be consistent with the vnf rx or tx\r
+ # so we need to change the metric name to be consistent with the vnf rx\r
+ # or tx\r
if 'tx' in metric:\r
- metric = metric.replace('tx','rx')\r
+ metric = metric.replace('tx', 'rx')\r
elif 'rx' in metric:\r
- metric = metric.replace('rx','tx')\r
+ metric = metric.replace('rx', 'tx')\r
\r
return metric\r
\r
def _stop_container(self, name):\r
\r
- container = self.dockercli.containers.get(name)\r
- container.remove(force=True)\r
+ # container = self.dockercli.containers.get(name)\r
+ # container.stop()\r
+ # container.remove(force=True)\r
+\r
+ # the only robust way to stop these containers is via Popen, it seems\r
+ time.sleep(1)\r
+ cmd = ['docker', 'rm', '-f', name]\r
+ Popen(cmd)\r
\r
def update_skewmon(self, vnf_name, resource_name, action):\r
\r
configfile = open(config_file_path, 'a+')\r
try:\r
config = json.load(configfile)\r
- except:\r
- #not a valid json file or empty\r
+ except BaseException:\r
+ # not a valid json file or empty\r
config = {}\r
\r
- #initialize config file\r
+ # initialize config file\r
if len(self.skewmon_metrics) == 0:\r
config = {}\r
json.dump(config, configfile)\r
if action == 'start':\r
# add a new vnf to monitor\r
config[key] = dict(VNF_NAME=vnf_name,\r
- VNF_ID=vnf_id,\r
- VNF_METRIC=resource_name)\r
- ret = 'adding to skewness monitor: {0} {1} '.format(vnf_name, resource_name)\r
+ VNF_ID=vnf_id,\r
+ VNF_METRIC=resource_name)\r
+ ret = 'adding to skewness monitor: {0} {1} '.format(\r
+ vnf_name, resource_name)\r
logging.info(ret)\r
elif action == 'stop':\r
# remove vnf to monitor\r
config.pop(key)\r
- ret = 'removing from skewness monitor: {0} {1} '.format(vnf_name, resource_name)\r
+ ret = 'removing from skewness monitor: {0} {1} '.format(\r
+ vnf_name, resource_name)\r
logging.info(ret)\r
\r
self.skewmon_metrics = config\r
# start container if not running\r
ret += 'starting skewness monitor'\r
logging.info('starting skewness monitor')\r
- volumes = {'/sys/fs/cgroup':{'bind':'/sys/fs/cgroup', 'mode':'ro'},\r
- '/tmp/skewmon.cfg':{'bind':'/config.txt', 'mode':'ro'}}\r
+ volumes = {'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'},\r
+ '/tmp/skewmon.cfg': {'bind': '/config.txt', 'mode': 'ro'}}\r
self.dockercli.containers.run('skewmon',\r
detach=True,\r
volumes=volumes,\r
labels=['com.containernet'],\r
name='skewmon'\r
)\r
+ # Wait a while for containers to be completely started\r
+ started = False\r
+ wait_time = 0\r
+ while not started:\r
+ list1 = self.dockercli.containers.list(\r
+ filters={'status': 'running', 'name': 'prometheus'})\r
+ if len(list1) >= 1:\r
+ time.sleep(1)\r
+ started = True\r
+ if wait_time > 5:\r
+ return 'skewmon not started'\r
+ time.sleep(1)\r
+ wait_time += 1\r
return ret\r
\r
-\r
-\r
-\r
-\r
+ def term(self, vnf_list=[]):\r
+ """\r
+ Start a terminal window for the specified VNFs\r
+ (start a terminal for all VNFs if vnf_list is empty)\r
+ :param vnf_list:\r
+ :return:\r
+ """\r
+\r
+ if vnf_list is None:\r
+ vnf_list = []\r
+ if not isinstance(vnf_list, list):\r
+ vnf_list = str(vnf_list).split(',')\r
+ vnf_list = map(str.strip, vnf_list)\r
+ logging.info('vnf_list: {}'.format(vnf_list))\r
+\r
+ return self.start_xterm(vnf_list)\r
+\r
+    # start an xterm for the specified vnfs
+\r
+ def start_xterm(self, vnf_names):\r
+ # start xterm for all vnfs\r
+ for vnf_name in vnf_names:\r
+ terminal_cmd = "docker exec -it mn.{0} /bin/bash".format(vnf_name)\r
+\r
+ cmd = ['xterm', '-xrm', 'XTerm*selectToClipboard: true', '-xrm', 'XTerm.vt100.allowTitleOps: false',\r
+ '-T', vnf_name,\r
+ '-e', terminal_cmd]\r
+ Popen(cmd)\r
+\r
+ ret = 'xterms started for {0}'.format(vnf_names)\r
+ if len(vnf_names) == 0:\r
+ ret = 'vnf list is empty, no xterms started'\r
+ return ret\r