from mininet.node import OVSSwitch\r
import ast\r
import time\r
-from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter\r
+from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \\r
+ pushadd_to_gateway, push_to_gateway, delete_from_gateway\r
import threading\r
+from subprocess import Popen, PIPE\r
+import os\r
\r
logging.basicConfig(level=logging.INFO)\r
\r
self.REST_api = 'http://{0}:{1}'.format(self.ip,self.port)\r
\r
# helper variables to calculate the metrics\r
- # TODO put these in a list to support multiple metrics simultaneously\r
- self.switch_dpid = 0\r
- self.vnf_name = None\r
- self.vnf_interface = None\r
- self.previous_measurement = 0\r
- self.previous_monitor_time = 0\r
- self.metric_key = None\r
- self.mon_port = None\r
-\r
-\r
+ self.pushgateway = 'localhost:9091'\r
# Start up the server to expose the metrics to Prometheus.\r
- start_http_server(8000)\r
+ #start_http_server(8000)\r
# supported Prometheus metrics\r
+ self.registry = CollectorRegistry()\r
self.prom_tx_packet_count = Gauge('sonemu_tx_count_packets', 'Total number of packets sent',\r
- ['vnf_name', 'vnf_interface'])\r
+ ['vnf_name', 'vnf_interface'], registry=self.registry)\r
self.prom_rx_packet_count = Gauge('sonemu_rx_count_packets', 'Total number of packets received',\r
- ['vnf_name', 'vnf_interface'])\r
+ ['vnf_name', 'vnf_interface'], registry=self.registry)\r
self.prom_tx_byte_count = Gauge('sonemu_tx_count_bytes', 'Total number of bytes sent',\r
- ['vnf_name', 'vnf_interface'])\r
+ ['vnf_name', 'vnf_interface'], registry=self.registry)\r
self.prom_rx_byte_count = Gauge('sonemu_rx_count_bytes', 'Total number of bytes received',\r
- ['vnf_name', 'vnf_interface'])\r
+ ['vnf_name', 'vnf_interface'], registry=self.registry)\r
\r
self.prom_metrics={'tx_packets':self.prom_tx_packet_count, 'rx_packets':self.prom_rx_packet_count,\r
'tx_bytes':self.prom_tx_byte_count,'rx_bytes':self.prom_rx_byte_count}\r
mon_port = None\r
}\r
'''\r
- self.network_metrics=[]\r
+ self.monitor_lock = threading.Lock()\r
+ self.network_metrics = []\r
\r
# start monitoring thread\r
+ self.start_monitoring = True\r
self.monitor_thread = threading.Thread(target=self.get_network_metrics)\r
self.monitor_thread.start()\r
\r
+ # helper tools\r
+ self.pushgateway_process = self.start_PushGateway()\r
+ self.prometheus_process = self.start_Prometheus()\r
+ self.cadvisor_process = self.start_cadvisor()\r
\r
    # first set some parameters, before measurement can start
    def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):
        # NOTE(review): this is a diff fragment - the lines that create the
        # network_metric dict, open the try block and bind link/next_node are
        # elided here, so the body below is not complete on its own.

        network_metric['vnf_name'] = vnf_name
        network_metric['vnf_interface'] = vnf_interface
-        #self.vnf_name = vnf_name
-        #self.vnf_interface = vnf_interface

        # find the datacenter switch attached to this vnf and remember which
        # switch port must be polled for this vnf interface
        for connected_sw in self.net.DCNetwork_graph.neighbors(vnf_name):
            link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
                # found the right link and connected switch
                # logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
                network_metric['mon_port'] = link_dict[link]['dst_port']
-                # self.mon_port = link_dict[link]['dst_port']
                break

        if 'mon_port' not in network_metric:

        # rate bookkeeping starts empty; first poll initializes it
        network_metric['previous_measurement'] = 0
        network_metric['previous_monitor_time'] = 0
-        #self.previous_measurement = 0
-        #self.previous_monitor_time = 0
+

        # Ryu reports the dpid as a hex string
        network_metric['switch_dpid'] = int(str(next_node.dpid), 16)
        network_metric['metric_key'] = metric
-        #self.switch_dpid = int(str(next_node.dpid), 16)
-        #self.metric_key = '{0}_{1}'.format(direction, metric)
+
+        self.monitor_lock.acquire()

        # guard the list shared with the get_network_metrics() thread
        self.network_metrics.append(network_metric)
+        self.monitor_lock.release()
+

        logging.info('Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))
        return 'Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)

        except Exception as ex:
-            logging.exception("get_rate error.")
+            logging.exception("setup_metric error.")
            return ex.message
\r
+ def stop_metric(self, vnf_name, vnf_interface, metric):\r
+\r
+ for metric_dict in self.network_metrics:\r
+ if metric_dict['vnf_name'] == vnf_name and metric_dict['vnf_interface'] == vnf_interface \\r
+ and metric_dict['metric_key'] == metric:\r
+\r
+ self.monitor_lock.acquire()\r
+\r
+ self.network_metrics.remove(metric_dict)\r
+\r
+ #this removes the complete metric, all labels...\r
+ #REGISTRY.unregister(self.prom_metrics[metric_dict['metric_key']])\r
+ #self.registry.unregister(self.prom_metrics[metric_dict['metric_key']])\r
+\r
+ for collector in self.registry._collectors :\r
+ logging.info('name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames, collector._metrics))\r
+ """\r
+ INFO:root:name:sonemu_rx_count_packets\r
+ labels:('vnf_name', 'vnf_interface')\r
+ metrics:{(u'tsrc', u'output'): < prometheus_client.core.Gauge\r
+ object\r
+ at\r
+ 0x7f353447fd10 >}\r
+ """\r
+ logging.info('{0}'.format(collector._metrics.values()))\r
+ #if self.prom_metrics[metric_dict['metric_key']]\r
+ if (vnf_name, vnf_interface) in collector._metrics:\r
+ logging.info('2 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,\r
+ collector._metrics))\r
+ #collector._metrics = {}\r
+ collector.remove(vnf_name, vnf_interface)\r
+\r
+ # set values to NaN, prometheus api currently does not support removal of metrics\r
+ #self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))\r
+\r
+ # this removes the complete metric, all labels...\r
+ # 1 single monitor job for all metrics of the SDN controller\r
+ # we can only remove from the pushgateway grouping keys(labels) which we have defined for the add_to_pushgateway\r
+ # we can not specify labels from the metrics to be removed\r
+ # if we need to remove the metrics seperatelty, we need to give them a separate grouping key, and probably a diffferent registry also\r
+ delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+\r
+ self.monitor_lock.release()\r
+\r
+ logging.info('Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))\r
+ return 'Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)\r
\r
-    # get all metrics defined in the list
+
+    # get all metrics defined in the list and export it to Prometheus
    def get_network_metrics(self):
        # Monitoring-thread main loop; runs until stop() clears
        # self.start_monitoring.
        # NOTE(review): diff fragment - the code that queries the Ryu REST API
        # per dpid and builds metric_list/port_stat_dict is elided here.
-        while True:
+        while self.start_monitoring:
+
+            # lock out setup_metric()/stop_metric() while the list is walked
+            self.monitor_lock.acquire()
+
            # group metrics by dpid to optimize the rest api calls
            dpid_list = [metric_dict['switch_dpid'] for metric_dict in self.network_metrics]
            dpid_set = set(dpid_list)
            for metric_dict in metric_list:
                self.set_network_metric(metric_dict, port_stat_dict)

+            self.monitor_lock.release()
            # poll the port counters roughly once per second
            time.sleep(1)
\r
-    # call this function repeatedly for streaming measurements
+    # add metric to the list to export to Prometheus, parse the Ryu port-stats reply
    def set_network_metric(self, metric_dict, port_stat_dict):
        # NOTE(review): diff fragment - the loop over the port stats that
        # binds port_stat, port_uptime and this_measurement is elided here.
-
-        metric_key = metric_dict['metric_key']
+        # vnf tx is the datacenter switch rx and vice-versa
+        metric_key = self.switch_tx_rx(metric_dict['metric_key'])
        switch_dpid = metric_dict['switch_dpid']
        vnf_name = metric_dict['vnf_name']
        vnf_interface = metric_dict['vnf_interface']
            #logging.info('set prom packets:{0} {1}:{2}'.format(this_measurement, vnf_name, vnf_interface))

            # set prometheus metric
-            self.prom_metrics[metric_key].labels(vnf_name, vnf_interface).set(this_measurement)
+            self.prom_metrics[metric_dict['metric_key']].\
+                labels({'vnf_name':vnf_name, 'vnf_interface':vnf_interface}).\
+                set(this_measurement)
+            #push_to_gateway(self.pushgateway, job='SDNcontroller',
+            #                grouping_key={'metric':metric_dict['metric_key']}, registry=self.registry)
+
+            # 1 single monitor job for all metrics of the SDN controller
+            pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)

            # first poll (or switch uptime reset): store the baseline, wait,
            # then recurse to obtain a usable rate
            if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:
                metric_dict['previous_measurement'] = int(port_stat[metric_key])
                # do first measurement
                #logging.info('first measurement')
                time.sleep(1)
+                # NOTE(review): re-entering the polling loop from here looks
+                # suspicious (recursion into the thread main loop) - confirm
+                # this is intended; previous_monitor_time is also not updated
+                # in this branch.
+                self.monitor_lock.release()
-                byte_rate = self.get_network_metrics()
-                return byte_rate
+                metric_rate = self.get_network_metrics()
+                return metric_rate
            else:
                # rate = counter delta / elapsed port uptime
                time_delta = (port_uptime - metric_dict['previous_monitor_time'])
-                byte_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)
+                metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)
                # logging.info('uptime:{2} delta:{0} rate:{1}'.format(time_delta,byte_rate,port_uptime))

                metric_dict['previous_measurement'] = this_measurement
                metric_dict['previous_monitor_time'] = port_uptime
-                return byte_rate
+                return metric_rate

        logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
        return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)
\r
\r
- # call this function repeatedly for streaming measurements\r
- def get_rate(self, vnf_name, vnf_interface=None, direction='tx', metric='packets'):\r
-\r
- key = self.metric_key\r
-\r
- ret = self.REST_cmd('stats/port', self.switch_dpid)\r
- port_stat_dict = ast.literal_eval(ret)\r
- for port_stat in port_stat_dict[str(self.switch_dpid)]:\r
- if port_stat['port_no'] == self.mon_port:\r
- port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)\r
- this_measurement = int(port_stat[key])\r
- #logging.info('packets:{0}'.format(this_measurement))\r
-\r
- # set prometheus metrics\r
- if metric == 'packets':\r
- self.prom_tx_packet_count.labels(self.vnf_name, self.vnf_interface).set(this_measurement)\r
- elif metric == 'bytes':\r
- self.prom_tx_byte_count.labels(self.vnf_name, self.vnf_interface).set(this_measurement)\r
-\r
- if self.previous_monitor_time <= 0 or self.previous_monitor_time >= port_uptime:\r
- self.previous_measurement = int(port_stat[key])\r
- self.previous_monitor_time = port_uptime\r
- # do first measurement\r
- time.sleep(1)\r
- byte_rate = self.get_rate(vnf_name, vnf_interface, direction, metric)\r
- return byte_rate\r
- else:\r
- time_delta = (port_uptime - self.previous_monitor_time)\r
- byte_rate = (this_measurement - self.previous_measurement) / float(time_delta)\r
- #logging.info('uptime:{2} delta:{0} rate:{1}'.format(time_delta,byte_rate,port_uptime))\r
-\r
- self.previous_measurement = this_measurement\r
- self.previous_monitor_time = port_uptime\r
- return byte_rate\r
-\r
- return ret\r
-\r
def REST_cmd(self, prefix, dpid):\r
url = self.REST_api + '/' + str(prefix) + '/' + str(dpid)\r
req = urllib2.Request(url)\r
ret = urllib2.urlopen(req).read()\r
- return ret
\ No newline at end of file
+ return ret\r
+\r
+ def start_Prometheus(self, port=9090):\r
+ # prometheus.yml configuration file is located in the same directory as this file\r
+ cmd = ["docker",\r
+ "run",\r
+ "--rm",\r
+ "-p", "{0}:9090".format(port),\r
+ "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(os.path.dirname(os.path.abspath(__file__))),\r
+ "--name", "prometheus",\r
+ "prom/prometheus"\r
+ ]\r
+ logging.info('Start Prometheus container {0}'.format(cmd))\r
+ return Popen(cmd)\r
+\r
+ def start_PushGateway(self, port=9091):\r
+ cmd = ["docker",\r
+ "run",\r
+ "-d",\r
+ "-p", "{0}:9091".format(port),\r
+ "--name", "pushgateway",\r
+ "prom/pushgateway"\r
+ ]\r
+\r
+ logging.info('Start Prometheus Push Gateway container {0}'.format(cmd))\r
+ return Popen(cmd)\r
+\r
+ def start_cadvisor(self, port=8090):\r
+ cmd = ["docker",\r
+ "run",\r
+ "--rm",\r
+ "--volume=/:/rootfs:ro",\r
+ "--volume=/var/run:/var/run:rw",\r
+ "--volume=/sys:/sys:ro",\r
+ "--volume=/var/lib/docker/:/var/lib/docker:ro",\r
+ "--publish={0}:8080".format(port),\r
+ "--name=cadvisor",\r
+ "google/cadvisor:latest"\r
+ ]\r
+ logging.info('Start cAdvisor container {0}'.format(cmd))\r
+ return Popen(cmd)\r
+\r
+ def stop(self):\r
+ # stop the monitoring thread\r
+ self.start_monitoring = False\r
+ self.monitor_thread.join()\r
+\r
+ if self.prometheus_process is not None:\r
+ logging.info('stopping prometheus container')\r
+ self.prometheus_process.terminate()\r
+ self.prometheus_process.kill()\r
+ self._stop_container('prometheus')\r
+\r
+ if self.pushgateway_process is not None:\r
+ logging.info('stopping pushgateway container')\r
+ self.pushgateway_process.terminate()\r
+ self.pushgateway_process.kill()\r
+ self._stop_container('pushgateway')\r
+\r
+ if self.cadvisor_process is not None:\r
+ logging.info('stopping cadvisor container')\r
+ self.cadvisor_process.terminate()\r
+ self.cadvisor_process.kill()\r
+ self._stop_container('cadvisor')\r
+\r
+ def switch_tx_rx(self,metric=''):\r
+ # when monitoring vnfs, the tx of the datacenter switch is actually the rx of the vnf\r
+ # so we need to change the metric name to be consistent with the vnf rx or tx\r
+ if 'tx' in metric:\r
+ metric = metric.replace('tx','rx')\r
+ elif 'rx' in metric:\r
+ metric = metric.replace('rx','tx')\r
+\r
+ return metric\r
+\r
+ def _stop_container(self, name):\r
+ cmd = ["docker",\r
+ "stop",\r
+ name]\r
+ Popen(cmd).wait()\r
+\r
+ cmd = ["docker",\r
+ "rm",\r
+ name]\r
+ Popen(cmd).wait()\r
+\r
+\r