__author__ = 'Administrator'

# stdlib
import ast
import logging
import os
import threading
import time
import urllib2
from subprocess import Popen, PIPE

# third-party (already used elsewhere in this file)
import paramiko
from mininet.node import OVSSwitch
from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \
    pushadd_to_gateway, push_to_gateway, delete_from_gateway

logging.basicConfig(level=logging.INFO)
class DCNetworkMonitor():
    """
    Class to read openflow stats from the Ryu controller of the DCNetwork
    and export them as Prometheus metrics (via a pushgateway).
    """

    def __init__(self, net):
        # the DCNetwork instance this monitor observes
        self.net = net  # NOTE(review): assignment reconstructed; original line missing from this chunk

        # link to Ryu REST_API
        # TODO confirm: the ryu_ip/ryu_port definitions are missing from this chunk;
        # values below are assumed defaults — verify against the original file.
        ryu_ip = '0.0.0.0'
        ryu_port = '8080'
        self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)
        prometheus_ip = '0.0.0.0'
        prometheus_port = '9090'
        self.prometheus_REST_api = 'http://{0}:{1}'.format(prometheus_ip, prometheus_port)

        # helper variables to calculate the metrics
        self.pushgateway = 'localhost:9091'
        # Start up the server to expose the metrics to Prometheus.
        #start_http_server(8000)

        # supported Prometheus metrics; a private registry so pushes only carry our gauges
        self.registry = CollectorRegistry()
        self.prom_tx_packet_count = Gauge('sonemu_tx_count_packets', 'Total number of packets sent',
                                          ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
        self.prom_rx_packet_count = Gauge('sonemu_rx_count_packets', 'Total number of packets received',
                                          ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
        self.prom_tx_byte_count = Gauge('sonemu_tx_count_bytes', 'Total number of bytes sent',
                                        ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
        self.prom_rx_byte_count = Gauge('sonemu_rx_count_bytes', 'Total number of bytes received',
                                        ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)

        # map Ryu stat keys to the matching Prometheus gauge
        self.prom_metrics = {'tx_packets': self.prom_tx_packet_count, 'rx_packets': self.prom_rx_packet_count,
                            'tx_bytes': self.prom_tx_byte_count, 'rx_bytes': self.prom_rx_byte_count}

        # list of installed metrics to monitor
        # each entry can contain this data:
        #   previous_measurement = 0
        #   previous_monitor_time = 0
        self.monitor_lock = threading.Lock()
        self.monitor_flow_lock = threading.Lock()
        self.network_metrics = []
        self.flow_metrics = []

        # start monitoring threads (port stats and flow stats are polled separately)
        self.start_monitoring = True
        self.monitor_thread = threading.Thread(target=self.get_network_metrics)
        self.monitor_thread.start()

        self.monitor_flow_thread = threading.Thread(target=self.get_flow_metrics)
        self.monitor_flow_thread.start()

        # spawn the docker-based monitoring toolchain
        self.pushgateway_process = self.start_PushGateway()
        self.prometheus_process = self.start_Prometheus()
        self.cadvisor_process = self.start_cadvisor()
85 # first set some parameters, before measurement can start
86 def setup_flow(self
, vnf_name
, vnf_interface
=None, metric
='tx_packets', cookie
=0):
90 # check if port is specified (vnf:port)
91 if vnf_interface
is None:
92 # take first interface by default
93 connected_sw
= self
.net
.DCNetwork_graph
.neighbors(vnf_name
)[0]
94 link_dict
= self
.net
.DCNetwork_graph
[vnf_name
][connected_sw
]
95 vnf_interface
= link_dict
[0]['src_port_id']
97 flow_metric
['vnf_name'] = vnf_name
98 flow_metric
['vnf_interface'] = vnf_interface
101 for connected_sw
in self
.net
.DCNetwork_graph
.neighbors(vnf_name
):
102 link_dict
= self
.net
.DCNetwork_graph
[vnf_name
][connected_sw
]
103 for link
in link_dict
:
104 # logging.info("{0},{1}".format(link_dict[link],vnf_interface))
105 if link_dict
[link
]['src_port_id'] == vnf_interface
:
106 # found the right link and connected switch
107 # logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
108 vnf_switch
= connected_sw
109 flow_metric
['mon_port'] = link_dict
[link
]['dst_port_nr']
113 logging
.exception("vnf switch of {0}:{1} not found!".format(vnf_name
, vnf_interface
))
114 return "vnf switch of {0}:{1} not found!".format(vnf_name
, vnf_interface
)
117 # default port direction to monitor
119 metric
= 'tx_packets'
121 next_node
= self
.net
.getNodeByName(vnf_switch
)
123 if not isinstance(next_node
, OVSSwitch
):
124 logging
.info("vnf: {0} is not connected to switch".format(vnf_name
))
127 flow_metric
['previous_measurement'] = 0
128 flow_metric
['previous_monitor_time'] = 0
130 flow_metric
['switch_dpid'] = int(str(next_node
.dpid
), 16)
131 flow_metric
['metric_key'] = metric
132 flow_metric
['cookie'] = cookie
134 self
.monitor_flow_lock
.acquire()
135 self
.flow_metrics
.append(flow_metric
)
136 self
.monitor_flow_lock
.release()
138 logging
.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name
, vnf_interface
, metric
, cookie
))
139 return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name
, vnf_interface
, metric
, cookie
)
141 except Exception as ex
:
142 logging
.exception("setup_metric error.")
146 # first set some parameters, before measurement can start
147 def setup_metric(self
, vnf_name
, vnf_interface
=None, metric
='tx_packets'):
151 # check if port is specified (vnf:port)
152 if vnf_interface
is None:
153 # take first interface by default
154 connected_sw
= self
.net
.DCNetwork_graph
.neighbors(vnf_name
)[0]
155 link_dict
= self
.net
.DCNetwork_graph
[vnf_name
][connected_sw
]
156 vnf_interface
= link_dict
[0]['src_port_id']
158 network_metric
['vnf_name'] = vnf_name
159 network_metric
['vnf_interface'] = vnf_interface
161 for connected_sw
in self
.net
.DCNetwork_graph
.neighbors(vnf_name
):
162 link_dict
= self
.net
.DCNetwork_graph
[vnf_name
][connected_sw
]
163 for link
in link_dict
:
164 # logging.info("{0},{1}".format(link_dict[link],vnf_interface))
165 if link_dict
[link
]['src_port_id'] == vnf_interface
:
166 # found the right link and connected switch
167 # logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
168 network_metric
['mon_port'] = link_dict
[link
]['dst_port_nr']
171 if 'mon_port' not in network_metric
:
172 logging
.exception("vnf interface {0}:{1} not found!".format(vnf_name
,vnf_interface
))
173 return "vnf interface {0}:{1} not found!".format(vnf_name
,vnf_interface
)
176 # default port direction to monitor
178 metric
= 'tx_packets'
180 vnf_switch
= self
.net
.DCNetwork_graph
.neighbors(str(vnf_name
))
182 if len(vnf_switch
) > 1:
183 logging
.info("vnf: {0} has multiple ports".format(vnf_name
))
185 elif len(vnf_switch
) == 0:
186 logging
.info("vnf: {0} is not connected".format(vnf_name
))
189 vnf_switch
= vnf_switch
[0]
190 next_node
= self
.net
.getNodeByName(vnf_switch
)
192 if not isinstance(next_node
, OVSSwitch
):
193 logging
.info("vnf: {0} is not connected to switch".format(vnf_name
))
196 network_metric
['previous_measurement'] = 0
197 network_metric
['previous_monitor_time'] = 0
200 network_metric
['switch_dpid'] = int(str(next_node
.dpid
), 16)
201 network_metric
['metric_key'] = metric
203 self
.monitor_lock
.acquire()
205 self
.network_metrics
.append(network_metric
)
206 self
.monitor_lock
.release()
209 logging
.info('Started monitoring: {2} on {0}:{1}'.format(vnf_name
, vnf_interface
, metric
))
210 return 'Started monitoring: {2} on {0}:{1}'.format(vnf_name
, vnf_interface
, metric
)
212 except Exception as ex
:
213 logging
.exception("setup_metric error.")
216 def stop_metric(self
, vnf_name
, vnf_interface
=None, metric
=None):
218 for metric_dict
in self
.network_metrics
:
219 #logging.info('start Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric_dict))
220 if metric_dict
['vnf_name'] == vnf_name
and metric_dict
['vnf_interface'] == vnf_interface \
221 and metric_dict
['metric_key'] == metric
:
223 self
.monitor_lock
.acquire()
225 self
.network_metrics
.remove(metric_dict
)
227 #this removes the complete metric, all labels...
228 #REGISTRY.unregister(self.prom_metrics[metric_dict['metric_key']])
229 #self.registry.unregister(self.prom_metrics[metric_dict['metric_key']])
231 for collector
in self
.registry
._collectors
:
232 #logging.info('name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames, collector._metrics))
234 INFO:root:name:sonemu_rx_count_packets
235 labels:('vnf_name', 'vnf_interface')
236 metrics:{(u'tsrc', u'output'): < prometheus_client.core.Gauge
241 logging
.info('{0}'.format(collector
._metrics
.values()))
242 #if self.prom_metrics[metric_dict['metric_key']]
243 if (vnf_name
, vnf_interface
, 'None') in collector
._metrics
:
244 logging
.info('2 name:{0} labels:{1} metrics:{2}'.format(collector
._name
, collector
._labelnames
,
246 #collector._metrics = {}
247 collector
.remove(vnf_name
, vnf_interface
, 'None')
249 # set values to NaN, prometheus api currently does not support removal of metrics
250 #self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))
252 # this removes the complete metric, all labels...
253 # 1 single monitor job for all metrics of the SDN controller
254 # we can only remove from the pushgateway grouping keys(labels) which we have defined for the add_to_pushgateway
255 # we can not specify labels from the metrics to be removed
256 # if we need to remove the metrics seperatelty, we need to give them a separate grouping key, and probably a diffferent registry also
257 delete_from_gateway(self
.pushgateway
, job
='sonemu-SDNcontroller')
259 self
.monitor_lock
.release()
261 logging
.info('Stopped monitoring: {2} on {0}:{1}'.format(vnf_name
, vnf_interface
, metric
))
262 return 'Stopped monitoring: {2} on {0}:{1}'.format(vnf_name
, vnf_interface
, metric
)
264 # delete everything from this vnf
265 elif metric_dict
['vnf_name'] == vnf_name
and vnf_interface
is None and metric
is None:
266 self
.monitor_lock
.acquire()
267 self
.network_metrics
.remove(metric_dict
)
268 for collector
in self
.registry
._collectors
:
269 collector_dict
= collector
._metrics
.copy()
270 for name
, interface
, id in collector_dict
:
272 logging
.info('3 name:{0} labels:{1} metrics:{2}'.format(collector
._name
, collector
._labelnames
,
274 collector
.remove(name
, interface
, 'None')
276 delete_from_gateway(self
.pushgateway
, job
='sonemu-SDNcontroller')
277 self
.monitor_lock
.release()
278 logging
.info('Stopped monitoring vnf: {0}'.format(vnf_name
))
279 return 'Stopped monitoring: {0}'.format(vnf_name
)
282 # get all metrics defined in the list and export it to Prometheus
283 def get_flow_metrics(self
):
284 while self
.start_monitoring
:
286 self
.monitor_flow_lock
.acquire()
288 for flow_dict
in self
.flow_metrics
:
291 data
['cookie'] = flow_dict
['cookie']
293 if 'tx' in flow_dict
['metric_key']:
294 data
['match'] = {'in_port':flow_dict
['mon_port']}
295 elif 'rx' in flow_dict
['metric_key']:
296 data
['out_port'] = flow_dict
['mon_port']
300 ret
= self
.REST_cmd('stats/flow', flow_dict
['switch_dpid'], data
=data
)
301 flow_stat_dict
= ast
.literal_eval(ret
)
303 logging
.info('received flow stat:{0} '.format(flow_stat_dict
))
304 self
.set_flow_metric(flow_dict
, flow_stat_dict
)
306 self
.monitor_flow_lock
.release()
309 def get_network_metrics(self
):
310 while self
.start_monitoring
:
312 self
.monitor_lock
.acquire()
314 # group metrics by dpid to optimize the rest api calls
315 dpid_list
= [metric_dict
['switch_dpid'] for metric_dict
in self
.network_metrics
]
316 dpid_set
= set(dpid_list
)
318 for dpid
in dpid_set
:
321 ret
= self
.REST_cmd('stats/port', dpid
)
322 port_stat_dict
= ast
.literal_eval(ret
)
324 metric_list
= [metric_dict
for metric_dict
in self
.network_metrics
325 if int(metric_dict
['switch_dpid'])==int(dpid
)]
326 #logging.info('1set prom packets:{0} '.format(self.network_metrics))
327 for metric_dict
in metric_list
:
328 self
.set_network_metric(metric_dict
, port_stat_dict
)
330 self
.monitor_lock
.release()
333 # add metric to the list to export to Prometheus, parse the Ryu port-stats reply
334 def set_network_metric(self
, metric_dict
, port_stat_dict
):
335 # vnf tx is the datacenter switch rx and vice-versa
336 metric_key
= self
.switch_tx_rx(metric_dict
['metric_key'])
337 switch_dpid
= metric_dict
['switch_dpid']
338 vnf_name
= metric_dict
['vnf_name']
339 vnf_interface
= metric_dict
['vnf_interface']
340 previous_measurement
= metric_dict
['previous_measurement']
341 previous_monitor_time
= metric_dict
['previous_monitor_time']
342 mon_port
= metric_dict
['mon_port']
344 for port_stat
in port_stat_dict
[str(switch_dpid
)]:
345 if int(port_stat
['port_no']) == int(mon_port
):
346 port_uptime
= port_stat
['duration_sec'] + port_stat
['duration_nsec'] * 10 ** (-9)
347 this_measurement
= int(port_stat
[metric_key
])
348 #logging.info('set prom packets:{0} {1}:{2}'.format(this_measurement, vnf_name, vnf_interface))
350 # set prometheus metric
351 self
.prom_metrics
[metric_dict
['metric_key']].\
352 labels({'vnf_name': vnf_name
, 'vnf_interface': vnf_interface
, 'flow_id': None}).\
353 set(this_measurement
)
354 #push_to_gateway(self.pushgateway, job='SDNcontroller',
355 # grouping_key={'metric':metric_dict['metric_key']}, registry=self.registry)
357 # 1 single monitor job for all metrics of the SDN controller
358 pushadd_to_gateway(self
.pushgateway
, job
='sonemu-SDNcontroller', registry
=self
.registry
)
360 if previous_monitor_time
<= 0 or previous_monitor_time
>= port_uptime
:
361 metric_dict
['previous_measurement'] = int(port_stat
[metric_key
])
362 metric_dict
['previous_monitor_time'] = port_uptime
363 # do first measurement
364 #logging.info('first measurement')
366 self
.monitor_lock
.release()
368 metric_rate
= self
.get_network_metrics()
372 time_delta
= (port_uptime
- metric_dict
['previous_monitor_time'])
373 metric_rate
= (this_measurement
- metric_dict
['previous_measurement']) / float(time_delta
)
374 #logging.info('metric: {0} rate:{1}'.format(metric_dict['metric_key'], metric_rate))
376 metric_dict
['previous_measurement'] = this_measurement
377 metric_dict
['previous_monitor_time'] = port_uptime
380 logging
.exception('metric {0} not found on {1}:{2}'.format(metric_key
, vnf_name
, vnf_interface
))
381 return 'metric {0} not found on {1}:{2}'.format(metric_key
, vnf_name
, vnf_interface
)
383 def set_flow_metric(self
, metric_dict
, flow_stat_dict
):
384 # vnf tx is the datacenter switch rx and vice-versa
385 #metric_key = self.switch_tx_rx(metric_dict['metric_key'])
386 metric_key
= metric_dict
['metric_key']
387 switch_dpid
= metric_dict
['switch_dpid']
388 vnf_name
= metric_dict
['vnf_name']
389 vnf_interface
= metric_dict
['vnf_interface']
390 previous_measurement
= metric_dict
['previous_measurement']
391 previous_monitor_time
= metric_dict
['previous_monitor_time']
392 cookie
= metric_dict
['cookie']
394 # TODO aggregate all found flow stats
395 flow_stat
= flow_stat_dict
[str(switch_dpid
)][0]
396 if 'bytes' in metric_key
:
397 counter
= flow_stat
['byte_count']
398 elif 'packet' in metric_key
:
399 counter
= flow_stat
['packet_count']
401 flow_uptime
= flow_stat
['duration_sec'] + flow_stat
['duration_nsec'] * 10 ** (-9)
403 self
.prom_metrics
[metric_dict
['metric_key']]. \
404 labels({'vnf_name': vnf_name
, 'vnf_interface': vnf_interface
, 'flow_id': cookie
}). \
406 pushadd_to_gateway(self
.pushgateway
, job
='sonemu-SDNcontroller', registry
=self
.registry
)
408 #logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
409 #return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)
411 def REST_cmd(self
, prefix
, dpid
, data
=None):
412 url
= self
.ryu_REST_api
+ '/' + str(prefix
) + '/' + str(dpid
)
414 logging
.info('POST: {0}'.format(str(data
)))
415 req
= urllib2
.Request(url
, str(data
))
417 req
= urllib2
.Request(url
)
419 ret
= urllib2
.urlopen(req
).read()
422 def query_Prometheus(self
, query
):
425 for old in escaped_chars:
426 new = '\{0}'.format(old)
427 query = query.replace(old, new)
429 url
= self
.prometheus_REST_api
+ '/' + 'api/v1/query?query=' + query
430 #logging.info('query:{0}'.format(url))
431 req
= urllib2
.Request(url
)
432 ret
= urllib2
.urlopen(req
).read()
433 ret
= ast
.literal_eval(ret
)
434 if ret
['status'] == 'success':
435 #logging.info('return:{0}'.format(ret))
437 ret
= ret
['data']['result'][0]['value']
444 def start_Prometheus(self
, port
=9090):
445 # prometheus.yml configuration file is located in the same directory as this file
449 "-p", "{0}:9090".format(port
),
450 "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(os
.path
.dirname(os
.path
.abspath(__file__
))),
451 "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(os
.path
.dirname(os
.path
.abspath(__file__
))),
452 "--name", "prometheus",
455 logging
.info('Start Prometheus container {0}'.format(cmd
))
458 def start_PushGateway(self
, port
=9091):
462 "-p", "{0}:9091".format(port
),
463 "--name", "pushgateway",
467 logging
.info('Start Prometheus Push Gateway container {0}'.format(cmd
))
470 def start_cadvisor(self
, port
=8090):
474 "--volume=/:/rootfs:ro",
475 "--volume=/var/run:/var/run:rw",
476 "--volume=/sys:/sys:ro",
477 "--volume=/var/lib/docker/:/var/lib/docker:ro",
478 "--publish={0}:8080".format(port
),
480 "google/cadvisor:latest"
482 logging
.info('Start cAdvisor container {0}'.format(cmd
))
486 # stop the monitoring thread
487 self
.start_monitoring
= False
488 self
.monitor_thread
.join()
489 self
.monitor_flow_thread
.join()
491 if self
.prometheus_process
is not None:
492 logging
.info('stopping prometheus container')
493 self
.prometheus_process
.terminate()
494 self
.prometheus_process
.kill()
495 self
._stop
_container
('prometheus')
497 if self
.pushgateway_process
is not None:
498 logging
.info('stopping pushgateway container')
499 self
.pushgateway_process
.terminate()
500 self
.pushgateway_process
.kill()
501 self
._stop
_container
('pushgateway')
503 if self
.cadvisor_process
is not None:
504 logging
.info('stopping cadvisor container')
505 self
.cadvisor_process
.terminate()
506 self
.cadvisor_process
.kill()
507 self
._stop
_container
('cadvisor')
509 def switch_tx_rx(self
,metric
=''):
510 # when monitoring vnfs, the tx of the datacenter switch is actually the rx of the vnf
511 # so we need to change the metric name to be consistent with the vnf rx or tx
513 metric
= metric
.replace('tx','rx')
515 metric
= metric
.replace('rx','tx')
519 def _stop_container(self
, name
):
530 def profile(self
, mgmt_ip
, rate
, input_ip
, vnf_uuid
):
532 ssh
= paramiko
.SSHClient()
533 ssh
.set_missing_host_key_policy(paramiko
.AutoAddPolicy())
534 #ssh.connect(mgmt_ip, username='steven', password='test')
535 ssh
.connect(mgmt_ip
, username
='root', password
='root')
537 iperf_cmd
= 'iperf -c {0} -u -l18 -b{1}M -t1000 &'.format(input_ip
, rate
)
539 stdin
, stdout
, stderr
= ssh
.exec_command(iperf_cmd
)
541 start_time
= time
.time()
542 query_cpu
= '(sum(rate(container_cpu_usage_seconds_total{{id="/docker/{0}"}}[{1}s])))'.format(vnf_uuid
, 1)
543 while (time
.time() - start_time
) < 15:
544 data
= self
.query_Prometheus(query_cpu
)
545 # logging.info('rate: {1} data:{0}'.format(data, rate))
549 query_cpu2
= '(sum(rate(container_cpu_usage_seconds_total{{id="/docker/{0}"}}[{1}s])))'.format(vnf_uuid
, 8)
550 cpu_load
= float(self
.query_Prometheus(query_cpu2
)[1])
551 output
= 'rate: {1}Mbps; cpu_load: {0}%'.format(round(cpu_load
* 100, 2), rate
)
553 logging
.info(output_line
)
555 stop_iperf
= 'pkill -9 iperf'
556 stdin
, stdout
, stderr
= ssh
.exec_command(stop_iperf
)