Merge pull request #95 from stevenvanrossem/master
[osm/vim-emu.git] / src/emuvim/dcemulator/monitoring.py
__author__ = 'Administrator'

import urllib2
import logging
from mininet.node import OVSSwitch
import ast
import time
from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \
    pushadd_to_gateway, push_to_gateway, delete_from_gateway
import threading
from subprocess import Popen, PIPE
import os

import paramiko
import gevent

logging.basicConfig(level=logging.INFO)

"""
Class to read OpenFlow statistics from the Ryu controller of the DCNetwork
and export them as Prometheus metrics.
"""

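# Minimal usage sketch (illustrative only; assumes a running DCNetwork instance
# `net` whose Ryu REST API listens on port 8080; the VNF and interface names
# below are placeholders):
#
#   mon = DCNetworkMonitor(net)
#   mon.setup_metric('vnf1', vnf_interface='vnf1-eth0', metric='tx_packets')
#   mon.setup_flow('vnf1', vnf_interface='vnf1-eth0', metric='rx_bytes', cookie=10)
#   ...
#   mon.stop_metric('vnf1', vnf_interface='vnf1-eth0', metric='tx_packets')
#   mon.stop()
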
class DCNetworkMonitor():
    def __init__(self, net):
        self.net = net
        # link to the Ryu REST API
        ryu_ip = '0.0.0.0'
        ryu_port = '8080'
        self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)
        prometheus_ip = '0.0.0.0'
        prometheus_port = '9090'
        self.prometheus_REST_api = 'http://{0}:{1}'.format(prometheus_ip, prometheus_port)

        # helper variables to calculate the metrics
        self.pushgateway = 'localhost:9091'
        # Start up the server to expose the metrics to Prometheus.
        #start_http_server(8000)
        # supported Prometheus metrics
        self.registry = CollectorRegistry()
        self.prom_tx_packet_count = Gauge('sonemu_tx_count_packets', 'Total number of packets sent',
                                          ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
        self.prom_rx_packet_count = Gauge('sonemu_rx_count_packets', 'Total number of packets received',
                                          ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
        self.prom_tx_byte_count = Gauge('sonemu_tx_count_bytes', 'Total number of bytes sent',
                                        ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
        self.prom_rx_byte_count = Gauge('sonemu_rx_count_bytes', 'Total number of bytes received',
                                        ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)

        self.prom_metrics = {'tx_packets': self.prom_tx_packet_count, 'rx_packets': self.prom_rx_packet_count,
                             'tx_bytes': self.prom_tx_byte_count, 'rx_bytes': self.prom_rx_byte_count}

        # list of installed metrics to monitor
        # each entry can contain this data
        '''
        {
        switch_dpid = 0
        vnf_name = None
        vnf_interface = None
        previous_measurement = 0
        previous_monitor_time = 0
        metric_key = None
        mon_port = None
        }
        '''
        self.monitor_lock = threading.Lock()
        self.monitor_flow_lock = threading.Lock()
        self.network_metrics = []
        self.flow_metrics = []

        # start monitoring threads
        self.start_monitoring = True
        self.monitor_thread = threading.Thread(target=self.get_network_metrics)
        self.monitor_thread.start()

        self.monitor_flow_thread = threading.Thread(target=self.get_flow_metrics)
        self.monitor_flow_thread.start()

        # helper tools (started as Docker containers)
        self.pushgateway_process = self.start_PushGateway()
        self.prometheus_process = self.start_Prometheus()
        self.cadvisor_process = self.start_cadvisor()

    # first set some parameters, before measurement can start
    def setup_flow(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=0):

        flow_metric = {}

        # check if port is specified (vnf:port)
        if vnf_interface is None:
            # take first interface by default
            connected_sw = self.net.DCNetwork_graph.neighbors(vnf_name)[0]
            link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
            vnf_interface = link_dict[0]['src_port_id']

        flow_metric['vnf_name'] = vnf_name
        flow_metric['vnf_interface'] = vnf_interface

        vnf_switch = None
        for connected_sw in self.net.DCNetwork_graph.neighbors(vnf_name):
            link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
            for link in link_dict:
                # logging.info("{0},{1}".format(link_dict[link], vnf_interface))
                if link_dict[link]['src_port_id'] == vnf_interface:
                    # found the right link and connected switch
                    # logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
                    vnf_switch = connected_sw
                    flow_metric['mon_port'] = link_dict[link]['dst_port_nr']
                    break

        if not vnf_switch:
            logging.error("vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface))
            return "vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface)

        try:
            # default port direction to monitor
            if metric is None:
                metric = 'tx_packets'

            next_node = self.net.getNodeByName(vnf_switch)

            if not isinstance(next_node, OVSSwitch):
                logging.info("vnf: {0} is not connected to switch".format(vnf_name))
                return

            flow_metric['previous_measurement'] = 0
            flow_metric['previous_monitor_time'] = 0

            flow_metric['switch_dpid'] = int(str(next_node.dpid), 16)
            flow_metric['metric_key'] = metric
            flow_metric['cookie'] = cookie

            self.monitor_flow_lock.acquire()
            self.flow_metrics.append(flow_metric)
            self.monitor_flow_lock.release()

            logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))
            return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)

        except Exception as ex:
            logging.exception("setup_flow error.")
            return ex.message

    # first set some parameters, before measurement can start
    def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):

        network_metric = {}

        # check if port is specified (vnf:port)
        if vnf_interface is None:
            # take first interface by default
            connected_sw = self.net.DCNetwork_graph.neighbors(vnf_name)[0]
            link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
            vnf_interface = link_dict[0]['src_port_id']

        network_metric['vnf_name'] = vnf_name
        network_metric['vnf_interface'] = vnf_interface

        for connected_sw in self.net.DCNetwork_graph.neighbors(vnf_name):
            link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
            for link in link_dict:
                # logging.info("{0},{1}".format(link_dict[link], vnf_interface))
                if link_dict[link]['src_port_id'] == vnf_interface:
                    # found the right link and connected switch
                    # logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
                    network_metric['mon_port'] = link_dict[link]['dst_port_nr']
                    break

        if 'mon_port' not in network_metric:
            logging.error("vnf interface {0}:{1} not found!".format(vnf_name, vnf_interface))
            return "vnf interface {0}:{1} not found!".format(vnf_name, vnf_interface)

        try:
            # default port direction to monitor
            if metric is None:
                metric = 'tx_packets'

            vnf_switch = self.net.DCNetwork_graph.neighbors(str(vnf_name))

            if len(vnf_switch) > 1:
                logging.info("vnf: {0} is connected to more than one switch".format(vnf_name))
                return
            elif len(vnf_switch) == 0:
                logging.info("vnf: {0} is not connected".format(vnf_name))
                return
            else:
                vnf_switch = vnf_switch[0]
            next_node = self.net.getNodeByName(vnf_switch)

            if not isinstance(next_node, OVSSwitch):
                logging.info("vnf: {0} is not connected to switch".format(vnf_name))
                return

            network_metric['previous_measurement'] = 0
            network_metric['previous_monitor_time'] = 0

            network_metric['switch_dpid'] = int(str(next_node.dpid), 16)
            network_metric['metric_key'] = metric

            self.monitor_lock.acquire()
            self.network_metrics.append(network_metric)
            self.monitor_lock.release()

            logging.info('Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))
            return 'Started monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)

        except Exception as ex:
            logging.exception("setup_metric error.")
            return ex.message

    def stop_metric(self, vnf_name, vnf_interface=None, metric=None):

        for metric_dict in self.network_metrics:
            #logging.info('start Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric_dict))
            if metric_dict['vnf_name'] == vnf_name and metric_dict['vnf_interface'] == vnf_interface \
                    and metric_dict['metric_key'] == metric:

                self.monitor_lock.acquire()

                self.network_metrics.remove(metric_dict)

                # this removes the complete metric, all labels...
                #REGISTRY.unregister(self.prom_metrics[metric_dict['metric_key']])
                #self.registry.unregister(self.prom_metrics[metric_dict['metric_key']])

                for collector in self.registry._collectors:
                    #logging.info('name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames, collector._metrics))
                    """
                    example output:
                    INFO:root:name:sonemu_rx_count_packets
                    labels:('vnf_name', 'vnf_interface')
                    metrics:{(u'tsrc', u'output'): < prometheus_client.core.Gauge object at 0x7f353447fd10 >}
                    """
                    logging.info('{0}'.format(collector._metrics.values()))
                    #if self.prom_metrics[metric_dict['metric_key']]
                    if (vnf_name, vnf_interface, 'None') in collector._metrics:
                        logging.info('2 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,
                                                                                collector._metrics))
                        #collector._metrics = {}
                        collector.remove(vnf_name, vnf_interface, 'None')

                # set values to NaN, prometheus api currently does not support removal of metrics
                #self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))

                # this removes the complete metric, all labels...
                # 1 single monitor job for all metrics of the SDN controller
                # we can only remove from the pushgateway the grouping keys (labels) defined for pushadd_to_gateway,
                # we cannot specify labels of the individual metrics to be removed
                # if we need to remove the metrics separately, we need to give them a separate grouping key,
                # and probably a different registry as well
                delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')

                self.monitor_lock.release()

                logging.info('Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))
                return 'Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)

            # delete everything from this vnf
            elif metric_dict['vnf_name'] == vnf_name and vnf_interface is None and metric is None:
                self.monitor_lock.acquire()
                self.network_metrics.remove(metric_dict)
                for collector in self.registry._collectors:
                    collector_dict = collector._metrics.copy()
                    for name, interface, id in collector_dict:
                        if name == vnf_name:
                            logging.info('3 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,
                                                                                    collector._metrics))
                            collector.remove(name, interface, 'None')

                delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')
                self.monitor_lock.release()
                logging.info('Stopped monitoring vnf: {0}'.format(vnf_name))
                return 'Stopped monitoring: {0}'.format(vnf_name)

    # get all metrics defined in the list and export them to Prometheus
    def get_flow_metrics(self):
        while self.start_monitoring:

            self.monitor_flow_lock.acquire()

            for flow_dict in self.flow_metrics:
                data = {}

                data['cookie'] = flow_dict['cookie']

                if 'tx' in flow_dict['metric_key']:
                    data['match'] = {'in_port': flow_dict['mon_port']}
                elif 'rx' in flow_dict['metric_key']:
                    data['out_port'] = flow_dict['mon_port']

                # query Ryu
                ret = self.REST_cmd('stats/flow', flow_dict['switch_dpid'], data=data)
                flow_stat_dict = ast.literal_eval(ret)

                logging.info('received flow stat:{0} '.format(flow_stat_dict))
                self.set_flow_metric(flow_dict, flow_stat_dict)

            self.monitor_flow_lock.release()
            time.sleep(1)

    def get_network_metrics(self):
        while self.start_monitoring:

            self.monitor_lock.acquire()

            # group metrics by dpid to optimize the REST API calls
            dpid_list = [metric_dict['switch_dpid'] for metric_dict in self.network_metrics]
            dpid_set = set(dpid_list)

            for dpid in dpid_set:

                # query Ryu
                ret = self.REST_cmd('stats/port', dpid)
                port_stat_dict = ast.literal_eval(ret)

                metric_list = [metric_dict for metric_dict in self.network_metrics
                               if int(metric_dict['switch_dpid']) == int(dpid)]
                #logging.info('1set prom packets:{0} '.format(self.network_metrics))
                for metric_dict in metric_list:
                    self.set_network_metric(metric_dict, port_stat_dict)

            self.monitor_lock.release()
            time.sleep(1)

    # add metric to the list to export to Prometheus, parse the Ryu port-stats reply
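    # The parsed /stats/port/<dpid> reply from Ryu roughly looks like this
    # (illustrative values; only the fields used below are shown):
    #   {'1': [{'port_no': 1, 'rx_packets': 10, 'tx_packets': 12,
    #           'rx_bytes': 640, 'tx_bytes': 768,
    #           'duration_sec': 5, 'duration_nsec': 0}, ...]}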
    def set_network_metric(self, metric_dict, port_stat_dict):
        # vnf tx is the datacenter switch rx and vice-versa
        metric_key = self.switch_tx_rx(metric_dict['metric_key'])
        switch_dpid = metric_dict['switch_dpid']
        vnf_name = metric_dict['vnf_name']
        vnf_interface = metric_dict['vnf_interface']
        previous_measurement = metric_dict['previous_measurement']
        previous_monitor_time = metric_dict['previous_monitor_time']
        mon_port = metric_dict['mon_port']

        for port_stat in port_stat_dict[str(switch_dpid)]:
            if int(port_stat['port_no']) == int(mon_port):
                port_uptime = port_stat['duration_sec'] + port_stat['duration_nsec'] * 10 ** (-9)
                this_measurement = int(port_stat[metric_key])
                #logging.info('set prom packets:{0} {1}:{2}'.format(this_measurement, vnf_name, vnf_interface))

                # set prometheus metric
                self.prom_metrics[metric_dict['metric_key']].\
                    labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': None}).\
                    set(this_measurement)
                #push_to_gateway(self.pushgateway, job='SDNcontroller',
                #                grouping_key={'metric':metric_dict['metric_key']}, registry=self.registry)

                # 1 single monitor job for all metrics of the SDN controller
                pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)

                if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:
                    # first measurement: only store the baseline here,
                    # the rate is calculated from the next monitoring iteration
                    metric_dict['previous_measurement'] = int(port_stat[metric_key])
                    metric_dict['previous_monitor_time'] = port_uptime
                    return

                else:
                    time_delta = (port_uptime - metric_dict['previous_monitor_time'])
                    metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)
                    #logging.info('metric: {0} rate:{1}'.format(metric_dict['metric_key'], metric_rate))

                    metric_dict['previous_measurement'] = this_measurement
                    metric_dict['previous_monitor_time'] = port_uptime
                    return metric_rate

        logging.error('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
        return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)

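    # The parsed /stats/flow/<dpid> reply from Ryu roughly looks like this
    # (illustrative values; only the fields used below are shown):
    #   {'1': [{'byte_count': 1152, 'packet_count': 18, 'cookie': 10,
    #           'duration_sec': 5, 'duration_nsec': 0, 'match': {...}}, ...]}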
    def set_flow_metric(self, metric_dict, flow_stat_dict):
        # vnf tx is the datacenter switch rx and vice-versa
        #metric_key = self.switch_tx_rx(metric_dict['metric_key'])
        metric_key = metric_dict['metric_key']
        switch_dpid = metric_dict['switch_dpid']
        vnf_name = metric_dict['vnf_name']
        vnf_interface = metric_dict['vnf_interface']
        previous_measurement = metric_dict['previous_measurement']
        previous_monitor_time = metric_dict['previous_monitor_time']
        cookie = metric_dict['cookie']

        # TODO aggregate all found flow stats
        flow_stat = flow_stat_dict[str(switch_dpid)][0]
        if 'bytes' in metric_key:
            counter = flow_stat['byte_count']
        elif 'packet' in metric_key:
            counter = flow_stat['packet_count']

        flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)

        self.prom_metrics[metric_dict['metric_key']]. \
            labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': cookie}). \
            set(counter)
        pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)

        #logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
        #return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)
    def REST_cmd(self, prefix, dpid, data=None):
        url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)
        if data:
            logging.info('POST: {0}'.format(str(data)))
            req = urllib2.Request(url, str(data))
        else:
            req = urllib2.Request(url)

        ret = urllib2.urlopen(req).read()
        return ret

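    # query_Prometheus() sends a raw PromQL expression to the Prometheus HTTP API
    # and returns the first [timestamp, value] pair of the result, e.g. (illustrative):
    #   query_Prometheus('sonemu_tx_count_packets{vnf_name="vnf1"}')
    #   -> [1469000000.0, '42']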
    def query_Prometheus(self, query):
        '''
        escaped_chars = '{}[]'
        for old in escaped_chars:
            new = '\{0}'.format(old)
            query = query.replace(old, new)
        '''
        url = self.prometheus_REST_api + '/' + 'api/v1/query?query=' + query
        #logging.info('query:{0}'.format(url))
        req = urllib2.Request(url)
        ret = urllib2.urlopen(req).read()
        ret = ast.literal_eval(ret)
        if ret['status'] == 'success':
            #logging.info('return:{0}'.format(ret))
            try:
                ret = ret['data']['result'][0]['value']
            except:
                ret = None
        else:
            ret = None
        return ret

    def start_Prometheus(self, port=9090):
        # the prometheus.yml configuration file is located in the same directory as this file
        cmd = ["docker",
               "run",
               "--rm",
               "-p", "{0}:9090".format(port),
               "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(os.path.dirname(os.path.abspath(__file__))),
               "-v", "{0}/profile.rules:/etc/prometheus/profile.rules".format(os.path.dirname(os.path.abspath(__file__))),
               "--name", "prometheus",
               "prom/prometheus"
               ]
        logging.info('Start Prometheus container {0}'.format(cmd))
        return Popen(cmd)

    def start_PushGateway(self, port=9091):
        cmd = ["docker",
               "run",
               "-d",
               "-p", "{0}:9091".format(port),
               "--name", "pushgateway",
               "prom/pushgateway"
               ]

        logging.info('Start Prometheus Push Gateway container {0}'.format(cmd))
        return Popen(cmd)

    def start_cadvisor(self, port=8090):
        cmd = ["docker",
               "run",
               "--rm",
               "--volume=/:/rootfs:ro",
               "--volume=/var/run:/var/run:rw",
               "--volume=/sys:/sys:ro",
               "--volume=/var/lib/docker/:/var/lib/docker:ro",
               "--publish={0}:8080".format(port),
               "--name=cadvisor",
               "google/cadvisor:latest"
               ]
        logging.info('Start cAdvisor container {0}'.format(cmd))
        return Popen(cmd)

    def stop(self):
        # stop the monitoring threads
        self.start_monitoring = False
        self.monitor_thread.join()
        self.monitor_flow_thread.join()

        if self.prometheus_process is not None:
            logging.info('stopping prometheus container')
            self.prometheus_process.terminate()
            self.prometheus_process.kill()
            self._stop_container('prometheus')

        if self.pushgateway_process is not None:
            logging.info('stopping pushgateway container')
            self.pushgateway_process.terminate()
            self.pushgateway_process.kill()
            self._stop_container('pushgateway')

        if self.cadvisor_process is not None:
            logging.info('stopping cadvisor container')
            self.cadvisor_process.terminate()
            self.cadvisor_process.kill()
            self._stop_container('cadvisor')

    def switch_tx_rx(self, metric=''):
        # when monitoring vnfs, the tx of the datacenter switch is actually the rx of the vnf
        # so we need to change the metric name to be consistent with the vnf rx or tx
        if 'tx' in metric:
            metric = metric.replace('tx', 'rx')
        elif 'rx' in metric:
            metric = metric.replace('rx', 'tx')

        return metric

    def _stop_container(self, name):
        cmd = ["docker",
               "stop",
               name]
        Popen(cmd).wait()

        cmd = ["docker",
               "rm",
               name]
        Popen(cmd).wait()

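    # profile() drives a simple load test: it logs into the VNF at mgmt_ip over SSH,
    # generates UDP traffic with iperf towards input_ip at the given rate (in Mbps) and
    # samples the container CPU load of vnf_uuid from the cAdvisor metrics in Prometheus.
    # Example call (illustrative values only):
    #   profile('172.17.0.2', rate=10, input_ip='10.0.0.2', vnf_uuid='<docker container id>')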
    def profile(self, mgmt_ip, rate, input_ip, vnf_uuid):

        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        #ssh.connect(mgmt_ip, username='steven', password='test')
        ssh.connect(mgmt_ip, username='root', password='root')

        iperf_cmd = 'iperf -c {0} -u -l18 -b{1}M -t1000 &'.format(input_ip, rate)
        if rate > 0:
            stdin, stdout, stderr = ssh.exec_command(iperf_cmd)

        start_time = time.time()
        query_cpu = '(sum(rate(container_cpu_usage_seconds_total{{id="/docker/{0}"}}[{1}s])))'.format(vnf_uuid, 1)
        while (time.time() - start_time) < 15:
            data = self.query_Prometheus(query_cpu)
            # logging.info('rate: {1} data:{0}'.format(data, rate))
            gevent.sleep(0)
            time.sleep(1)

        query_cpu2 = '(sum(rate(container_cpu_usage_seconds_total{{id="/docker/{0}"}}[{1}s])))'.format(vnf_uuid, 8)
        cpu_load = float(self.query_Prometheus(query_cpu2)[1])
        output = 'rate: {1}Mbps; cpu_load: {0}%'.format(round(cpu_load * 100, 2), rate)
        output_line = output
        logging.info(output_line)

        stop_iperf = 'pkill -9 iperf'
        stdin, stdout, stderr = ssh.exec_command(stop_iperf)

        return output_line