set chaining via VLANs
diff --git a/src/emuvim/dcemulator/monitoring.py b/src/emuvim/dcemulator/monitoring.py
index 6531beb..ab77c39 100755
--- a/src/emuvim/dcemulator/monitoring.py
+++ b/src/emuvim/dcemulator/monitoring.py
@@ -11,6 +11,9 @@
from subprocess import Popen, PIPE
import os
+import paramiko
+import gevent
+
logging.basicConfig(level=logging.INFO)
"""
@@ -21,9 +24,14 @@
def __init__(self, net):
self.net = net
# link to Ryu REST_API
- self.ip = '0.0.0.0'
- self.port = '8080'
- self.REST_api = 'http://{0}:{1}'.format(self.ip,self.port)
+ ryu_ip = '0.0.0.0'
+ ryu_port = '8080'
+ self.ryu_REST_api = 'http://{0}:{1}'.format(ryu_ip, ryu_port)
+ prometheus_ip = '0.0.0.0'
+ prometheus_port = '9090'
+ self.prometheus_REST_api = 'http://{0}:{1}'.format(prometheus_ip, prometheus_port)
+
+
# helper variables to calculate the metrics
self.pushgateway = 'localhost:9091'
@@ -32,13 +40,13 @@
# supported Prometheus metrics
self.registry = CollectorRegistry()
self.prom_tx_packet_count = Gauge('sonemu_tx_count_packets', 'Total number of packets sent',
- ['vnf_name', 'vnf_interface'], registry=self.registry)
+ ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
self.prom_rx_packet_count = Gauge('sonemu_rx_count_packets', 'Total number of packets received',
- ['vnf_name', 'vnf_interface'], registry=self.registry)
+ ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
self.prom_tx_byte_count = Gauge('sonemu_tx_count_bytes', 'Total number of bytes sent',
- ['vnf_name', 'vnf_interface'], registry=self.registry)
+ ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
self.prom_rx_byte_count = Gauge('sonemu_rx_count_bytes', 'Total number of bytes received',
- ['vnf_name', 'vnf_interface'], registry=self.registry)
+ ['vnf_name', 'vnf_interface', 'flow_id'], registry=self.registry)
self.prom_metrics={'tx_packets':self.prom_tx_packet_count, 'rx_packets':self.prom_rx_packet_count,
'tx_bytes':self.prom_tx_byte_count,'rx_bytes':self.prom_rx_byte_count}
@@ -57,19 +65,85 @@
}
'''
self.monitor_lock = threading.Lock()
+ self.monitor_flow_lock = threading.Lock()
self.network_metrics = []
+ self.flow_metrics = []
# start monitoring thread
self.start_monitoring = True
self.monitor_thread = threading.Thread(target=self.get_network_metrics)
self.monitor_thread.start()
+ self.monitor_flow_thread = threading.Thread(target=self.get_flow_metrics)
+ self.monitor_flow_thread.start()
+
# helper tools
self.pushgateway_process = self.start_PushGateway()
self.prometheus_process = self.start_Prometheus()
self.cadvisor_process = self.start_cadvisor()
# first set some parameters, before measurement can start
+ def setup_flow(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=0):
+
+ flow_metric = {}
+
+ # check if port is specified (vnf:port)
+ if vnf_interface is None:
+ # take first interface by default
+ connected_sw = self.net.DCNetwork_graph.neighbors(vnf_name)[0]
+ link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
+ vnf_interface = link_dict[0]['src_port_id']
+
+ flow_metric['vnf_name'] = vnf_name
+ flow_metric['vnf_interface'] = vnf_interface
+
+ vnf_switch = None
+ for connected_sw in self.net.DCNetwork_graph.neighbors(vnf_name):
+ link_dict = self.net.DCNetwork_graph[vnf_name][connected_sw]
+ for link in link_dict:
+ # logging.info("{0},{1}".format(link_dict[link],vnf_interface))
+ if link_dict[link]['src_port_id'] == vnf_interface:
+ # found the right link and connected switch
+ # logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
+ vnf_switch = connected_sw
+ flow_metric['mon_port'] = link_dict[link]['dst_port_nr']
+ break
+
+ if not vnf_switch:
+            logging.error("vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface))
+ return "vnf switch of {0}:{1} not found!".format(vnf_name, vnf_interface)
+
+ try:
+ # default port direction to monitor
+ if metric is None:
+ metric = 'tx_packets'
+
+ next_node = self.net.getNodeByName(vnf_switch)
+
+ if not isinstance(next_node, OVSSwitch):
+ logging.info("vnf: {0} is not connected to switch".format(vnf_name))
+ return
+
+ flow_metric['previous_measurement'] = 0
+ flow_metric['previous_monitor_time'] = 0
+
+ flow_metric['switch_dpid'] = int(str(next_node.dpid), 16)
+ flow_metric['metric_key'] = metric
+ flow_metric['cookie'] = cookie
+
+ self.monitor_flow_lock.acquire()
+ self.flow_metrics.append(flow_metric)
+ self.monitor_flow_lock.release()
+
+ logging.info('Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))
+ return 'Started monitoring flow:{3} {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)
+
+ except Exception as ex:
+            logging.exception("setup_flow error.")
+ return ex.message
+
+
+ # first set some parameters, before measurement can start
def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):
network_metric = {}
@@ -139,9 +213,10 @@
logging.exception("setup_metric error.")
return ex.message
- def stop_metric(self, vnf_name, vnf_interface, metric):
+ def stop_metric(self, vnf_name, vnf_interface=None, metric=None):
for metric_dict in self.network_metrics:
+ #logging.info('start Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric_dict))
if metric_dict['vnf_name'] == vnf_name and metric_dict['vnf_interface'] == vnf_interface \
and metric_dict['metric_key'] == metric:
@@ -154,7 +229,7 @@
#self.registry.unregister(self.prom_metrics[metric_dict['metric_key']])
for collector in self.registry._collectors :
- logging.info('name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames, collector._metrics))
+ #logging.info('name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames, collector._metrics))
"""
INFO:root:name:sonemu_rx_count_packets
labels:('vnf_name', 'vnf_interface')
@@ -165,11 +240,11 @@
"""
logging.info('{0}'.format(collector._metrics.values()))
#if self.prom_metrics[metric_dict['metric_key']]
- if (vnf_name, vnf_interface) in collector._metrics:
+ if (vnf_name, vnf_interface, 'None') in collector._metrics:
logging.info('2 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,
collector._metrics))
#collector._metrics = {}
- collector.remove(vnf_name, vnf_interface)
+ collector.remove(vnf_name, vnf_interface, 'None')
# set values to NaN, prometheus api currently does not support removal of metrics
#self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))
@@ -186,8 +261,51 @@
logging.info('Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric))
return 'Stopped monitoring: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric)
+ # delete everything from this vnf
+ elif metric_dict['vnf_name'] == vnf_name and vnf_interface is None and metric is None:
+ self.monitor_lock.acquire()
+ self.network_metrics.remove(metric_dict)
+ for collector in self.registry._collectors:
+ collector_dict = collector._metrics.copy()
+ for name, interface, id in collector_dict:
+ if name == vnf_name:
+ logging.info('3 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,
+ collector._metrics))
+ collector.remove(name, interface, 'None')
+
+ delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')
+ self.monitor_lock.release()
+ logging.info('Stopped monitoring vnf: {0}'.format(vnf_name))
+ return 'Stopped monitoring: {0}'.format(vnf_name)
+
# get all metrics defined in the list and export it to Prometheus
+ def get_flow_metrics(self):
+ while self.start_monitoring:
+
+ self.monitor_flow_lock.acquire()
+
+ for flow_dict in self.flow_metrics:
+ data = {}
+
+ data['cookie'] = flow_dict['cookie']
+
+ if 'tx' in flow_dict['metric_key']:
+ data['match'] = {'in_port':flow_dict['mon_port']}
+ elif 'rx' in flow_dict['metric_key']:
+ data['out_port'] = flow_dict['mon_port']
+
+
+ # query Ryu
+ ret = self.REST_cmd('stats/flow', flow_dict['switch_dpid'], data=data)
+ flow_stat_dict = ast.literal_eval(ret)
+
+ logging.info('received flow stat:{0} '.format(flow_stat_dict))
+ self.set_flow_metric(flow_dict, flow_stat_dict)
+
+ self.monitor_flow_lock.release()
+ time.sleep(1)
+
def get_network_metrics(self):
while self.start_monitoring:
@@ -231,7 +349,7 @@
# set prometheus metric
self.prom_metrics[metric_dict['metric_key']].\
- labels({'vnf_name':vnf_name, 'vnf_interface':vnf_interface}).\
+ labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': None}).\
set(this_measurement)
#push_to_gateway(self.pushgateway, job='SDNcontroller',
# grouping_key={'metric':metric_dict['metric_key']}, registry=self.registry)
@@ -253,7 +371,7 @@
else:
time_delta = (port_uptime - metric_dict['previous_monitor_time'])
metric_rate = (this_measurement - metric_dict['previous_measurement']) / float(time_delta)
- logging.info('metric: {0} rate:{1}'.format(metric_dict['metric_key'], metric_rate))
+ #logging.info('metric: {0} rate:{1}'.format(metric_dict['metric_key'], metric_rate))
metric_dict['previous_measurement'] = this_measurement
metric_dict['previous_monitor_time'] = port_uptime
@@ -262,11 +380,65 @@
logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)
+ def set_flow_metric(self, metric_dict, flow_stat_dict):
+ # vnf tx is the datacenter switch rx and vice-versa
+ #metric_key = self.switch_tx_rx(metric_dict['metric_key'])
+ metric_key = metric_dict['metric_key']
+ switch_dpid = metric_dict['switch_dpid']
+ vnf_name = metric_dict['vnf_name']
+ vnf_interface = metric_dict['vnf_interface']
+ previous_measurement = metric_dict['previous_measurement']
+ previous_monitor_time = metric_dict['previous_monitor_time']
+ cookie = metric_dict['cookie']
- def REST_cmd(self, prefix, dpid):
- url = self.REST_api + '/' + str(prefix) + '/' + str(dpid)
+ # TODO aggregate all found flow stats
+ flow_stat = flow_stat_dict[str(switch_dpid)][0]
+ if 'bytes' in metric_key:
+ counter = flow_stat['byte_count']
+ elif 'packet' in metric_key:
+ counter = flow_stat['packet_count']
+
+ flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)
+
+ self.prom_metrics[metric_dict['metric_key']]. \
+ labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': cookie}). \
+ set(counter)
+ pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)
+
+ #logging.exception('metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface))
+ #return 'metric {0} not found on {1}:{2}'.format(metric_key, vnf_name, vnf_interface)
+
+ def REST_cmd(self, prefix, dpid, data=None):
+ url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)
+ if data:
+ logging.info('POST: {0}'.format(str(data)))
+ req = urllib2.Request(url, str(data))
+ else:
+ req = urllib2.Request(url)
+
+ ret = urllib2.urlopen(req).read()
+ return ret
+
+ def query_Prometheus(self, query):
+ '''
+ escaped_chars='{}[]'
+ for old in escaped_chars:
+ new = '\{0}'.format(old)
+ query = query.replace(old, new)
+ '''
+ url = self.prometheus_REST_api + '/' + 'api/v1/query?query=' + query
+ #logging.info('query:{0}'.format(url))
req = urllib2.Request(url)
ret = urllib2.urlopen(req).read()
+ ret = ast.literal_eval(ret)
+ if ret['status'] == 'success':
+ #logging.info('return:{0}'.format(ret))
+ try:
+ ret = ret['data']['result'][0]['value']
+ except:
+ ret = None
+ else:
+ ret = None
return ret
def start_Prometheus(self, port=9090):
@@ -314,6 +486,7 @@
# stop the monitoring thread
self.start_monitoring = False
self.monitor_thread.join()
+ self.monitor_flow_thread.join()
if self.prometheus_process is not None:
logging.info('stopping prometheus container')
@@ -354,4 +527,33 @@
name]
Popen(cmd).wait()
+ def profile(self, mgmt_ip, rate, input_ip, vnf_uuid ):
+
+ ssh = paramiko.SSHClient()
+ ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+ #ssh.connect(mgmt_ip, username='steven', password='test')
+ ssh.connect(mgmt_ip, username='root', password='root')
+
+ iperf_cmd = 'iperf -c {0} -u -l18 -b{1}M -t1000 &'.format(input_ip, rate)
+ if rate > 0:
+ stdin, stdout, stderr = ssh.exec_command(iperf_cmd)
+
+ start_time = time.time()
+ query_cpu = '(sum(rate(container_cpu_usage_seconds_total{{id="/docker/{0}"}}[{1}s])))'.format(vnf_uuid, 1)
+ while (time.time() - start_time) < 15:
+ data = self.query_Prometheus(query_cpu)
+ # logging.info('rate: {1} data:{0}'.format(data, rate))
+ gevent.sleep(0)
+ time.sleep(1)
+
+ query_cpu2 = '(sum(rate(container_cpu_usage_seconds_total{{id="/docker/{0}"}}[{1}s])))'.format(vnf_uuid, 8)
+ cpu_load = float(self.query_Prometheus(query_cpu2)[1])
+ output = 'rate: {1}Mbps; cpu_load: {0}%'.format(round(cpu_load * 100, 2), rate)
+ output_line = output
+ logging.info(output_line)
+
+ stop_iperf = 'pkill -9 iperf'
+ stdin, stdout, stderr = ssh.exec_command(stop_iperf)
+
+ return output_line