--- /dev/null
+global:
+ scrape_interval: 15s # By default, scrape targets every 15 seconds.
+ evaluation_interval: 5s
+
+
+# A scrape configuration containing endpoints to scrape:
+
+scrape_configs:
+
+# cAdvisor started in son-emu
+ - job_name: 'cAdvisor'
+
+ scrape_interval: 1s
+
+ target_groups:
+ - targets: ['172.17.0.1:8090']
+
+# Pushgateway started in SP
+ - job_name: 'PushGateway'
+
+ scrape_interval: 1s
+
+ target_groups:
+ - targets: ['172.17.0.1:9091']
+
+
--- /dev/null
+from setuptools import setup, find_packages
+
+setup(name='emuvim',
+ version='0.0.1',
+ license='Apache 2.0',
+ description='emuvim is a VIM for the SONATA platform',
+ url='http://github.com/sonata-emu',
+ author_email='sonata-dev@sonata-nfv.eu',
+ package_dir={'': 'src'},
+ # packages=find_packages('emuvim', exclude=['*.test', '*.test.*', 'test.*', 'test']),
+ packages=find_packages('src'),
+ install_requires=[
+ 'zerorpc',
+ 'tabulate',
+ 'argparse',
+ ],
+ zip_safe=False,
+ entry_points={
+ 'console_scripts': [
+ 'son-emu-cli=emuvim.cli.son_emu_cli:main',
+ ],
+ },
+ setup_requires=['pytest-runner'],
+ tests_require=['pytest'],
+)
\ No newline at end of file
logging.exception("RPC error.")
return ex.message
+ # remove the flow metrics measurement
+ def stop_flow(self, vnf_name, vnf_interface, metric, cookie):
+ logging.debug("RPC CALL: stop flow")
+ try:
+ c = self.net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+ return c
+ except Exception as ex:
+ logging.exception("RPC error.")
+ return ex.message
+
# do prometheus query
def prometheus(self, dc_label, vnf_name, vnf_interface, query):
logging.debug("RPC CALL: query prometheus")
nw_list = list()
if args.get("network") is not None:
nw_list = self._parse_network(args.get("network"))
-
r = self.c.compute_action_start(
args.get("datacenter"),
args.get("name"),
\r
import argparse\r
import pprint\r
-from tabulate import tabulate\r
import zerorpc\r
-import time\r
-\r
+import prometheus\r
\r
pp = pprint.PrettyPrinter(indent=4)\r
\r
class ZeroRpcClient(object):\r
\r
def __init__(self):\r
+ # network zerorpc\r
self.c = zerorpc.Client()\r
# TODO connect to DCNetwork API\r
#self.c.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later\r
self.c.connect("tcp://127.0.0.1:5151")\r
+\r
+ # compute zerorpc\r
+ self.compute_api = zerorpc.Client(heartbeat=None, timeout=120) # heartbeat=None, timeout=120\r
+ self.compute_api.connect("tcp://127.0.0.1:4242") # TODO hard coded for now. we'll change this later\r
+\r
self.cmds = {}\r
\r
def execute_command(self, args):\r
args.get("cookie"))\r
pp.pprint(r)\r
\r
- def prometheus(self, args):\r
+ def stop_flow(self, args):\r
+ vnf_name = self._parse_vnf_name(args.get("vnf_name"))\r
+ vnf_interface = self._parse_vnf_interface(args.get("vnf_name"))\r
+ r = self.c.stop_flow(\r
+ vnf_name,\r
+ vnf_interface,\r
+ args.get("metric"),\r
+ args.get("cookie"))\r
+ pp.pprint(r)\r
+\r
+ def prometheus_zrpc(self, args):\r
vnf_name = self._parse_vnf_name(args.get("vnf_name"))\r
vnf_interface = self._parse_vnf_interface(args.get("vnf_name"))\r
r = self.c.prometheus(\r
args.get("query"))\r
pp.pprint(r)\r
\r
+ def prometheus(self, args):\r
+ vnf_name = self._parse_vnf_name(args.get("vnf_name"))\r
+ vnf_interface = self._parse_vnf_interface(args.get("vnf_name"))\r
+ dc_label = args.get("datacenter")\r
+ query = args.get("query")\r
+ vnf_status = self.compute_api.compute_status(dc_label, vnf_name)\r
+ uuid = vnf_status['id']\r
+ query = query.replace('<uuid>', uuid)\r
+\r
+ r = prometheus.query_Prometheus(query)\r
+ pp.pprint(r)\r
+\r
\r
def _parse_vnf_name(self, vnf_name_str):\r
vnf_name = vnf_name_str.split(':')[0]\r
parser = argparse.ArgumentParser(description='son-emu monitor')\r
parser.add_argument(\r
"command",\r
- help="Action to be executed")\r
+ choices=['setup_metric', 'stop_metric', 'setup_flow', 'stop_flow','prometheus'],\r
+ help="setup/stop a metric/flow to be monitored or query Prometheus")\r
parser.add_argument(\r
"--vnf_name", "-vnf", dest="vnf_name",\r
help="vnf name:interface to be monitored")\r
parser = argparse.ArgumentParser(description='son-emu network')\r
parser.add_argument(\r
"command",\r
- help="Action to be executed: add|remove")\r
+ choices=['add', 'remove'],\r
+ help="Action to be executed.")\r
parser.add_argument(\r
"--datacenter", "-d", dest="datacenter",\r
help="Data center to in which the network action should be initiated")\r
parser.add_argument(\r
"--bidirectional", "-b", dest="bidirectional",\r
action='store_true',\r
- help="add/remove the flow entries in 2 directions")\r
+ help="add/remove the flow entries from src to dst and back")\r
parser.add_argument(\r
"--cookie", "-c", dest="cookie",\r
- help="cookie for this flow")\r
+ help="cookie for this flow, as easy to use identifier (eg. per tenant/service)")\r
\r
def main(argv):\r
args = vars(parser.parse_args(argv))\r
--- /dev/null
+"""
+Prometheus API helper functions
+(c) 2016 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
+"""
+
+import urllib2
+import ast
+
+prometheus_ip = '0.0.0.0'
+prometheus_port = '9090'
+prometheus_REST_api = 'http://{0}:{1}'.format(prometheus_ip, prometheus_port)
+
+
+def query_Prometheus(query):
+ url = prometheus_REST_api + '/' + 'api/v1/query?query=' + query
+ # logging.info('query:{0}'.format(url))
+ req = urllib2.Request(url)
+ ret = urllib2.urlopen(req).read()
+ ret = ast.literal_eval(ret)
+ if ret['status'] == 'success':
+ # logging.info('return:{0}'.format(ret))
+ try:
+ ret = ret['data']['result'][0]['value']
+ except:
+ ret = None
+ else:
+ ret = None
+ return ret
\ No newline at end of file
self.monitor_flow_thread.start()\r
\r
# helper tools\r
- self.pushgateway_process = self.start_PushGateway()\r
- self.prometheus_process = self.start_Prometheus()\r
+ #self.pushgateway_process = self.start_PushGateway()\r
+ #self.prometheus_process = self.start_Prometheus()\r
self.cadvisor_process = self.start_cadvisor()\r
\r
# first set some parameters, before measurement can start\r
logging.exception("setup_metric error.")\r
return ex.message\r
\r
+ def stop_flow(self, vnf_name, vnf_interface=None, metric=None, cookie=0):\r
+ for flow_dict in self.flow_metrics:\r
+ if flow_dict['vnf_name'] == vnf_name and flow_dict['vnf_interface'] == vnf_interface \\r
+ and flow_dict['metric_key'] == metric and flow_dict['cookie'] == cookie:\r
+\r
+ self.monitor_flow_lock.acquire()\r
+\r
+ self.flow_metrics.remove(flow_dict)\r
+\r
+ for collector in self.registry._collectors:\r
+ if (vnf_name, vnf_interface, cookie) in collector._metrics:\r
+ #logging.info('2 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,\r
+ # collector._metrics))\r
+ collector.remove(vnf_name, vnf_interface, cookie)\r
+\r
+ delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
+\r
+ self.monitor_flow_lock.release()\r
+\r
+ logging.info('Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie))\r
+ return 'Stopped monitoring flow {3}: {2} on {0}:{1}'.format(vnf_name, vnf_interface, metric, cookie)\r
+\r
\r
# first set some parameters, before measurement can start\r
def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):\r
ret = self.net.ryu_REST('stats/flow', dpid=flow_dict['switch_dpid'], data=data)\r
flow_stat_dict = ast.literal_eval(ret)\r
\r
- logging.info('received flow stat:{0} '.format(flow_stat_dict))\r
+ #logging.info('received flow stat:{0} '.format(flow_stat_dict))\r
self.set_flow_metric(flow_dict, flow_stat_dict)\r
\r
self.monitor_flow_lock.release()\r
self.monitor_thread.join()\r
self.monitor_flow_thread.join()\r
\r
+ '''\r
if self.prometheus_process is not None:\r
logging.info('stopping prometheus container')\r
self.prometheus_process.terminate()\r
self.pushgateway_process.terminate()\r
self.pushgateway_process.kill()\r
self._stop_container('pushgateway')\r
+ '''\r
\r
if self.cadvisor_process is not None:\r
logging.info('stopping cadvisor container')\r
if cmd == 'add-flow':
ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
if kwargs.get('bidirectional'):
- return ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
+ ret = ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
elif cmd == 'del-flows': # TODO: del-flow to be implemented
ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
if kwargs.get('bidirectional'):
- return ret + '\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
+ ret = ret + '\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
else:
- return "Command unknown"
+ ret = "Command unknown"
+
+ return ret
def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
elif cmd == 'del-flows':
prefix = 'stats/flowentry/delete'
- # if cookie is given, only delete flows by cookie
- # do not specify other match -> also other cookies can be matched
if cookie:
+ # TODO: add cookie_mask as argument
flow['cookie_mask'] = int('0xffffffffffffffff', 16) # need full mask to match complete cookie
action = {}
self.ryu_process.kill()
def ryu_REST(self, prefix, dpid=None, data=None):
- if data: logging.info('log POST: {0}'.format(str(data)))
try:
if dpid:
url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)
+++ /dev/null
-global:
- scrape_interval: 15s # By default, scrape targets every 15 seconds.
- evaluation_interval: 5s
-
- # Attach these labels to any time series or alerts when communicating with
- # external systems (federation, remote storage, Alertmanager).
- external_labels:
- monitor: 'codelab-monitor'
-
-# Rule files specifies a list of files from which rules are read.
-rule_files:
- - 'profile.rules'
-
-# A scrape configuration containing exactly one endpoint to scrape:
-# Here it's Prometheus itself.
-scrape_configs:
- # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- - job_name: 'prometheus'
-
- # Override the global default and scrape targets from this job every 5 seconds.
- scrape_interval: 5s
-
- target_groups:
- #- targets: ['localhost:9090']
-
- - job_name: 'son-emu'
-
- # Override the global default and scrape targets from this job every 5 seconds.
- scrape_interval: 5s
-
- target_groups:
- - targets: ['172.17.0.1:8000']
-
- - job_name: 'cAdvisor'
-
- # Override the global default and scrape targets from this job every 5 seconds.
- scrape_interval: 1s
-
- target_groups:
- - targets: ['172.17.0.1:8090']
-
- - job_name: 'PushGateway'
-
- # Override the global default and scrape targets from this job every 5 seconds.
- scrape_interval: 1s
-
- target_groups:
- - targets: ['172.17.0.1:9091']
-
-
net = DCNetwork(monitor=True, enable_learning=False)
"""
- 1b. add a monitoring agent to the DCNetwork
+ 1b. Add endpoint APIs for the whole DCNetwork,
+ to access and control the networking from outside.
+    e.g., to set up forwarding paths between compute
+ instances aka. VNFs (represented by Docker containers), passing through
+ different switches and datacenters of the emulated topology
"""
mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
mon_api.connectDCNetwork(net)
mon_api.start()
+
"""
2. Add (logical) data centers to the topology
(each data center is one "bigswitch" in our simplified
--- /dev/null
+#!/bin/bash
+# test if a vnf can be monitored and deployed
+
+cpu_load=$(son-emu-cli monitor prometheus -d datacenter1 -vnf vnf1 -q 'sum(rate(container_cpu_usage_seconds_total{id="/docker/<uuid>"}[10s]))')
+
+# test if prometheus query worked
+regex="[0-9.]+, [0-9.']+"
+if [[ $cpu_load =~ $regex ]] ; then
+ echo "OK"
+ exit 0
+else
+ echo $cpu_load
+ echo "not OK"
+ exit 1
+fi