import logging
import threading
import time
from os import getcwd
from subprocess import Popen

from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter
\r
logging.basicConfig(level=logging.INFO)\r
\r
mon_port = None\r
}\r
'''\r
- self.network_metrics=[]\r
+ self.network_metrics = []\r
\r
# start monitoring thread\r
self.monitor_thread = threading.Thread(target=self.get_network_metrics)\r
self.monitor_thread.start()\r
\r
+ # helper tools\r
+ self.prometheus_process = None\r
+ self.cAdvisor_process = None\r
+\r
\r
# first set some parameters, before measurement can start\r
def setup_metric(self, vnf_name, vnf_interface=None, metric='tx_packets'):\r
url = self.REST_api + '/' + str(prefix) + '/' + str(dpid)\r
req = urllib2.Request(url)\r
ret = urllib2.urlopen(req).read()\r
- return ret
\ No newline at end of file
+ return ret\r
+\r
+ def start_Prometheus(self, port=9090):\r
+ cmd = ["docker",\r
+ "run",\r
+ "--rm",\r
+ "-p", "{0}:9090".format(port),\r
+ "-v", "{0}/prometheus.yml:/etc/prometheus/prometheus.yml".format(getcwd()),\r
+ "--name", "prometheus",\r
+ "prom/prometheus"\r
+ ]\r
+\r
+ self.prometheus_process = Popen(cmd)\r
+\r
+ def start_cAdvisor(self, port=8090):\r
+ cmd = ["docker",\r
+ "run",\r
+ "--rm",\r
+ "--volume=/:/rootfs:ro",\r
+ "--volume=/var/run:/var/run:rw",\r
+ "--volume=/sys:/sys:ro",\r
+ "--volume=/var/lib/docker/:/var/lib/docker:ro",\r
+ "--publish={0}:8080".format(port),\r
+ "--name=cadvisor",\r
+ "google/cadvisor:latest"\r
+ ]\r
+ self.cAdvisor_process = Popen(cmd)\r
+\r
+ def stop(self):\r
+ if self.prometheus_process is not None:\r
+ self.prometheus_process.terminate()\r
+ self.prometheus_process.kill()\r
+\r
+ if self.cAdvisor_process is not None:\r
+ self.cAdvisor_process.terminate()\r
+ self.cAdvisor_process.kill()\r
--- /dev/null
+global:
+ scrape_interval: 15s # By default, scrape targets every 15 seconds.
+
+ # Attach these labels to any time series or alerts when communicating with
+ # external systems (federation, remote storage, Alertmanager).
+ external_labels:
+ monitor: 'codelab-monitor'
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+ # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+ - job_name: 'prometheus'
+
+ # Override the global default and scrape targets from this job every 5 seconds.
+ scrape_interval: 5s
+
+  # Self-scraping is disabled; the key itself is commented out too, since a
+  # dangling `target_groups:` with no targets serves no purpose.
+  #target_groups:
+  #  - targets: ['localhost:9090']
+
+ - job_name: 'son-emu'
+
+ # Override the global default and scrape targets from this job every 5 seconds.
+ scrape_interval: 5s
+
+ target_groups:
+ - targets: ['172.17.0.1:8000']
+
+ - job_name: 'cAdvisor'
+
+ # Override the global default and scrape targets from this job every 5 seconds.
+ scrape_interval: 5s
+
+ target_groups:
+ - targets: ['172.17.0.1:8090']
+