Merge remote-tracking branch 'steven/master'
author peusterm <manuel.peuster@uni-paderborn.de>
Mon, 6 Feb 2017 11:32:54 +0000 (12:32 +0100)
committer peusterm <manuel.peuster@uni-paderborn.de>
Mon, 6 Feb 2017 11:32:54 +0000 (12:32 +0100)
35 files changed:
Vagrantfile [changed mode: 0644->0755]
ansible/install.yml
dashboard/README.md [changed mode: 0644->0755]
dashboard/css/main.css [changed mode: 0644->0755]
dashboard/img/SONATA_new.png [changed mode: 0644->0755]
dashboard/index.html [changed mode: 0644->0755]
dashboard/js/main.js [changed mode: 0644->0755]
dashboard/son-emu-dashboard-screenshot.png [changed mode: 0644->0755]
misc/son-monitor/skewmon/Dockerfile [new file with mode: 0755]
misc/son-monitor/skewmon/requirements.txt [new file with mode: 0755]
misc/son-monitor/skewmon/skewmon.py [new file with mode: 0755]
misc/sonata-demo-service.son [changed mode: 0644->0755]
misc/sonata-stress-service.son [changed mode: 0644->0755]
setup-cli.py [changed mode: 0644->0755]
setup.py
src/emuvim/api/rest/compute.py
src/emuvim/api/rest/monitor.py
src/emuvim/api/rest/network.py
src/emuvim/api/rest/rest_api_endpoint.py
src/emuvim/api/sonata/dummygatekeeper.py
src/emuvim/api/sonata/sap_vnfd.yml [changed mode: 0644->0755]
src/emuvim/cli/rest/compute.py
src/emuvim/cli/rest/monitor.py
src/emuvim/dcemulator/monitoring.py
src/emuvim/dcemulator/net.py
src/emuvim/dcemulator/node.py
src/emuvim/dcemulator/son_emu_simple_switch_13.py [new file with mode: 0755]
src/emuvim/examples/sonata_simple.py [new file with mode: 0755]
src/emuvim/examples/sonata_simple_topology.py [new file with mode: 0755]
src/emuvim/examples/sonata_y1_demo_topology_1_w_ls_and_mon_and_sap.py [changed mode: 0644->0755]
src/emuvim/examples/sonata_y1_demo_topology_1_w_ls_and_sap.py [changed mode: 0644->0755]
src/emuvim/test/api_base.py
src/emuvim/test/base.py
src/emuvim/test/unittests/test_sonata_dummy_gatekeeper.py
utils/docker/Dockerfile

old mode 100644 (file)
new mode 100755 (executable)
index fd3aa4b..4e4ff01 100755 (executable)
@@ -57,8 +57,8 @@
    - name: install requests
      pip: name=requests state=latest
 
-   - name: install docker-py
-     pip: name=docker-py version=1.7.1
+   - name: install docker
+     pip: name=docker version=2.0.2
 
    - name: install prometheus_client
      pip: name=prometheus_client state=latest
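Note: docker-py 1.x and the docker 2.x SDK expose different client APIs; the dummygatekeeper and monitoring changes further below rely on the 2.x resource-style calls. A minimal mapping sketch (assuming docker==2.0.2; container and image names are illustrative):

    import docker
    client = docker.from_env()               # 2.x; 1.x used docker.Client()
    client.images.list(name="ubuntu")        # 1.x: client.images(name="ubuntu")
    c = client.containers.get("mn.vnf1")     # 1.x: client.inspect_container("mn.vnf1")
    c.remove(force=True)                     # 1.x: client.remove_container("mn.vnf1", force=True)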
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
diff --git a/misc/son-monitor/skewmon/Dockerfile b/misc/son-monitor/skewmon/Dockerfile
new file mode 100755 (executable)
index 0000000..875708d
--- /dev/null
@@ -0,0 +1,10 @@
+FROM python:3-onbuild
+
+#periods in milliseconds
+ENV SAMPLE_PERIOD 10
+ENV TOTAL_PERIOD 2000
+
+ADD requirements.txt .
+ADD skewmon.py .
+
+CMD [ "python", "./skewmon.py" ]
\ No newline at end of file
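With the defaults above, skewmon samples the container's cgroup counters every 10 ms (SAMPLE_PERIOD) over a 2000 ms window (TOTAL_PERIOD), i.e. roughly 200 samples per exported skewness value; the first sample of each window only initializes the counters (see skewmon.py below).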
diff --git a/misc/son-monitor/skewmon/requirements.txt b/misc/son-monitor/skewmon/requirements.txt
new file mode 100755 (executable)
index 0000000..a73ea1c
--- /dev/null
@@ -0,0 +1 @@
+prometheus_client
\ No newline at end of file
diff --git a/misc/son-monitor/skewmon/skewmon.py b/misc/son-monitor/skewmon/skewmon.py
new file mode 100755 (executable)
index 0000000..62384b1
--- /dev/null
@@ -0,0 +1,255 @@
+#!/usr/bin/python3
+"""
+Copyright (c) 2015 SONATA-NFV and Paderborn University
+ALL RIGHTS RESERVED.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
+nor the names of its contributors may be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+This work has been performed in the framework of the SONATA project,
+funded by the European Commission under Grant number 671517 through
+the Horizon 2020 and 5G-PPP programmes. The authors would like to
+acknowledge the contributions of their colleagues of the SONATA
+partner consortium (www.sonata-nfv.eu).
+"""
+
+"""
+Monitor the skewness of the resource usage probability distribution
+and export to a Prometheus Push Gateway
+(c) 2017 by Steven Van Rossem <steven.vanrossem@intec.ugent.be>
+"""
+
+
+from time import sleep, time, perf_counter
+import math
+from prometheus_client import start_http_server, Summary, Histogram, Gauge, Counter, REGISTRY, CollectorRegistry, \
+    pushadd_to_gateway, push_to_gateway, delete_from_gateway
+import threading
+import os
+import json
+
+import logging
+LOG = logging.getLogger('skewmon')
+LOG.setLevel(level=logging.DEBUG)
+LOG.addHandler(logging.StreamHandler())
+
+
+# TODO: make these configurable via env vars in the Dockerfile
+PUSHGATEWAY_IP = '172.17.0.1'
+PUSHGATEWAY_PORT = 9091
+PUSHGATEWAY_ADDR = ':'.join([PUSHGATEWAY_IP, str(PUSHGATEWAY_PORT)])
+
+
+#general settings (ms)
+SAMPLE_PERIOD = int(os.environ['SAMPLE_PERIOD'])
+TOTAL_PERIOD = int(os.environ['TOTAL_PERIOD'])
+
+# define global variables
+registry = CollectorRegistry()
+exported_metric = Gauge('skewness', 'Skewness of docker vnf resource usage',
+                                              ['vnf_id', 'vnf_name', 'vnf_metric'], registry=registry)
+
+# find the VNFs to monitor
+# {metric_shortId: {VNF_NAME:<>,VNF_ID:<>,VNF_METRIC:<>}}
+
+def get_vnfs_to_monitor(config):
+    for key in config:
+        vnf_name = config[key].get('VNF_NAME')
+        vnf_id = config[key].get('VNF_ID')
+        vnf_metric = config[key].get('VNF_METRIC')
+        yield (vnf_id, vnf_name, vnf_metric)
+
+# export metric to the Prometheus PushGateway
+def export_metrics(key=None):
+    try:
+        pushadd_to_gateway(PUSHGATEWAY_ADDR, job='sonemu-skewmon', registry=registry, grouping_key=key)
+    except Exception as e:
+        LOG.warning("Pushgateway not reachable: {0}".format(str(e)))
+
+class skewness_monitor():
+    def __init__(self, docker_id, docker_name, metric):
+        # Prometheus metric to export
+        self.prom_skewness = exported_metric
+        self.docker_id = docker_id
+        self.docker_name = docker_name
+        self.vnf_metric = metric
+
+        # https://www.datadoghq.com/blog/how-to-collect-docker-metrics/
+        self.cpu_proc_file = '/sys/fs/cgroup/cpuacct/docker/{0}/cpuacct.usage'.format(self.docker_id)
+        self.mem_proc_file = '/sys/fs/cgroup/memory/docker/{0}/memory.usage_in_bytes'.format(self.docker_id)
+        metric_dict = {'cpu': self.cpu_proc_file,
+                       'mem': self.mem_proc_file}
+
+        self.proc_file = metric_dict[metric]
+
+        self.fp = open(self.proc_file)
+
+        #monitoring thread
+        self.export_thread = None
+        self.monitor_stop = threading.Event()
+
+    # get statistics with certain frequency and export skewness for further analysis
+    def _calc_skewness(self):
+
+        cpu_count0 = 0
+        time0 = 0
+
+        #milliseconds
+        stat_delta = SAMPLE_PERIOD
+        sample_T = TOTAL_PERIOD
+
+        data = []
+        n = 0
+
+        moment1 = 0
+        moment2 = 0
+        moment3 = 0
+
+        fp = self.fp
+
+        #collect samples
+        for n in range(0,round(sample_T/stat_delta)):
+            # first measurement
+            if cpu_count0 <= 0 or time0 <= 0:
+                time0 = perf_counter()
+                cpu_count0 = int(fp.read().strip())
+                fp.seek(0)
+                sleep(stat_delta/1000)
+                continue
+
+
+            #perf_counter in seconds
+            time1 = perf_counter()
+
+            # cpu count in nanoseconds
+            cpu_count1 = int(fp.read().strip())
+            fp.seek(0)
+
+            cpu_delta = cpu_count1 - cpu_count0
+            cpu_count0 = cpu_count1
+
+            time_delta = time1 - time0
+            time0 = time1
+
+            #work in nanoseconds
+            metric = (cpu_delta / (time_delta * 1e9))
+
+            data.append(metric)
+
+            #running calculation of sample moments
+            moment1 += metric
+            temp = metric * metric
+            moment2 += temp
+            moment3 += temp * metric
+
+
+            sleep(stat_delta/1000)
+
+        # calc skewness
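+        # M1, M2 and M3 are the sample mean, variance and third central
+        # moment, derived from the running raw moments; s2 is the adjusted
+        # Fisher-Pearson standardized moment coefficient (sample skewness)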
+        M1 = (1 / n) * moment1
+        M2 = ((1 / n) * moment2) - M1**2
+        M3 = ((1 / n) * moment3) - (3 * M1 * ((1 / n) * moment2)) + (2 * M1**3)
+
+        s2 = (math.sqrt(n*(n - 1))/(n - 2)) * (M3 / (M2)**1.5)
+
+        LOG.info("docker_name: {0} metric: {1}".format(self.docker_name, self.vnf_metric))
+        LOG.info("Nsamples: {0}".format(n))
+        LOG.info("skewness: {0:.2f}".format(s2))
+        LOG.info("\n")
+
+        return s2
+
+    def _export_skewness_loop(self, stop_event):
+        #loop until flag is set
+        while(not stop_event.is_set()):
+            try:
+                skewness = self._calc_skewness()
+                self.prom_skewness.labels(vnf_id=self.docker_id, vnf_name=self.docker_name, vnf_metric=self.vnf_metric)\
+                    .set(skewness)
+            except ZeroDivisionError as e:
+                self.prom_skewness.labels(vnf_id=self.docker_id, vnf_name=self.docker_name, vnf_metric=self.vnf_metric) \
+                    .set(float('nan'))
+                LOG.warning("{1}: Skewness cannot be calculated: {0}".format(str(e), self.docker_name))
+            except Exception as e:
+                LOG.warning("Skewness cannot be calculated, stop thread: {0}".format(str(e)))
+                self.monitor_stop.set()
+
+        # when the loop exits, the monitoring thread stops; flag the metric as NaN
+        self.prom_skewness.labels(vnf_id=self.docker_id, vnf_name=self.docker_name, vnf_metric=self.vnf_metric) \
+            .set(float('nan'))
+
+    #start the monitoring thread
+    def start(self):
+            if self.export_thread is not None:
+                LOG.warning('monitor thread is already running for: {0}'.format(self.docker_name))
+                return
+
+            self.export_thread = threading.Thread(target=self._export_skewness_loop, args=(self.monitor_stop,))
+            self.export_thread.start()
+            LOG.info('started thread: {0}'.format(self.docker_name))
+
+    #stop the monitoring thread
+    def stop(self):
+        self.monitor_stop.set()
+
+
+if __name__ == '__main__':
+
+    #started_vnfs {vnf_id:object}
+    vnfs_monitored = {}
+
+    # endless loop
+    while True:
+        #check config.txt for docker ids/names
+        configfile = open('/config.txt', 'r')
+        config = json.load(configfile)
+        vnfs_to_monitor = list(get_vnfs_to_monitor(config))
+
+        #for each new docker id in ENV start thread to monitor skewness
+        for vnf_id, vnf_name, vnf_metric in vnfs_to_monitor:
+            key = '_'.join([vnf_metric, vnf_id])
+            if key not in vnfs_monitored:
+                try:
+                    vnfs_monitored[key] = skewness_monitor(vnf_id, vnf_name, vnf_metric)
+                    vnfs_monitored[key].start()
+                except Exception as e:
+                    LOG.warning("Monitor cannot be started: {0}".format(str(e)))
+
+        # for each docker id removed from the config file, stop the export
+        for vnf_key in list(vnfs_monitored):
+            vnf_keys_to_monitor = ['_'.join([vnf_metric, vnf_id]) for vnf_id, vnf_name, vnf_metric in vnfs_to_monitor]
+            if vnf_key not in vnf_keys_to_monitor:
+                vnfs_monitored[vnf_key].stop()
+
+                vnf_name = vnfs_monitored[vnf_key].docker_name
+                vnf_metric, vnf_id = vnf_key.split('_')
+                LOG.info('stopped monitoring VNF: {0}'.format(vnfs_monitored[vnf_key].docker_name))
+                del vnfs_monitored[vnf_key]
+
+                # remove metric with labels from registry
+                # (Push Gateway remembers last pushed value, so this is not so useful)
+                # collector = registry._names_to_collectors['skewness']
+                # if (vnf_id, vnf_name, vnf_metric) in collector._metrics:
+                #     collector.remove(vnf_id, vnf_name, vnf_metric)
+                delete_from_gateway(PUSHGATEWAY_ADDR, job='sonemu-skewmon')
+
+
+        #push to Prometheus gateway
+        export_metrics()
+        LOG.info('monitored VNFs: {0}'.format([monitor.docker_name for key, monitor in vnfs_monitored.items()]))
+        # wait before checking again
+        sleep(TOTAL_PERIOD/1000)
\ No newline at end of file
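For reference, skewmon reads its monitoring targets from /config.txt (mounted from /tmp/skewmon.cfg by DCNetworkMonitor.update_skewmon(), see monitoring.py below), a JSON object keyed by '<metric>_<container short id>'; an illustrative example (the id is made up):

    {
        "cpu_d9c9ca35a8a4": {
            "VNF_NAME": "vnf1",
            "VNF_ID": "d9c9ca35a8a4...",
            "VNF_METRIC": "cpu"
        }
    }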
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
old mode 100644 (file)
new mode 100755 (executable)
index 0baad49..ce1c47f 100755 (executable)
--- a/setup.py
+++ b/setup.py
@@ -52,7 +52,7 @@ setup(name='emuvim',
           'pytest',
           'Flask',
           'flask_restful',
-          'docker-py==1.7.1',
+          'docker==2.0.2',
           'requests',
           'prometheus_client',
           'urllib3'
index 22e9d6d..dc2b611 100755 (executable)
@@ -50,8 +50,13 @@ class Compute(Resource):
     """
     global dcs
 
-    def put(self, dc_label, compute_name):
+    def put(self, dc_label, compute_name, resource=None, value=None):
+        # check if resource update
+        if resource and value:
+            c = self._update_resource(dc_label, compute_name, resource, value)
+            return c.getStatus(), 200
 
+        # deploy new container
         # check if json data is a dict
         data = request.json
         if data is None:
@@ -74,6 +79,20 @@ class Compute(Resource):
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
 
+    def _update_resource(self, dc_label, compute_name, resource, value):
+        #check if container exists
+        d = dcs.get(dc_label).net.getNodeByName(compute_name)
+        if resource == 'cpu':
+            cpu_period = int(dcs.get(dc_label).net.cpu_period)
+            cpu_quota = int(cpu_period * float(value))
+            #put default values back
+            if float(value) <= 0:
+                cpu_period = 100000
+                cpu_quota = -1
+            d.updateCpuLimit(cpu_period=cpu_period, cpu_quota=cpu_quota)
+        return d
+
+
     def get(self, dc_label, compute_name):
 
         logging.debug("API CALL: compute status")
index 5558b87..379c8d5 100755 (executable)
@@ -55,20 +55,26 @@ class MonitorInterfaceAction(Resource):
     """
     global net
 
-    def put(self, vnf_name, vnf_interface=None, metric='tx_packets'):
+    def put(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=None):
         logging.debug("REST CALL: start monitor VNF interface")
         try:
-            c = net.monitor_agent.setup_metric(vnf_name, vnf_interface, metric)
+            if cookie:
+                c = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+            else:
+                c = net.monitor_agent.setup_metric(vnf_name, vnf_interface, metric)
             # return monitor message response
             return  str(c), 200, CORS_HEADER
         except Exception as ex:
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
 
-    def delete(self, vnf_name, vnf_interface=None, metric='tx_packets'):
+    def delete(self, vnf_name, vnf_interface=None, metric='tx_packets', cookie=None):
         logging.debug("REST CALL: stop monitor VNF interface")
         try:
-            c = net.monitor_agent.stop_metric(vnf_name, vnf_interface, metric)
+            if cookie:
+                c = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+            else:
+                c = net.monitor_agent.stop_metric(vnf_name, vnf_interface, metric)
             # return monitor message response
             return str(c), 200, CORS_HEADER
         except Exception as ex:
@@ -106,3 +112,140 @@ class MonitorFlowAction(Resource):
         except Exception as ex:
             logging.exception("API error.")
             return ex.message, 500, CORS_HEADER
+
+class MonitorLinkAction(Resource):
+    """
+    Add or remove flow monitoring on chains between VNFs.
+    These chain links are implemented as flow entries in the networks' SDN switches.
+    The monitoring is an extra flow entry on top of the existing chain, with a specific match, so the existing chaining is preserved.
+    The counters of this new monitoring flow are exported
+    :param vnf_src_name: VNF name of the source of the link
+    :param vnf_dst_name: VNF name of the destination of the link
+    :param vnf_src_interface: VNF interface name of the source of the link
+    :param vnf_dst_interface: VNF interface name of the destination of the link
+    :param weight: weight of the link (can be useful for routing calculations)
+    :param match: OpenFlow match format of the flow entry
+    :param bidirectional: boolean value if the link needs to be implemented from src to dst and back
+    :param cookie: cookie value, identifier of the flow entry to be installed.
+    :param priority: integer indicating the priority of the flow entry
+    :param skip_vlan_tag: boolean to indicate whether a new vlan tag should be created for this chain
+    :param monitor: boolean to indicate whether this chain should be monitored
+    :param monitor_placement: 'tx'=place the monitoring flowrule at the beginning of the chain, 'rx'=place at the end of the chain
+    :param metric: tx_packet_rate, tx_byte_rate, rx_packet_rate, rx_byte_rate
+    :return: message string indicating if the chain action is successful or not
+    """
+
+    # the global net is set from the topology file, and connected via connectDCNetwork function in rest_api_endpoint.py
+    global net
+
+    def put(self, vnf_src_name, vnf_dst_name):
+        logging.debug("REST CALL: monitor link flow add")
+
+        try:
+            command = 'add-flow'
+            return self._MonitorLinkAction(vnf_src_name, vnf_dst_name, command=command)
+        except Exception as ex:
+            logging.exception("API error.")
+            return ex.message, 500, CORS_HEADER
+
+    def delete(self, vnf_src_name, vnf_dst_name):
+        logging.debug("REST CALL: monitor link flow remove")
+
+        try:
+            command = 'del-flows'
+            return self._MonitorLinkAction(vnf_src_name, vnf_dst_name, command=command)
+        except Exception as ex:
+            logging.exception("API error.")
+            return ex.message, 500, CORS_HEADER
+
+    def _MonitorLinkAction(self, vnf_src_name, vnf_dst_name, command=None):
+        # call DCNetwork method, not really datacenter specific API for now...
+        # no check if vnfs are really connected to this datacenter...
+        try:
+            # check if json data is a dict
+            data = request.json
+            if data is None:
+                data = {}
+            elif type(data) is not dict:
+                data = json.loads(request.json)
+
+            vnf_src_interface = data.get("vnf_src_interface")
+            vnf_dst_interface = data.get("vnf_dst_interface")
+            weight = data.get("weight")
+            match = data.get("match")
+            bidirectional = data.get("bidirectional")
+            cookie = data.get("cookie")
+            priority = data.get("priority")
+            skip_vlan_tag = data.get("skip_vlan_tag")
+            monitor = data.get("monitor")
+            monitor_placement = data.get("monitor_placement")
+
+            #first install monitor flow
+            c1 = net.setChain(
+                vnf_src_name, vnf_dst_name,
+                vnf_src_interface=vnf_src_interface,
+                vnf_dst_interface=vnf_dst_interface,
+                cmd=command,
+                weight=weight,
+                match=match,
+                bidirectional=bidirectional,
+                cookie=cookie,
+                priority=priority,
+                skip_vlan_tag=skip_vlan_tag,
+                monitor=monitor,
+                monitor_placement=monitor_placement)
+
+            #then export monitor flow
+            metric = data.get("metric")
+            if 'rx' in monitor_placement:
+                vnf_name = vnf_dst_name
+                vnf_interface = vnf_dst_interface
+            elif 'tx' in monitor_placement:
+                vnf_name = vnf_src_name
+                vnf_interface = vnf_src_interface
+
+            c2 = 'command unknown'
+            if command == 'add-flow':
+                c2 = net.monitor_agent.setup_flow(vnf_name, vnf_interface, metric, cookie)
+            elif command == 'del-flows':
+                c2 = net.monitor_agent.stop_flow(vnf_name, vnf_interface, metric, cookie)
+
+            # return setChain response
+            return (str(c1) + " " + str(c2)), 200, CORS_HEADER
+        except Exception as ex:
+            logging.exception("API error.")
+            return ex.message, 500, CORS_HEADER
+
+class MonitorSkewAction(Resource):
+    """
+    Start or stop monitoring the skewness of a VNF's resource usage
+    :param vnf_name: name of the VNF to be monitored
+    :param resource: the resource to be monitored (cpu, mem, ...)
+    :return: message string indicating if the monitor action is successful or not
+    """
+    global net
+
+    def put(self, vnf_name, resource_name='cpu'):
+        logging.debug("REST CALL: start monitor skewness")
+        try:
+            # configure skewmon
+            c = net.monitor_agent.update_skewmon(vnf_name, resource_name, action='start')
+
+            # return monitor message response
+            return  str(c), 200
+        except Exception as ex:
+            logging.exception("API error.")
+            return ex.message, 500
+
+    def delete(self, vnf_name, resource_name='cpu'):
+        logging.debug("REST CALL: stop monitor skewness")
+        try:
+            # configure skewmon
+            c = net.monitor_agent.update_skewmon(vnf_name, resource_name, action='stop')
+
+            # return monitor message response
+            return str(c), 200, CORS_HEADER
+        except Exception as ex:
+            logging.exception("API error.")
+            return ex.message, 500, CORS_HEADER
+
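A hedged sketch of calls to the two new monitor resources (same endpoint assumption as above; VNF names, interface names and the cookie are made up):

    import requests
    BASE = "http://127.0.0.1:5001/restapi/monitor"
    # add a monitoring flow on the chain vnf1 -> vnf2; the cookie identifies the chain's flow entries
    requests.put(BASE + "/link/vnf1/vnf2",
                 json={"vnf_src_interface": "port0", "vnf_dst_interface": "port0",
                       "cookie": 10, "metric": "tx_packets", "monitor_placement": "tx"})
    # start and stop skewness monitoring of vnf1's cpu usage
    requests.put(BASE + "/skewness/vnf1/cpu")
    requests.delete(BASE + "/skewness/vnf1/cpu")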
index 2745602..10baa8f 100755 (executable)
@@ -41,6 +41,7 @@ logging.basicConfig(level=logging.INFO)
 
 CORS_HEADER = {'Access-Control-Allow-Origin': '*'}
 
+# the global net is set from the topology file, and connected via connectDCNetwork function in rest_api_endpoint.py
 net = None
 
 
@@ -56,6 +57,9 @@ class NetworkAction(Resource):
     :param bidirectional: boolean value if the link needs to be implemented from src to dst and back
     :param cookie: cookie value, identifier of the flow entry to be installed.
     :param priority: integer indicating the priority of the flow entry
+    :param skip_vlan_tag: boolean to indicate whether a new vlan tag should be created for this chain
+    :param monitor: boolean to indicate whether this chain should be monitored
+    :param monitor_placement: 'tx'=place the monitoring flowrule at the beginning of the chain, 'rx'=place at the end of the chain
     :return: message string indicating if the chain action is successful or not
     """
 
@@ -89,6 +93,10 @@ class NetworkAction(Resource):
             bidirectional = data.get("bidirectional")
             cookie = data.get("cookie")
             priority = data.get("priority")
+            skip_vlan_tag = data.get("skip_vlan_tag")
+            monitor = data.get("monitor")
+            monitor_placement = data.get("monitor_placement")
+
             c = net.setChain(
                 vnf_src_name, vnf_dst_name,
                 vnf_src_interface=vnf_src_interface,
@@ -98,7 +106,10 @@ class NetworkAction(Resource):
                 match=match,
                 bidirectional=bidirectional,
                 cookie=cookie,
-                priority=priority)
+                priority=priority,
+                skip_vlan_tag=skip_vlan_tag,
+                monitor=monitor,
+                monitor_placement=monitor_placement)
             # return setChain response
             return str(c), 200, CORS_HEADER
         except Exception as ex:
index e382f5a..7168f37 100755 (executable)
@@ -39,7 +39,7 @@ import network
 from network import NetworkAction
 
 import monitor
-from monitor import MonitorInterfaceAction, MonitorFlowAction
+from monitor import MonitorInterfaceAction, MonitorFlowAction, MonitorLinkAction, MonitorSkewAction
 
 logging.basicConfig(level=logging.INFO)
 
@@ -61,7 +61,10 @@ class RestApiEndpoint(object):
 
         # setup endpoints
 
-        self.api.add_resource(Compute, "/restapi/compute/<dc_label>/<compute_name>")
+        # compute related actions (start/stop VNFs, get info)
+        self.api.add_resource(Compute,
+                              "/restapi/compute/<dc_label>/<compute_name>",
+                              "/restapi/compute/<dc_label>/<compute_name>/<resource>/<value>")
         self.api.add_resource(ComputeList,
                       "/restapi/compute",
                       "/restapi/compute/<dc_label>")
@@ -69,14 +72,30 @@ class RestApiEndpoint(object):
         self.api.add_resource(DatacenterStatus, "/restapi/datacenter/<dc_label>")
         self.api.add_resource(DatacenterList, "/restapi/datacenter")
 
-        self.api.add_resource(NetworkAction, "/restapi/network/<vnf_src_name>/<vnf_dst_name>", )
 
+        # network related actions (setup chaining between VNFs)
+        self.api.add_resource(NetworkAction,
+                              "/restapi/network/<vnf_src_name>/<vnf_dst_name>")
+
+
+        # monitoring related actions
+        # export a network interface traffic rate counter
         self.api.add_resource(MonitorInterfaceAction,
-                              "/restapi/monitor/<vnf_name>/<metric>",
-                              "/restapi/monitor/<vnf_name>/<vnf_interface>/<metric>")
+                              "/restapi/monitor/interface/<vnf_name>/<metric>",
+                              "/restapi/monitor/interface/<vnf_name>/<vnf_interface>/<metric>",
+                              "/restapi/monitor/interface/<vnf_name>/<vnf_interface>/<metric>/<cookie>")
+        # export flow traffic counter, of a manually pre-installed flow entry, specified by its cookie
         self.api.add_resource(MonitorFlowAction,
-                              "/restapi/flowmon/<vnf_name>/<metric>/<cookie>",
-                              "/restapi/flowmon/<vnf_name>/<vnf_interface>/<metric>/<cookie>")
+                              "/restapi/monitor/flow/<vnf_name>/<metric>/<cookie>",
+                              "/restapi/monitor/flow/<vnf_name>/<vnf_interface>/<metric>/<cookie>")
+        # install monitoring of a specific flow on a pre-existing link in the service.
+        # the traffic counters of the newly installed monitor flow are exported
+        self.api.add_resource(MonitorLinkAction,
+                              "/restapi/monitor/link/<vnf_src_name>/<vnf_dst_name>")
+        # install skewness monitor of resource usage distribution
+        # the skewness metric is exported
+        self.api.add_resource(MonitorSkewAction,
+                              "/restapi/monitor/skewness/<vnf_name>/<resource_name>")
 
         logging.debug("Created API endpoint %s(%s:%d)" % (self.__class__.__name__, self.ip, self.port))
 
index 4018b69..f9ff506 100755 (executable)
@@ -39,11 +39,12 @@ import hashlib
 import zipfile
 import yaml
 import threading
-from docker import Client as DockerClient
+from docker import DockerClient, APIClient
 from flask import Flask, request
 import flask_restful as fr
 from collections import defaultdict
 import pkg_resources
+from subprocess import Popen
 
 logging.basicConfig()
 LOG = logging.getLogger("sonata-dummy-gatekeeper")
@@ -75,6 +76,7 @@ class Gatekeeper(object):
     def __init__(self):
         self.services = dict()
         self.dcs = dict()
+        self.net = None
         self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
         LOG.info("Create SONATA dummy gatekeeper.")
 
@@ -181,6 +183,9 @@ class Service(object):
             eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
             elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]
 
+            GK.net.deployed_elines.extend(eline_fwd_links)
+            GK.net.deployed_elans.extend(elan_fwd_links)
+
             # 4a. deploy E-Line links
             # cookie is used as identifier for the flowrules installed by the dummygatekeeper
             # eg. different services get a unique cookie for their flowrules
@@ -228,6 +233,9 @@ class Service(object):
             # 4b. deploy E-LAN links
             base = 10
             for link in elan_fwd_links:
+
+                elan_vnf_list=[]
+
                 # generate lan ip address
                 ip = 1
                 for intf in link["connection_points_reference"]:
@@ -236,6 +244,8 @@ class Service(object):
                     if vnf_id in self.sap_identifiers:
                         src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                         vnf_id = src_docker_name
+                    else:
+                        src_docker_name = vnf_id
                     vnf_name = vnf_id2vnf_name[vnf_id]
                     LOG.debug(
                         "Setting up E-LAN link. %s(%s:%s) -> %s" % (
@@ -250,6 +260,14 @@ class Service(object):
                             self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                             # increase for the next ip address on this E-LAN
                             ip += 1
+
+                            # add this vnf and interface to the E-LAN for tagging
+                            network = self.vnfds[vnf_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
+                            elan_vnf_list.append({'name':src_docker_name,'interface':intf_name})
+
+
+                # install the VLAN tags for this E-LAN
+                network.setLAN(elan_vnf_list)
                 # increase the base ip address for the next E-LAN
                 base += 1
 
@@ -436,6 +454,7 @@ class Service(object):
                 self.package_content_path,
                 make_relative_path(self.manifest.get("entry_service_template")))
             self.nsd = load_yaml(nsd_path)
+            GK.net.deployed_nsds.append(self.nsd)
             LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
 
     def _load_vnfd(self):
@@ -524,12 +543,22 @@ class Service(object):
         dc = DockerClient()
         for url in self.remote_docker_image_urls.itervalues():
             if not FORCE_PULL:  # only pull if not present (speedup for development)
-                if len(dc.images(name=url)) > 0:
+                if len(dc.images.list(name=url)) > 0:
                     LOG.debug("Image %r present. Skipping pull." % url)
                     continue
             LOG.info("Pulling image: %r" % url)
-            dc.pull(url,
-                    insecure_registry=True)
+            # this seems to fail with latest docker api version 2.0.2
+            # dc.images.pull(url,
+            #        insecure_registry=True)
+            #using docker cli instead
+            cmd = ["docker",
+                   "pull",
+                   url,
+                   ]
+            Popen(cmd).wait()
+
+
+
 
     def _check_docker_image_exists(self, image_name):
         """
@@ -537,7 +566,7 @@ class Service(object):
         :param image_name: name of the docker image
         :return:
         """
-        return len(DockerClient().images(image_name)) > 0
+        return len(DockerClient().images.list(name=image_name)) > 0
 
     def _calculate_placement(self, algorithm):
         """
@@ -747,6 +776,7 @@ api.add_resource(Exit, '/emulator/exit')
 
 def start_rest_api(host, port, datacenters=dict()):
     GK.dcs = datacenters
+    GK.net = get_dc_network()
     # start the Flask server (not the best performance but ok for our use case)
     app.run(host=host,
             port=port,
@@ -795,6 +825,14 @@ def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
         r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
     return r
 
+def get_dc_network():
+    """
+    Retrieve the DCNetwork to which this dummy gatekeeper (GK) is connected.
+    Assumes at least one datacenter is connected to this GK and that all datacenters belong to the same DCNetwork.
+    :return:
+    """
+    assert (len(GK.dcs) > 0)
+    return GK.dcs.values()[0].net
 
 if __name__ == '__main__':
     """
old mode 100644 (file)
new mode 100755 (executable)
index b92fd4f..9e5e0ab 100755 (executable)
@@ -30,6 +30,7 @@ from tabulate import tabulate
 import pprint
 import argparse
 import json
+from subprocess import Popen
 
 pp = pprint.PrettyPrinter(indent=4)
 
@@ -77,18 +78,21 @@ class RestApiClient():
             if len(c) > 1:
                 name = c[0]
                 status = c[1]
-                eth0ip = status.get("docker_network", "-")
+                #eth0ip = status.get("docker_network", "-")
+                netw_list = [netw_dict['intf_name'] for netw_dict in status.get("network")]
+                dc_if_list = [netw_dict['dc_portname'] for netw_dict in status.get("network")]
                 table.append([status.get("datacenter"),
                               name,
                               status.get("image"),
-                              eth0ip,
-                              status.get("state").get("Status")])
+                              ','.join(netw_list),
+                              ','.join(dc_if_list)])
+                #status.get("state").get("Status")]
 
         headers = ["Datacenter",
                    "Container",
                    "Image",
-                   "docker0",
-                   "Status"]
+                   "Interface list",
+                   "Datacenter interfaces"]
         print(tabulate(table, headers=headers, tablefmt="grid"))
 
     def status(self, args):
@@ -100,6 +104,11 @@ class RestApiClient():
 
         pp.pprint(list)
 
+    def xterm(self, args):
+        vnf_names = args.get("vnf_names")
+        for vnf_name in vnf_names:
+            Popen(['xterm', '-xrm', 'XTerm.vt100.allowTitleOps: false', '-T', vnf_name,
+                   '-e', "docker exec -it mn.{0} /bin/bash".format(vnf_name)])
 
 parser = argparse.ArgumentParser(description="""son-emu compute
     
@@ -110,8 +119,12 @@ parser = argparse.ArgumentParser(description="""son-emu compute
     """, formatter_class=argparse.RawTextHelpFormatter)
 parser.add_argument(
     "command",
-    choices=['start', 'stop', 'list', 'status'],
+    choices=['start', 'stop', 'list', 'status', 'xterm'],
     help="Action to be executed.")
+parser.add_argument(
+    "vnf_names",
+    nargs='*',
+    help="vnf names to open an xterm for")
 parser.add_argument(
     "--datacenter", "-d", dest="datacenter",
     help="Data center to which the command should be applied.")
index 858aad7..7af1fbb 100755 (executable)
@@ -49,7 +49,7 @@ class RestApiClient():
         vnf_name = self._parse_vnf_name(args.get("vnf_name"))
         vnf_interface = self._parse_vnf_interface(args.get("vnf_name"))
 
-        response = put("%s/restapi/monitor/%s/%s/%s" %
+        response = put("%s/restapi/monitor/interface/%s/%s/%s" %
                        (args.get("endpoint"),
                         vnf_name,
                         vnf_interface,
@@ -60,7 +60,7 @@ class RestApiClient():
         vnf_name = self._parse_vnf_name(args.get("vnf_name"))
         vnf_interface = self._parse_vnf_interface(args.get("vnf_name"))
 
-        response = delete("%s/restapi/monitor/%s/%s/%s" %
+        response = delete("%s/restapi/monitor/interface/%s/%s/%s" %
                        (args.get("endpoint"),
                         vnf_name,
                         vnf_interface,
@@ -71,7 +71,7 @@ class RestApiClient():
         vnf_name = self._parse_vnf_name(args.get("vnf_name"))
         vnf_interface = self._parse_vnf_interface(args.get("vnf_name"))
 
-        response = put("%s/restapi/monitor/%s/%s/%s/%s" %
+        response = put("%s/restapi/monitor/flow/%s/%s/%s/%s" %
                        (args.get("endpoint"),
                         vnf_name,
                         vnf_interface,
@@ -84,7 +84,7 @@ class RestApiClient():
         vnf_name = self._parse_vnf_name(args.get("vnf_name"))
         vnf_interface = self._parse_vnf_interface(args.get("vnf_name"))
 
-        response = delete("%s/restapi/monitor/%s/%s/%s/%s" %
+        response = delete("%s/restapi/monitor/flow/%s/%s/%s/%s" %
                        (args.get("endpoint"),
                         vnf_name,
                         vnf_interface,
index 9cf2a3b..d0e45da 100755 (executable)
@@ -36,7 +36,8 @@ from prometheus_client import start_http_server, Summary, Histogram, Gauge, Coun
 import threading\r
 from subprocess import Popen\r
 import os\r
-\r
+import docker\r
+import json\r
 \r
 logging.basicConfig(level=logging.INFO)\r
 \r
@@ -53,6 +54,7 @@ COOKIE_MASK = 0xffffffff
 class DCNetworkMonitor():\r
     def __init__(self, net):\r
         self.net = net\r
+        self.dockercli = docker.from_env()\r
 \r
         # pushgateway address\r
         self.pushgateway = 'localhost:{0}'.format(PUSHGATEWAY_PORT)\r
@@ -88,6 +90,7 @@ class DCNetworkMonitor():
         self.monitor_flow_lock = threading.Lock()\r
         self.network_metrics = []\r
         self.flow_metrics = []\r
+        self.skewmon_metrics = {}\r
 \r
         # start monitoring thread\r
         self.start_monitoring = True\r
@@ -178,9 +181,10 @@ class DCNetworkMonitor():
 \r
                 self.flow_metrics.remove(flow_dict)\r
 \r
-                for collector in self.registry._collectors:\r
-                    if (vnf_name, vnf_interface, cookie) in collector._metrics:\r
-                        collector.remove(vnf_name, vnf_interface, cookie)\r
+                # set metric to NaN\r
+                self.prom_metrics[flow_dict['metric_key']]. \\r
+                    labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
+                    set(float('nan'))\r
 \r
                 delete_from_gateway(self.pushgateway, job='sonemu-SDNcontroller')\r
 \r
@@ -276,29 +280,11 @@ class DCNetworkMonitor():
 \r
                 self.network_metrics.remove(metric_dict)\r
 \r
-                #this removes the complete metric, all labels...\r
-                #REGISTRY.unregister(self.prom_metrics[metric_dict['metric_key']])\r
-                #self.registry.unregister(self.prom_metrics[metric_dict['metric_key']])\r
-\r
-                for collector in self.registry._collectors :\r
-\r
-                    """\r
-                    INFO:root:name:sonemu_rx_count_packets\r
-                    labels:('vnf_name', 'vnf_interface')\r
-                    metrics:{(u'tsrc', u'output'): < prometheus_client.core.Gauge\r
-                    object\r
-                    at\r
-                    0x7f353447fd10 >}\r
-                    """\r
-                    logging.info('{0}'.format(collector._metrics.values()))\r
-\r
-                    if (vnf_name, vnf_interface, 'None') in collector._metrics:\r
-                        logging.info('2 name:{0} labels:{1} metrics:{2}'.format(collector._name, collector._labelnames,\r
-                                                                              collector._metrics))\r
-                        collector.remove(vnf_name, vnf_interface, 'None')\r
-\r
                 # set values to NaN, prometheus api currently does not support removal of metrics\r
                 #self.prom_metrics[metric_dict['metric_key']].labels(vnf_name, vnf_interface).set(float('nan'))\r
+                self.prom_metrics[metric_dict['metric_key']]. \\r
+                    labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=None). \\r
+                    set(float('nan'))\r
 \r
                 # this removes the complete metric, all labels...\r
                 # 1 single monitor job for all metrics of the SDN controller\r
@@ -363,6 +349,13 @@ class DCNetworkMonitor():
 \r
                 self.set_flow_metric(flow_dict, flow_stat_dict)\r
 \r
+\r
+            try:\r
+                if len(self.flow_metrics) > 0:\r
+                    pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
+            except Exception, e:\r
+                logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))\r
+\r
             self.monitor_flow_lock.release()\r
             time.sleep(1)\r
 \r
@@ -392,6 +385,12 @@ class DCNetworkMonitor():
                 for metric_dict in metric_list:\r
                     self.set_network_metric(metric_dict, port_stat_dict)\r
 \r
+            try:\r
+                if len(self.network_metrics) > 0:\r
+                    pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
+            except Exception, e:\r
+                logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))\r
+\r
             self.monitor_lock.release()\r
             time.sleep(1)\r
 \r
@@ -413,12 +412,9 @@ class DCNetworkMonitor():
 \r
                 # set prometheus metric\r
                 self.prom_metrics[metric_dict['metric_key']].\\r
-                    labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': None}).\\r
+                    labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=None).\\r
                     set(this_measurement)\r
 \r
-                # 1 single monitor job for all metrics of the SDN controller\r
-                pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
-\r
                 # also the rate is calculated here, but not used for now\r
                 # (rate can be easily queried from prometheus also)\r
                 if previous_monitor_time <= 0 or previous_monitor_time >= port_uptime:\r
@@ -465,13 +461,8 @@ class DCNetworkMonitor():
         #flow_uptime = flow_stat['duration_sec'] + flow_stat['duration_nsec'] * 10 ** (-9)\r
 \r
         self.prom_metrics[metric_dict['metric_key']]. \\r
-            labels({'vnf_name': vnf_name, 'vnf_interface': vnf_interface, 'flow_id': cookie}). \\r
+            labels(vnf_name=vnf_name, vnf_interface=vnf_interface, flow_id=cookie). \\r
             set(counter)\r
-        try:\r
-            pushadd_to_gateway(self.pushgateway, job='sonemu-SDNcontroller', registry=self.registry)\r
-        except Exception, e:\r
-            logging.warning("Pushgateway not reachable: {0} {1}".format(Exception, e))\r
-\r
 \r
     def start_Prometheus(self, port=9090):\r
         # prometheus.yml configuration file is located in the same directory as this file\r
@@ -523,23 +514,13 @@ class DCNetworkMonitor():
         self.monitor_flow_thread.join()\r
 \r
         # these containers are used for monitoring but are started now outside of son-emu\r
-        '''\r
-        if self.prometheus_process is not None:\r
-            logging.info('stopping prometheus container')\r
-            self.prometheus_process.terminate()\r
-            self.prometheus_process.kill()\r
-            self._stop_container('prometheus')\r
-        '''\r
+\r
         if self.pushgateway_process is not None:\r
             logging.info('stopping pushgateway container')\r
-            #self.pushgateway_process.terminate()\r
-            #self.pushgateway_process.kill()\r
             self._stop_container('pushgateway')\r
 \r
         if self.cadvisor_process is not None:\r
             logging.info('stopping cadvisor container')\r
-            #self.cadvisor_process.terminate()\r
-            #self.cadvisor_process.kill()\r
             self._stop_container('cadvisor')\r
 \r
     def switch_tx_rx(self,metric=''):\r
@@ -554,9 +535,74 @@ class DCNetworkMonitor():
 \r
     def _stop_container(self, name):\r
 \r
-        cmd = ["docker",\r
-               "rm",\r
-               "-f",\r
-               name]\r
-        Popen(cmd).wait()\r
+        container = self.dockercli.containers.get(name)\r
+        container.remove(force=True)\r
+\r
+    def update_skewmon(self, vnf_name, resource_name, action):\r
+\r
+        ret = ''\r
+\r
+        config_file_path = '/tmp/skewmon.cfg'\r
+        configfile = open(config_file_path, 'a+')\r
+        try:\r
+            config = json.load(configfile)\r
+        except:\r
+            #not a valid json file or empty\r
+            config = {}\r
+\r
+        #initialize config file\r
+        if len(self.skewmon_metrics) == 0:\r
+            config = {}\r
+        json.dump(config, configfile)\r
+        configfile.close()\r
+\r
+        docker_name = 'mn.' + vnf_name\r
+        vnf_container = self.dockercli.containers.get(docker_name)\r
+        key = resource_name + '_' + vnf_container.short_id\r
+        vnf_id = vnf_container.id\r
+\r
+        if action == 'start':\r
+            # add a new vnf to monitor\r
+            config[key] = dict(VNF_NAME=vnf_name,\r
+                                VNF_ID=vnf_id,\r
+                                VNF_METRIC=resource_name)\r
+            ret = 'adding to skewness monitor: {0} {1} '.format(vnf_name, resource_name)\r
+            logging.info(ret)\r
+        elif action == 'stop':\r
+            # remove vnf to monitor\r
+            config.pop(key)\r
+            ret = 'removing from skewness monitor: {0} {1} '.format(vnf_name, resource_name)\r
+            logging.info(ret)\r
+\r
+        self.skewmon_metrics = config\r
+        configfile = open(config_file_path, 'w')\r
+        json.dump(config, configfile)\r
+        configfile.close()\r
+\r
+        try:\r
+            skewmon_container = self.dockercli.containers.get('skewmon')\r
+\r
+            # remove container if config is empty\r
+            if len(config) == 0:\r
+                ret += 'stopping skewness monitor'\r
+                logging.info('stopping skewness monitor')\r
+                skewmon_container.remove(force=True)\r
+\r
+        except docker.errors.NotFound:\r
+            # start container if not running\r
+            ret += 'starting skewness monitor'\r
+            logging.info('starting skewness monitor')\r
+            volumes = {'/sys/fs/cgroup':{'bind':'/sys/fs/cgroup', 'mode':'ro'},\r
+                       '/tmp/skewmon.cfg':{'bind':'/config.txt', 'mode':'ro'}}\r
+            self.dockercli.containers.run('skewmon',\r
+                                          detach=True,\r
+                                          volumes=volumes,\r
+                                          labels=['com.containernet'],\r
+                                          name='skewmon'\r
+                                          )\r
+        return ret\r
+\r
+\r
+\r
+\r
 \r
index 531e5f1..1a8d938 100755 (executable)
@@ -32,6 +32,7 @@ import time
 from subprocess import Popen
 import re
 import requests
+import os
 
 from mininet.net import Containernet
 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
@@ -46,6 +47,9 @@ from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
 LOG = logging.getLogger("dcemulator.net")
 LOG.setLevel(logging.DEBUG)
 
+# default CPU period used for cpu percentage-based cfs values (microseconds)
+CPU_PERIOD = 1000000
+
 class DCNetwork(Containernet):
     """
     Wraps the original Mininet/Containernet class and provides
@@ -55,7 +59,7 @@ class DCNetwork(Containernet):
     """
 
     def __init__(self, controller=RemoteController, monitor=False,
-                 enable_learning = True,   # in case of RemoteController (Ryu), learning switch behavior can be turned off/on
+                 enable_learning=False,  # learning switch behavior of the default OVS switches, in combination with the Ryu controller, can be turned on/off; disabling it is needed for the E-LAN functionality
                  dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                  dc_emulation_max_mem=512,  # emulation max mem in MB
                  **kwargs):
@@ -68,6 +72,12 @@ class DCNetwork(Containernet):
         # members
         self.dcs = {}
         self.ryu_process = None
+        # lists of deployed NSDs, E-Lines and E-LANs (uploaded from the dummy gatekeeper)
+        self.deployed_nsds = []
+        self.deployed_elines = []
+        self.deployed_elans = []
+        self.installed_chains = []
+
 
         # always cleanup environment before we start the emulator
         self.killRyu()
@@ -77,10 +87,18 @@ class DCNetwork(Containernet):
         Containernet.__init__(
             self, switch=OVSKernelSwitch, controller=controller, **kwargs)
 
+        # default switch configuration
+        enable_ryu_learning = False
+        if enable_learning :
+            self.failMode = 'standalone'
+            enable_ryu_learning = True
+        else:
+            self.failMode = 'secure'
+
         # Ryu management
         if controller == RemoteController:
             # start Ryu controller
-            self.startRyu(learning_switch=enable_learning)
+            self.startRyu(learning_switch=enable_ryu_learning)
 
         # add the specified controller
         self.addController('c0', controller=controller)
@@ -106,6 +124,7 @@ class DCNetwork(Containernet):
         # initialize resource model registrar
         self.rm_registrar = ResourceModelRegistrar(
             dc_emulation_max_cpu, dc_emulation_max_mem)
+        self.cpu_period = CPU_PERIOD
 
     def addDatacenter(self, label, metadata={}, resource_log_path=None):
         """
@@ -127,7 +146,7 @@ class DCNetwork(Containernet):
         """
         assert node1 is not None
         assert node2 is not None
-        LOG.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
+
         # ensure type of node1
         if isinstance( node1, basestring ):
             if node1 in self.dcs:
@@ -183,7 +202,7 @@ class DCNetwork(Containernet):
         edge_attributes = [p for p in params if p in weight_metrics]
         for attr in edge_attributes:
             # if delay: strip ms (need number as weight in graph)
-            match = re.search('([0-9]*\.?[0-9]+)', params[attr])
+            match = re.search('([0-9]*\.?[0-9]+)', str(params[attr]))
             if match:
                 attr_number = match.group(1)
             else:
@@ -205,6 +224,9 @@ class DCNetwork(Containernet):
         attr_dict2.update(attr_dict)
         self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)
 
+        LOG.debug("addLink: n1={0} intf1={1} -- n2={2} intf2={3}".format(
+            str(node1),node1_port_name, str(node2), node2_port_name))
+
         return link
 
     def addDocker( self, label, **params ):
@@ -225,9 +247,26 @@ class DCNetwork(Containernet):
         """
         Wrapper for addSwitch method to store switch also in graph.
         """
+
+        # add this switch to the global topology overview
         if add_to_graph:
             self.DCNetwork_graph.add_node(name)
-        return Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
+
+        # set the learning switch behavior
+        if 'failMode' in params :
+            failMode = params['failMode']
+        else :
+            failMode = self.failMode
+
+        s = Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', failMode=failMode, **params)
+
+        # set flow entry that enables learning switch behavior (needed to enable E-LAN functionality)
+        #LOG.info('failmode {0}'.format(failMode))
+        #if failMode == 'standalone' :
+        #    LOG.info('add NORMAL')
+        #    s.dpctl('add-flow', 'actions=NORMAL')
+
+        return s
 
     def getAllContainers(self):
         """
@@ -260,6 +299,193 @@ class DCNetwork(Containernet):
     def CLI(self):
         CLI(self)
 
+    def setLAN(self, vnf_list):
+        """
+        setup an E-LAN network by assigning the same VLAN tag to each DC interface of the VNFs in the E-LAN
+
+        :param vnf_list: names of the VNFs in this E-LAN  [{name:,interface:},...]
+        :return:
+        """
+        src_sw = None
+        src_sw_inport_nr = 0
+        src_sw_inport_name = None
+
+        # get a vlan tag for this E-LAN
+        vlan = self.vlans.pop()
+
+        for vnf in vnf_list:
+            vnf_src_name = vnf['name']
+            vnf_src_interface = vnf['interface']
+
+            # check if port is specified (vnf:port)
+            if vnf_src_interface is None:
+                # take first interface by default
+                connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
+                link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+                vnf_src_interface = link_dict[0]['src_port_id']
+
+            for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
+                link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+                for link in link_dict:
+                    if (link_dict[link]['src_port_id'] == vnf_src_interface or
+                                link_dict[link]['src_port_name'] == vnf_src_interface):  # Fix: we might also get interface names, e.g, from a son-emu-cli call
+                        # found the right link and connected switch
+                        src_sw = connected_sw
+                        src_sw_inport_nr = link_dict[link]['dst_port_nr']
+                        src_sw_inport_name = link_dict[link]['dst_port_name']
+                        break
+
+            # set the tag on the dc switch interface
+            LOG.debug('set E-LAN: vnf name: {0} interface: {1} tag: {2}'.format(vnf_src_name, vnf_src_interface,vlan))
+            switch_node = self.getNodeByName(src_sw)
+            self._set_vlan_tag(switch_node, src_sw_inport_name, vlan)
+
+    def _addMonitorFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None,
+                       tag=None, **kwargs):
+        """
+        Add a monitoring flow entry that adds a special flow entry/counter at the beginning or end of a chain.
+        So this monitoring flowrule exists on top of a previously defined chain rule and uses the same vlan tag/routing.
+        :param vnf_src_name:
+        :param vnf_dst_name:
+        :param vnf_src_interface:
+        :param vnf_dst_interface:
+        :param tag: vlan tag to be used for this chain (same tag as existing chain)
+        :param monitor_placement: 'tx' or 'rx' indicating to place the extra flowentry resp. at the beginning or end of the chain
+        :return:
+        """
+
+        src_sw = None
+        src_sw_inport_nr = 0
+        src_sw_inport_name = None
+        dst_sw = None
+        dst_sw_outport_nr = 0
+        dst_sw_outport_name = None
+
+        LOG.debug("call AddMonitorFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
+                  vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
+
+        #check if port is specified (vnf:port)
+        if vnf_src_interface is None:
+            # take first interface by default
+            connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
+            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+            vnf_src_interface = link_dict[0]['src_port_id']
+
+        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
+            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+            for link in link_dict:
+                if (link_dict[link]['src_port_id'] == vnf_src_interface or
+                        link_dict[link]['src_port_name'] == vnf_src_interface):  # Fix: we might also get interface names, e.g, from a son-emu-cli call
+                    # found the right link and connected switch
+                    src_sw = connected_sw
+                    src_sw_inport_nr = link_dict[link]['dst_port_nr']
+                    src_sw_inport_name = link_dict[link]['dst_port_name']
+                    break
+
+        if vnf_dst_interface is None:
+            # take first interface by default
+            connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
+            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
+            vnf_dst_interface = link_dict[0]['dst_port_id']
+
+        vnf_dst_name = vnf_dst_name.split(':')[0]
+        for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
+            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
+            for link in link_dict:
+                if link_dict[link]['dst_port_id'] == vnf_dst_interface or \
+                        link_dict[link]['dst_port_name'] == vnf_dst_interface:  # we might also get interface names, e.g., from a son-emu-cli call
+                    # found the right link and connected switch
+                    dst_sw = connected_sw
+                    dst_sw_outport_nr = link_dict[link]['src_port_nr']
+                    dst_sw_outport_name = link_dict[link]['src_port_name']
+                    break
+
+        if tag is None or tag < 0:
+            LOG.error('tag not valid: {0}'.format(tag))
+
+        # get shortest path
+        try:
+            # returns the first found shortest path
+            # if all shortest paths are wanted, use: all_shortest_paths
+            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+        except:
+            LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
+                vnf_src_name, vnf_dst_name, src_sw, dst_sw))
+            LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
+            LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
+            for e, v in self.DCNetwork_graph.edges():
+                LOG.debug("%r" % self.DCNetwork_graph[e][v])
+            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+
+        LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+
+        current_hop = src_sw
+        switch_inport_nr = src_sw_inport_nr
+
+        cmd = kwargs.get('cmd')
+
+        # iterate through the path to install the flow entries
+        for i in range(0, len(path)):
+            current_node = self.getNodeByName(current_hop)
+
+            if i < len(path) - 1:
+                next_hop = path[i + 1]
+            else:
+                # last switch reached
+                next_hop = vnf_dst_name
+
+            next_node = self.getNodeByName(next_hop)
+
+            if next_hop == vnf_dst_name:
+                switch_outport_nr = dst_sw_outport_nr
+                LOG.info("end node reached: {0}".format(vnf_dst_name))
+            elif not isinstance( next_node, OVSSwitch ):
+                LOG.info("Next node: {0} is not a switch".format(next_hop))
+                return "Next node: {0} is not a switch".format(next_hop)
+            else:
+                # take first link between switches by default
+                index_edge_out = 0
+                switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']
+
+            # set flow entry via Ryu REST API or ovs-ofctl
+            if isinstance( current_node, OVSSwitch ):
+                kwargs['vlan'] = tag
+                kwargs['path'] = path
+                kwargs['current_hop'] = current_hop
+                kwargs['switch_inport_name'] = src_sw_inport_name
+                kwargs['switch_outport_name'] = dst_sw_outport_name
+                kwargs['skip_vlan_tag'] = True
+
+                # guard against a missing kwarg
+                monitor_placement = (kwargs.get('monitor_placement') or '').strip()
+                insert_flow = False
+                # 'tx': put the monitoring flow at the first (src) switch
+                if monitor_placement == 'tx' and i == 0:  # first node
+                    insert_flow = True
+                # 'rx': put the monitoring flow at the last (dst) switch
+                elif monitor_placement == 'rx' and i == len(path) - 1:  # last node
+                    insert_flow = True
+                elif monitor_placement not in ['rx', 'tx']:
+                    LOG.error('invalid monitor_placement: {0}'.format(monitor_placement))
+
+
+                if self.controller == RemoteController and insert_flow:
+                    ## set flow entry via ryu rest api
+                    self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+                    break
+                elif insert_flow:
+                    ## set flow entry via ovs-ofctl
+                    self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+                    break
+
+            # take first link between switches by default
+            if isinstance( next_node, OVSSwitch ):
+                switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
+                current_hop = next_hop
+
+        return "path {2} between {0} and {1}".format(vnf_src_name, vnf_dst_name, cmd)
+
+
     def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
         """
         Chain 2 vnf interfaces together by installing the flowrules in the switches along their path.
@@ -274,19 +500,47 @@ class DCNetwork(Containernet):
         :param cookie: cookie for the installed flowrules (can be used later as identifier for a set of installed chains)
         :param match: custom match entry to be added to the flowrules (default: only in_port and vlan tag)
         :param priority: custom flowrule priority
+<<<<<<< HEAD
+        :param monitor: boolean to indicate whether this chain is a monitoring chain
+        :param tag: vlan tag to be used for this chain (pre-defined or new one if none is specified)
+=======
+        :param path: custom path between the two VNFs (list of switches)
+>>>>>>> upstream/master
         :return: output log string
         """
+
+        # special procedure for monitoring flows
+        if kwargs.get('monitor'):
+
+            # check if chain already exists
+            found_chains = [
+                chain_dict for chain_dict in self.installed_chains
+                if (chain_dict['vnf_src_name'] == vnf_src_name and
+                    chain_dict['vnf_src_interface'] == vnf_src_interface and
+                    chain_dict['vnf_dst_name'] == vnf_dst_name and
+                    chain_dict['vnf_dst_interface'] == vnf_dst_interface)]
+
+            if len(found_chains) > 0:
+                # this chain exists, so need an extra monitoring flow
+                # assume only 1 chain per vnf/interface pair
+                LOG.debug('*** installing monitoring chain on top of pre-defined chain from {0}:{1} -> {2}:{3}'.
+                            format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
+                tag = found_chains[0]['tag']
+                ret = self._addMonitorFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface,
+                                     tag=tag, table_id=0, **kwargs)
+                return ret
+            else:
+                # no chain exists yet (or E-LAN) -> install a normal chain instead
+                LOG.warning('*** installing monitoring chain without pre-defined NSD chain from {0}:{1} -> {2}:{3}'.
+                            format(vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface))
+
+
         cmd = kwargs.get('cmd')
-        if cmd == 'add-flow':
+        if cmd == 'add-flow' or cmd == 'del-flows':
             ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
             if kwargs.get('bidirectional'):
+                if kwargs.get('path') is not None:
+                    kwargs['path'] = list(reversed(kwargs.get('path')))
                 ret = ret +'\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
 
-        elif cmd == 'del-flows':
-            ret = self._chainAddFlow(vnf_src_name, vnf_dst_name, vnf_src_interface, vnf_dst_interface, **kwargs)
-            if kwargs.get('bidirectional'):
-                ret = ret + '\n' + self._chainAddFlow(vnf_dst_name, vnf_src_name, vnf_dst_interface, vnf_src_interface, **kwargs)
-
         else:
             ret = "Command unknown"
 
@@ -296,9 +550,11 @@ class DCNetwork(Containernet):
     def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
 
         src_sw = None
-        dst_sw = None
         src_sw_inport_nr = 0
+        src_sw_inport_name = None
+        dst_sw = None
         dst_sw_outport_nr = 0
+        dst_sw_outport_name = None
 
         LOG.debug("call chainAddFlow vnf_src_name=%r, vnf_src_interface=%r, vnf_dst_name=%r, vnf_dst_interface=%r",
                   vnf_src_name, vnf_src_interface, vnf_dst_name, vnf_dst_interface)
@@ -318,6 +574,7 @@ class DCNetwork(Containernet):
                     # found the right link and connected switch
                     src_sw = connected_sw
                     src_sw_inport_nr = link_dict[link]['dst_port_nr']
+                    src_sw_inport_name = link_dict[link]['dst_port_name']
                     break
 
         if vnf_dst_interface is None:
@@ -335,42 +592,59 @@ class DCNetwork(Containernet):
                     # found the right link and connected switch
                     dst_sw = connected_sw
                     dst_sw_outport_nr = link_dict[link]['src_port_nr']
+                    dst_sw_outport_name = link_dict[link]['src_port_name']
                     break
 
-
-        # get shortest path
-        try:
-            # returns the first found shortest path
-            # if all shortest paths are wanted, use: all_shortest_paths
-            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
-        except:
-            LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
-                vnf_src_name, vnf_dst_name, src_sw, dst_sw))
-            LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
-            LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
-            for e, v in self.DCNetwork_graph.edges():
-                LOG.debug("%r" % self.DCNetwork_graph[e][v])
-            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+        path = kwargs.get('path')
+        if path is None:
+            # get shortest path
+            try:
+                # returns the first found shortest path
+                # if all shortest paths are wanted, use: all_shortest_paths
+                path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
+            except:
+                LOG.exception("No path could be found between {0} and {1} using src_sw={2} and dst_sw={3}".format(
+                    vnf_src_name, vnf_dst_name, src_sw, dst_sw))
+                LOG.debug("Graph nodes: %r" % self.DCNetwork_graph.nodes())
+                LOG.debug("Graph edges: %r" % self.DCNetwork_graph.edges())
+                for e, v in self.DCNetwork_graph.edges():
+                    LOG.debug("%r" % self.DCNetwork_graph[e][v])
+                return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
 
         LOG.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
 
         current_hop = src_sw
         switch_inport_nr = src_sw_inport_nr
 
-        # choose free vlan if path contains more than 1 switch
+        # choose a vlan tag: reuse the pre-defined tag if given, else pop a free one
         cmd = kwargs.get('cmd')
         vlan = None
         if cmd == 'add-flow':
-            if len(path) > 1:
+            if kwargs.get('tag'):
+                # use pre-defined tag
+                vlan = kwargs.get('tag')
+            else:
                 vlan = self.vlans.pop()
 
+        # store the used vlan tag to identify this chain
+        if not kwargs.get('monitor'):
+            chain_dict = {}
+            chain_dict['vnf_src_name'] = vnf_src_name
+            chain_dict['vnf_dst_name'] = vnf_dst_name
+            chain_dict['vnf_src_interface'] = vnf_src_interface
+            chain_dict['vnf_dst_interface'] = vnf_dst_interface
+            chain_dict['tag'] = vlan
+            self.installed_chains.append(chain_dict)
+
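Each installed (non-monitoring) chain is remembered as a plain dict so later monitoring calls can look up its tag; one record might look like this (values illustrative):

```python
{'vnf_src_name': 'vnf1', 'vnf_dst_name': 'vnf2',
 'vnf_src_interface': 'port0', 'vnf_dst_interface': 'port0', 'tag': 42}
```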
+        # iterate through the path to install the flow entries
         for i in range(0,len(path)):
             current_node = self.getNodeByName(current_hop)
 
-            if path.index(current_hop) < len(path)-1:
-                next_hop = path[path.index(current_hop)+1]
+            if i < len(path) - 1:
+                next_hop = path[i + 1]
             else:
-                #last switch reached
+                # last switch reached
                 next_hop = vnf_dst_name
 
             next_node = self.getNodeByName(next_hop)
@@ -392,6 +666,9 @@ class DCNetwork(Containernet):
                 kwargs['vlan'] = vlan
                 kwargs['path'] = path
                 kwargs['current_hop'] = current_hop
+                kwargs['switch_inport_name'] = src_sw_inport_name
+                kwargs['switch_outport_name'] = dst_sw_outport_name
+                kwargs['pathindex'] = i
 
                 if self.controller == RemoteController:
                     ## set flow entry via ryu rest api
@@ -400,8 +677,6 @@ class DCNetwork(Containernet):
                     ## set flow entry via ovs-ofctl
                     self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
 
-
-
             # take first link between switches by default
             if isinstance( next_node, OVSSwitch ):
                 switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
@@ -416,9 +691,16 @@ class DCNetwork(Containernet):
         match_input = kwargs.get('match')
         cmd = kwargs.get('cmd')
         path = kwargs.get('path')
-        current_hop = kwargs.get('current_hop')
+        index = kwargs.get('pathindex')
+
         vlan = kwargs.get('vlan')
         priority = kwargs.get('priority')
+        # flag to not set the ovs port vlan tag
+        skip_vlan_tag = kwargs.get('skip_vlan_tag')
+        # table id to put this flowentry
+        table_id = kwargs.get('table_id')
+        if not table_id:
+            table_id = 0
 
         s = ','
         if match_input:
@@ -432,6 +714,8 @@ class DCNetwork(Containernet):
         if priority:
             flow['priority'] = int(priority)
 
+        flow['table_id'] = table_id
+
         flow['actions'] = []
 
         # possible Ryu actions, match fields:
@@ -439,24 +723,39 @@ class DCNetwork(Containernet):
         if cmd == 'add-flow':
             prefix = 'stats/flowentry/add'
             if vlan != None:
-                if path.index(current_hop) == 0:  # first node
-                    action = {}
-                    action['type'] = 'PUSH_VLAN'  # Push a new VLAN tag if a input frame is non-VLAN-tagged
-                    action['ethertype'] = 33024   # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
-                    flow['actions'].append(action)
-                    action = {}
-                    action['type'] = 'SET_FIELD'
-                    action['field'] = 'vlan_vid'
-                    # ryu expects the field to be masked
-                    action['value'] = vlan | 0x1000
-                    flow['actions'].append(action)
-                elif path.index(current_hop) == len(path) - 1:  # last node
-                    match += ',dl_vlan=%s' % vlan
-                    action = {}
-                    action['type'] = 'POP_VLAN'
-                    flow['actions'].append(action)
+                if index == 0:  # first node
+                    # set vlan tag in ovs instance (to isolate E-LANs)
+                    if not skip_vlan_tag:
+                        in_port_name = kwargs.get('switch_inport_name')
+                        self._set_vlan_tag(node, in_port_name, vlan)
+                    # set vlan push action if more than 1 switch in the path
+                    if len(path) > 1:
+                        action = {}
+                        action['type'] = 'PUSH_VLAN'  # Push a new VLAN tag if an input frame is non-VLAN-tagged
+                        action['ethertype'] = 33024   # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
+                        flow['actions'].append(action)
+                        action = {}
+                        action['type'] = 'SET_FIELD'
+                        action['field'] = 'vlan_vid'
+                        # ryu expects the field to be masked
+                        action['value'] = vlan | 0x1000
+                        flow['actions'].append(action)
+
+                elif index == len(path) - 1:  # last node
+                    # set vlan tag in ovs instance (to isolate E-LANs)
+                    if not skip_vlan_tag:
+                        out_port_name = kwargs.get('switch_outport_name')
+                        self._set_vlan_tag(node, out_port_name, vlan)
+                    # set vlan pop action if more than 1 switch in the path
+                    if len(path) > 1:
+                        match += ',dl_vlan=%s' % vlan
+                        action = {}
+                        action['type'] = 'POP_VLAN'
+                        flow['actions'].append(action)
+
                 else:  # middle nodes
                     match += ',dl_vlan=%s' % vlan
+
             # output action must come last
             action = {}
             action['type'] = 'OUTPUT'
@@ -478,14 +777,19 @@ class DCNetwork(Containernet):
         flow['match'] = self._parse_match(match)
         self.ryu_REST(prefix, data=flow)
 
+    def _set_vlan_tag(self, node, switch_port, tag):
+        node.vsctl('set', 'port {0} tag={1}'.format(switch_port, tag))
+        LOG.debug("set vlan in switch: {0} in_port: {1} vlan tag: {2}".format(node.name, switch_port, tag))
+
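`vsctl` issues the equivalent of `ovs-vsctl set port <name> tag=<vlan>`, making the attachment port an access port for that vlan; this is what isolates different E-LANs sharing one data-center switch. A sketch with hypothetical names:

```python
# 'dc1.s1' and its VNF-facing port 'dc1.s1-eth2' are made-up examples
switch_node = net.getNodeByName('dc1.s1')
net._set_vlan_tag(switch_node, 'dc1.s1-eth2', 42)
```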
     def _set_flow_entry_dpctl(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
+
         match = 'in_port=%s' % switch_inport_nr
 
         cookie = kwargs.get('cookie')
         match_input = kwargs.get('match')
         cmd = kwargs.get('cmd')
         path = kwargs.get('path')
-        current_hop = kwargs.get('current_hop')
+        index = kwargs.get('pathindex')
         vlan = kwargs.get('vlan')
 
         s = ','
@@ -497,10 +801,10 @@ class DCNetwork(Containernet):
         if cmd == 'add-flow':
             action = 'action=%s' % switch_outport_nr
             if vlan != None:
-                if path.index(current_hop) == 0:  # first node
+                if index == 0: # first node
                     action = ('action=mod_vlan_vid:%s' % vlan) + (',output=%s' % switch_outport_nr)
                     match = '-O OpenFlow13 ' + match
-                elif path.index(current_hop) == len(path) - 1:  # last node
+                elif index == len(path) - 1:  # last node
                     match += ',dl_vlan=%s' % vlan
                     action = 'action=strip_vlan,output=%s' % switch_outport_nr
                 else:  # middle nodes
@@ -519,7 +823,11 @@ class DCNetwork(Containernet):
     def startRyu(self, learning_switch=True):
         # start Ryu controller with rest-API
         python_install_path = site.getsitepackages()[0]
-        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
+        # Ryu's default learning switch:
+        #ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
+        # custom learning switch that installs a default NORMAL action in the OVS switches:
+        dir_path = os.path.dirname(os.path.realpath(__file__))
+        ryu_path = dir_path + '/son_emu_simple_switch_13.py'
         ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
         # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
         # Ryu still uses 6633 as default
@@ -529,9 +837,12 @@ class DCNetwork(Containernet):
         FNULL = open("/tmp/ryu.log", 'w')
         if learning_switch:
             self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+            LOG.debug('starting ryu-controller with {0}'.format(ryu_path))
+            LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
         else:
             # no learning switch, but with rest api
             self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+            LOG.debug('starting ryu-controller with {0}'.format(ryu_path2))
         time.sleep(1)
 
     def killRyu(self):
@@ -593,3 +904,14 @@ class DCNetwork(Containernet):
                 dict.update({match[0]:m2})
         return dict
 
+    def find_connected_dc_interface(self, vnf_src_name, vnf_src_interface):
+        """Return the name of the datacenter switch port the given VNF interface is connected to."""
+        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
+            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
+            for link in link_dict:
+                if (link_dict[link]['src_port_id'] == vnf_src_interface or
+                        link_dict[link]['src_port_name'] == vnf_src_interface):  # we might also get interface names, e.g., from a son-emu-cli call
+                    # found the right link and connected switch
+                    return link_dict[link]['dst_port_name']
index a0112cf..2702bf5 100755 (executable)
@@ -38,7 +38,6 @@ LOG.setLevel(logging.DEBUG)
 
 DCDPID_BASE = 1000  # start of switch dpid's used for data center switches
 
-
 class EmulatorCompute(Docker):
     """
     Emulator specific compute node class.
@@ -61,9 +60,17 @@ class EmulatorCompute(Docker):
         Helper method to receive information about the virtual networks
         this compute instance is connected to.
         """
-        # format list of tuples (name, Ip, MAC, isUp, status)
-        return [{'intf_name':str(i), 'ip':i.IP(), 'mac':i.MAC(), 'up':i.isUp(), 'status':i.status()}
-                for i in self.intfList()]
+        # get all links and find dc switch interface
+        networkStatusList = []
+        for i in self.intfList():
+            vnf_name = self.name
+            vnf_interface = str(i)
+            dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
+            # format: list of dicts (intf_name, ip, mac, up, status, dc_portname)
+            intf_dict = {'intf_name': str(i), 'ip': i.IP(), 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+            networkStatusList.append(intf_dict)
+
+        return networkStatusList
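
A single entry of the returned list now looks like this (values illustrative):

```python
{'intf_name': 'mgmt', 'ip': '10.20.0.2', 'mac': '02:42:ac:11:00:02',
 'up': True, 'status': 'OK', 'dc_portname': 'dc1.s1-eth2'}
```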
 
     def getStatus(self):
         """
@@ -83,6 +90,7 @@ class EmulatorCompute(Docker):
         status["memswap_limit"] = self.memswap_limit
         status["state"] = self.dcli.inspect_container(self.dc)["State"]
         status["id"] = self.dcli.inspect_container(self.dc)["Id"]
+        status["short_id"] = self.dcli.inspect_container(self.dc)["Id"][:12]
         status["datacenter"] = (None if self.datacenter is None
                                 else self.datacenter.label)
         return status
@@ -166,6 +174,12 @@ class Datacenter(object):
             if len(network) < 1:
                 network.append({})
 
+        # apply hard-set cpu resource limits if a cpu_percent is given
+        cpu_percentage = params.get('cpu_percent')
+        if cpu_percentage:
+            params['cpu_period'] = self.net.cpu_period
+            params['cpu_quota'] = self.net.cpu_period * float(cpu_percentage)
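
For example, assuming `self.net.cpu_period` keeps Docker's default CFS period of 100000 microseconds, a `cpu_percent` of 0.5 pins the container to half a core:

```python
cpu_period = 100000                # assumed default CFS period (microseconds)
cpu_quota = int(cpu_period * 0.5)  # 50000 us runtime per period = 50% of one CPU
```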
+
         # create the container
         d = self.net.addDocker(
             "%s" % (name),
@@ -173,9 +187,12 @@ class Datacenter(object):
             dcmd=command,
             datacenter=self,
             flavor_name=flavor_name,
+            environment={'VNF_NAME': name},
             **params
         )
 
         # apply resource limits to container if a resource model is defined
         if self._resource_model is not None:
             try:
diff --git a/src/emuvim/dcemulator/son_emu_simple_switch_13.py b/src/emuvim/dcemulator/son_emu_simple_switch_13.py
new file mode 100755 (executable)
index 0000000..53d1a2e
--- /dev/null
@@ -0,0 +1,134 @@
+# Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ryu.base import app_manager
+from ryu.controller import ofp_event
+from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
+from ryu.controller.handler import set_ev_cls
+from ryu.ofproto import ofproto_v1_3
+from ryu.lib.packet import packet
+from ryu.lib.packet import ethernet
+from ryu.lib.packet import ether_types
+from ryu.topology.event import EventSwitchEnter, EventSwitchLeave, EventSwitchReconnected
+
+class SimpleSwitch13(app_manager.RyuApp):
+    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
+
+    def __init__(self, *args, **kwargs):
+        super(SimpleSwitch13, self).__init__(*args, **kwargs)
+        self.mac_to_port = {}
+
+    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
+    def switch_features_handler(self, ev):
+        datapath = ev.msg.datapath
+        ofproto = datapath.ofproto
+        parser = datapath.ofproto_parser
+
+        # install table-miss flow entry
+        #
+        # We specify NO BUFFER to max_len of the output action due to
+        # OVS bug. At this moment, if we specify a lesser number, e.g.,
+        # 128, OVS will send Packet-In with invalid buffer_id and
+        # truncated packet data. In that case, we cannot output packets
+        # correctly.  The bug has been fixed in OVS v2.1.0.
+        match = parser.OFPMatch()
+        #actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
+        #                                  ofproto.OFPCML_NO_BUFFER)]
+        # packet-in to the controller is disabled here (see the commented lines
+        # above); the NORMAL fallback installed on switch enter handles forwarding,
+        # so this table-miss entry is left without actions (drop)
+        actions = []
+        self.add_flow(datapath, 0, match, actions)
+
+    def add_flow(self, datapath, priority, match, actions, buffer_id=None, table_id=0):
+        ofproto = datapath.ofproto
+        parser = datapath.ofproto_parser
+
+        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
+                                             actions)]
+        if buffer_id:
+            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
+                                    priority=priority, match=match,
+                                    instructions=inst, table_id=table_id)
+        else:
+            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
+                                    match=match, instructions=inst, table_id=table_id)
+        datapath.send_msg(mod)
+
+    # new switch detected
+    @set_ev_cls([EventSwitchEnter, EventSwitchReconnected])
+    def _ev_switch_enter_handler(self, ev):
+        datapath = ev.switch.dp
+        self.logger.info('registered OF switch id: %s' % datapath.id)
+        ofproto = datapath.ofproto
+        self.logger.info('OF version: {0}'.format(ofproto.OFP_VERSION))
+        # send NORMAL action for all undefined flows
+        ofp_parser = datapath.ofproto_parser
+        actions = [ofp_parser.OFPActionOutput(ofproto_v1_3.OFPP_NORMAL)]
+        self.add_flow(datapath, 0, None, actions, table_id=0)
+
+
+    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
+    def _packet_in_handler(self, ev):
+        # If you hit this you might want to increase
+        # the "miss_send_length" of your switch
+        if ev.msg.msg_len < ev.msg.total_len:
+            self.logger.debug("packet truncated: only %s of %s bytes",
+                              ev.msg.msg_len, ev.msg.total_len)
+        msg = ev.msg
+        datapath = msg.datapath
+        ofproto = datapath.ofproto
+        parser = datapath.ofproto_parser
+        in_port = msg.match['in_port']
+
+        pkt = packet.Packet(msg.data)
+        eth = pkt.get_protocols(ethernet.ethernet)[0]
+
+        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
+            # ignore lldp packet
+            return
+        dst = eth.dst
+        src = eth.src
+
+        dpid = datapath.id
+        self.mac_to_port.setdefault(dpid, {})
+
+        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)
+
+        # learn a mac address to avoid FLOOD next time.
+        self.mac_to_port[dpid][src] = in_port
+
+        if dst in self.mac_to_port[dpid]:
+            out_port = self.mac_to_port[dpid][dst]
+        else:
+            out_port = ofproto.OFPP_FLOOD
+
+        actions = [parser.OFPActionOutput(out_port)]
+
+        # install a flow to avoid packet_in next time
+        if out_port != ofproto.OFPP_FLOOD:
+            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
+            # verify if we have a valid buffer_id, if yes avoid to send both
+            # flow_mod & packet_out
+            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
+                self.add_flow(datapath, 1, match, actions, msg.buffer_id)
+                return
+            else:
+                self.add_flow(datapath, 1, match, actions)
+        data = None
+        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
+            data = msg.data
+
+        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
+                                  in_port=in_port, actions=actions, data=data)
+        datapath.send_msg(out)
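
The `table_id` parameter added to `add_flow` lets callers pin entries to a specific table (the NORMAL fallback above goes to table 0). A hedged sketch of an equivalent explicit call, with `parser.OFPMatch()` standing in for the `None` the enter handler passes:

```python
# inside a Ryu event handler that provides `datapath`
parser = datapath.ofproto_parser
actions = [parser.OFPActionOutput(ofproto_v1_3.OFPP_NORMAL)]
self.add_flow(datapath, 0, parser.OFPMatch(), actions, table_id=0)
```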
diff --git a/src/emuvim/examples/sonata_simple.py b/src/emuvim/examples/sonata_simple.py
new file mode 100755 (executable)
index 0000000..0627247
--- /dev/null
@@ -0,0 +1,78 @@
+"""
+Copyright (c) 2015 SONATA-NFV
+ALL RIGHTS RESERVED.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
+nor the names of its contributors may be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+This work has been performed in the framework of the SONATA project,
+funded by the European Commission under Grant number 671517 through
+the Horizon 2020 and 5G-PPP programmes. The authors would like to
+acknowledge the contributions of their colleagues of the SONATA
+partner consortium (www.sonata-nfv.eu).
+"""
+"""
+A simple single-PoP topology.
+
+        (dc1)
+
+- SAP deployment enabled
+- learning switch enabled
+"""
+
+import logging
+from mininet.log import setLogLevel
+from emuvim.dcemulator.net import DCNetwork
+from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
+from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
+from mininet.node import RemoteController
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+    # create topology
+    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
+    dc1 = net.addDatacenter("dc1")
+
+
+    # add the command line interface endpoint to each DC (REST API)
+    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
+    rapi1.connectDCNetwork(net)
+    rapi1.connectDatacenter(dc1)
+    # run API endpoint server (in another thread, don't block)
+    rapi1.start()
+
+    # add the SONATA dummy gatekeeper to each DC
+    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
+    sdkg1.connectDatacenter(dc1)
+    # run the dummy gatekeeper (in another thread, don't block)
+    sdkg1.start()
+
+    # start the emulation platform
+    net.start()
+    net.CLI()
+    net.stop()
+
+
+def main():
+    setLogLevel('info')  # set Mininet loglevel
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/emuvim/examples/sonata_simple_topology.py b/src/emuvim/examples/sonata_simple_topology.py
new file mode 100755 (executable)
index 0000000..0627247
--- /dev/null
@@ -0,0 +1,78 @@
+"""
+Copyright (c) 2015 SONATA-NFV
+ALL RIGHTS RESERVED.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
+nor the names of its contributors may be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+This work has been performed in the framework of the SONATA project,
+funded by the European Commission under Grant number 671517 through
+the Horizon 2020 and 5G-PPP programmes. The authors would like to
+acknowledge the contributions of their colleagues of the SONATA
+partner consortium (www.sonata-nfv.eu).
+"""
+"""
+A simple single-PoP topology.
+
+        (dc1)
+
+- SAP deployment enabled
+- learning switch enabled
+"""
+
+import logging
+from mininet.log import setLogLevel
+from emuvim.dcemulator.net import DCNetwork
+from emuvim.api.rest.rest_api_endpoint import RestApiEndpoint
+from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
+from mininet.node import RemoteController
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+    # create topology
+    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
+    dc1 = net.addDatacenter("dc1")
+
+
+    # add the command line interface endpoint to each DC (REST API)
+    rapi1 = RestApiEndpoint("0.0.0.0", 5001)
+    rapi1.connectDCNetwork(net)
+    rapi1.connectDatacenter(dc1)
+    # run API endpoint server (in another thread, don't block)
+    rapi1.start()
+
+    # add the SONATA dummy gatekeeper to each DC
+    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000, deploy_sap=True)
+    sdkg1.connectDatacenter(dc1)
+    # run the dummy gatekeeper (in another thread, don't block)
+    sdkg1.start()
+
+    # start the emulation platform
+    net.start()
+    net.CLI()
+    net.stop()
+
+
+def main():
+    setLogLevel('info')  # set Mininet loglevel
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
old mode 100644 (file)
new mode 100755 (executable)
index df7a6ac..b38720e
@@ -46,7 +46,7 @@ logging.basicConfig(level=logging.INFO)
 
 def create_topology1():
     # create topology
-    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=False)
+    net = DCNetwork(controller=RemoteController, monitor=True, enable_learning=True)
     dc1 = net.addDatacenter("dc1")
     dc2 = net.addDatacenter("dc2")
     s1 = net.addSwitch("s1")
index 614ecd2..57d4aa5 100755 (executable)
@@ -108,7 +108,7 @@ class SimpleTestTopology(unittest.TestCase):
         Helper to interact with local docker instance.
         """
         if self.docker_cli is None:
-            self.docker_cli = docker.Client(
+            self.docker_cli = docker.APIClient(
                 base_url='unix://var/run/docker.sock')
         return self.docker_cli
 
index 1b06f25..397e2a1 100755 (executable)
@@ -100,7 +100,7 @@ class SimpleTestTopology(unittest.TestCase):
         Helper to interact with local docker instance.
         """
         if self.docker_cli is None:
-            self.docker_cli = docker.Client(
+            self.docker_cli = docker.APIClient(
                 base_url='unix://var/run/docker.sock')
         return self.docker_cli
 
index c96e1ba..bfa9541 100755 (executable)
@@ -44,7 +44,7 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
 #    @unittest.skip("disabled")
     def test_GK_Api_start_service(self):
         # create network
-        self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
+        self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0, enable_learning=True)
         # setup links
         self.net.addLink(self.dc[0], self.h[0])
         self.net.addLink(self.dc[0], self.dc[1])
@@ -87,9 +87,37 @@ class testSonataDummyGatekeeper(SimpleTestTopology):
         # check compute list result
         self.assertEqual(len(self.dc[0].listCompute()), 2)
         # check connectivity by using ping
+        ELAN_list = []
+        for i in [0]:
+            for vnf in self.dc[i].listCompute():
+                # check connection
+                p = self.net.ping([self.h[i], vnf])
+                print p
+                self.assertTrue(p <= 0.0)
+
+                # check E-LAN connection
+                network_list = vnf.getNetworkStatus()
+                mgmt_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == 'mgmt']
+                self.assertTrue(len(mgmt_ip) > 0)
+                ip_address = mgmt_ip[0]
+                ELAN_list.append(ip_address)
+                print ip_address
+
+        # check E-LAN connectivity by pinging over the mgmt network (needs to be configured as an E-LAN in the test service)
         for vnf in self.dc[0].listCompute():
-            p = self.net.ping([self.h[0], vnf])
-            self.assertTrue(p <= 50.0)
+            network_list = vnf.getNetworkStatus()
+            mgmt_ip = [intf['ip'] for intf in network_list if intf['intf_name'] == 'mgmt']
+            self.assertTrue(len(mgmt_ip) > 0)
+            ip_address = mgmt_ip[0]
+            print ELAN_list
+            print ip_address
+            test_ip_list = list(ELAN_list)
+            test_ip_list.remove(ip_address)
+            for ip in test_ip_list:
+                p = self.net.ping([vnf], manualdestip=ip)
+                print p
+                self.assertTrue(p <= 0.0)
+
         # stop Mininet network
         self.stopNet()
         initialize_GK()
index 31da835..facbb0e 100755 (executable)
@@ -24,7 +24,7 @@
 # acknowledge the contributions of their colleagues of the SONATA
 # partner consortium (www.sonata-nfv.eu).
 
-FROM mpeuster/containernet
+FROM containernet/containernet
 MAINTAINER manuel@peuster.de
 
 ENV SON_EMU_IN_DOCKER 1