Moving emuvim into the src directory
author     cgeoffroy <geoffroy.chollon@thalesgroup.com>
Thu, 3 Mar 2016 17:18:13 +0000 (18:18 +0100)
committer  cgeoffroy <geoffroy.chollon@thalesgroup.com>
Thu, 3 Mar 2016 17:18:13 +0000 (18:18 +0100)
39 files changed:
README.md
emuvim/api/__init__.py [deleted file]
emuvim/api/zerorpcapi.py [deleted file]
emuvim/api/zerorpcapi_DCNetwork.py [deleted file]
emuvim/cli/__init__.py [deleted file]
emuvim/cli/compute.py [deleted file]
emuvim/cli/datacenter.py [deleted file]
emuvim/cli/monitor.py [deleted file]
emuvim/cli/network.py [deleted file]
emuvim/cli/son-emu-cli [deleted file]
emuvim/dcemulator/__init__.py [deleted file]
emuvim/dcemulator/link.py [deleted file]
emuvim/dcemulator/monitoring.py [deleted file]
emuvim/dcemulator/net.py [deleted file]
emuvim/dcemulator/node.py [deleted file]
emuvim/example_topology.py [deleted file]
emuvim/test/__main__.py [deleted file]
emuvim/test/runner.py [deleted file]
emuvim/test/test_api_zerorpc.py [deleted file]
emuvim/test/test_emulator.py [deleted file]
src/emuvim/api/__init__.py [new file with mode: 0755]
src/emuvim/api/zerorpcapi.py [new file with mode: 0755]
src/emuvim/api/zerorpcapi_DCNetwork.py [new file with mode: 0755]
src/emuvim/cli/__init__.py [new file with mode: 0755]
src/emuvim/cli/compute.py [new file with mode: 0755]
src/emuvim/cli/datacenter.py [new file with mode: 0755]
src/emuvim/cli/monitor.py [new file with mode: 0755]
src/emuvim/cli/network.py [new file with mode: 0755]
src/emuvim/cli/son-emu-cli [new file with mode: 0755]
src/emuvim/dcemulator/__init__.py [new file with mode: 0755]
src/emuvim/dcemulator/link.py [new file with mode: 0755]
src/emuvim/dcemulator/monitoring.py [new file with mode: 0755]
src/emuvim/dcemulator/net.py [new file with mode: 0755]
src/emuvim/dcemulator/node.py [new file with mode: 0755]
src/emuvim/example_topology.py [new file with mode: 0755]
src/emuvim/test/__main__.py [new file with mode: 0755]
src/emuvim/test/runner.py [new file with mode: 0755]
src/emuvim/test/test_api_zerorpc.py [new file with mode: 0755]
src/emuvim/test/test_emulator.py [new file with mode: 0755]

diff --git a/README.md b/README.md
index c3fa61f..e2b6658 100755 (executable)
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ Contributors:
  * (This will be replaced / extended by a REST API later)
 
 ### Project structure
-* **emuvim/** all emulator code 
+* **src/emuvim/** all emulator code 
  * **api/** Data center API endpoint implementations (zerorpc, OpenStack REST, ...)
  * **cli/** CLI client to interact with a running emulator
  * **dcemulator/** Dockernet wrapper that introduces the notion of data centers and API endpoints
@@ -44,10 +44,10 @@ Automatic installation is provided through Ansible playbooks.
 
 ### Run
 * First terminal:
- * `cd ~/son-emu/emuvim`
+ * `cd ~/son-emu/src/emuvim`
  * `sudo python example_topology.py`
 * Second terminal:
- * `cd ~/son-emu/emuvim/cli`
+ * `cd ~/son-emu/src/emuvim/cli`
  * `./son-emu-cli compute start -d datacenter1 -n vnf1`
  * `./son-emu-cli compute start -d datacenter1 -n vnf2`
  * `./son-emu-cli compute list`
@@ -61,7 +61,7 @@ Automatic installation is provided through Ansible playbooks.
  * `./start_example_chain` sets up an example service chain, using the example docker container from `package_samples` https://github.com/sonata-nfv/packaging_samples/tree/master/VNFs
 
 ### Run Unit Tests
-* `cd ~/son-emu/emuvim`
+* `cd ~/son-emu/src/emuvim`
 * `sudo python test` or `sudo python test -v` for more outputs
 
 ### CLI
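
With the emulator code now under src/, anything importing the emuvim packages from outside that tree has to put src/emuvim on the Python path first (the modules import each other relative to that directory, e.g. `from dcemulator.net import DCNetwork` in example_topology.py). A minimal sketch, assuming a checkout at ~/son-emu:

    import os
    import sys
    sys.path.insert(0, os.path.expanduser("~/son-emu/src/emuvim"))  # assumed checkout location
    from dcemulator.net import DCNetwork  # same import style as used in example_topology.py
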
diff --git a/emuvim/api/__init__.py b/emuvim/api/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/emuvim/api/zerorpcapi.py b/emuvim/api/zerorpcapi.py
deleted file mode 100755 (executable)
index 59b960c..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-"""
-Distributed Cloud Emulator (dcemulator)
-(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
-"""
-
-import logging
-import threading
-import zerorpc
-
-logging.basicConfig(level=logging.INFO)
-
-
-class ZeroRpcApiEndpoint(object):
-    """
-    Simple API endpoint that offers a zerorpc-based
-    interface. This interface will be used by the
-    default command line client.
-    It can be used as a reference to implement
-    REST interfaces providing the same semantics,
-    e.g. the OpenStack compute API.
-    """
-
-    def __init__(self, listenip, port):
-        self.dcs = {}
-        self.ip = listenip
-        self.port = port
-        logging.debug("Created API endpoint %s(%s:%d)" % (
-            self.__class__.__name__, self.ip, self.port))
-
-    def connectDatacenter(self, dc):
-        self.dcs[dc.label] = dc
-        logging.info("Connected DC(%s) to API endpoint %s(%s:%d)" % (
-            dc.label, self.__class__.__name__, self.ip, self.port))
-
-    def start(self):
-        thread = threading.Thread(target=self._api_server_thread, args=())
-        thread.daemon = True
-        thread.start()
-        logging.debug("Started API endpoint %s(%s:%d)" % (
-            self.__class__.__name__, self.ip, self.port))
-
-    def _api_server_thread(self):
-        s = zerorpc.Server(MultiDatacenterApi(self.dcs))
-        s.bind("tcp://%s:%d" % (self.ip, self.port))
-        s.run()
-
-
-class MultiDatacenterApi(object):
-    """
-        Just pass through the corresponding request to the
-        selected data center. Do not implement provisioning
-        logic here because we will have multiple API
-        endpoint implementations in the end.
-    """
-
-    def __init__(self, dcs):
-        self.dcs = dcs
-
-    def compute_action_start(self, dc_label, compute_name, image, command, network):
-        """
-        Start a new compute instance: A docker container
-        :param dc_label: name of the DC
-        :param compute_name: compute container name
-        :param image: image name
-        :param command: command to execute
-        :param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
-        :return: name of the started compute instance
-        """
-        # TODO what to return UUID / given name / internal name ?
-        logging.debug("RPC CALL: compute start")
-        try:
-            c = self.dcs.get(dc_label).startCompute(
-                compute_name, image=image, command=command, network=network)
-            return str(c.name)
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def compute_action_stop(self, dc_label, compute_name):
-        logging.debug("RPC CALL: compute stop")
-        try:
-            return self.dcs.get(dc_label).stopCompute(compute_name)
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def compute_list(self, dc_label):
-        logging.debug("RPC CALL: compute list")
-        try:
-            if dc_label is None:
-                # return list with all compute nodes in all DCs
-                all_containers = []
-                for dc in self.dcs.itervalues():
-                    all_containers += dc.listCompute()
-                return [(c.name, c.getStatus())
-                        for c in all_containers]
-            else:
-                # return list of compute nodes for specified DC
-                return [(c.name, c.getStatus())
-                        for c in self.dcs.get(dc_label).listCompute()]
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def compute_status(self, dc_label, compute_name):
-        logging.debug("RPC CALL: compute status")
-        try:
-            return self.dcs.get(
-                dc_label).containers.get(compute_name).getStatus()
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def datacenter_list(self):
-        logging.debug("RPC CALL: datacenter list")
-        try:
-            return [d.getStatus() for d in self.dcs.itervalues()]
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def datacenter_status(self, dc_label):
-        logging.debug("RPC CALL: datacenter status")
-        try:
-                return self.dcs.get(dc_label).getStatus()
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
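
For context, this endpoint is wired up in example_topology.py further down in this commit; a minimal sketch, assuming a DCNetwork instance `net` already exists and using an example data center label:

    from api.zerorpcapi import ZeroRpcApiEndpoint

    zapi = ZeroRpcApiEndpoint("0.0.0.0", 4242)                # listen address and port
    zapi.connectDatacenter(net.addDatacenter("datacenter1"))  # expose this DC via RPC
    zapi.start()  # the zerorpc server runs in a daemon thread, so this does not block
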
diff --git a/emuvim/api/zerorpcapi_DCNetwork.py b/emuvim/api/zerorpcapi_DCNetwork.py
deleted file mode 100755 (executable)
index 27527aa..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-"""\r
-Distributed Cloud Emulator (dcemulator)\r
-(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>\r
-"""\r
-\r
-import logging\r
-import threading\r
-import zerorpc\r
-\r
-\r
-logging.basicConfig(level=logging.INFO)\r
-\r
-\r
-class ZeroRpcApiEndpointDCNetwork(object):\r
-    """\r
-    Simple API endpoint that offers a zerorpc-based\r
-    interface. This interface will be used by the\r
-    default command line client.\r
-    It can be used as a reference to implement\r
-    REST interfaces providing the same semantics,\r
-    e.g. the OpenStack compute API.\r
-    """\r
-\r
-    def __init__(self, listenip, port, DCNetwork=None):\r
-        if DCNetwork :\r
-            self.connectDCNetwork(DCNetwork)\r
-        self.ip = listenip\r
-        self.port = port\r
-        logging.debug("Created monitoring API endpoint %s(%s:%d)" % (\r
-            self.__class__.__name__, self.ip, self.port))\r
-\r
-    def connectDCNetwork(self, net):\r
-        self.net = net\r
-        logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (\r
-            self.__class__.__name__, self.ip, self.port))\r
-\r
-    def start(self):\r
-        thread = threading.Thread(target=self._api_server_thread, args=())\r
-        thread.daemon = True\r
-        thread.start()\r
-        logging.debug("Started API endpoint %s(%s:%d)" % (\r
-            self.__class__.__name__, self.ip, self.port))\r
-\r
-    def _api_server_thread(self):\r
-        s = zerorpc.Server(DCNetworkApi(self.net))\r
-        s.bind("tcp://%s:%d" % (self.ip, self.port))\r
-        s.run()\r
-\r
-    def stop(self):\r
-        logging.info("Stop the monitoring API endpoint")\r
-        return\r
-\r
-\r
-class DCNetworkApi(object):\r
-    """\r
-        The networking and monitoring commands need the scope of the\r
-        whole DC network to find the requested vnf. So this API is intended\r
-        to work with a DCNetwork.\r
-        Just pass through the corresponding request to the\r
-        selected data center network. Do not implement provisioning\r
-        logic here because we will have multiple API\r
-        endpoint implementations in the end.\r
-    """\r
-\r
-    def __init__(self, net):\r
-        self.net = net\r
-\r
-    def network_action_start(self, vnf_src_name, vnf_dst_name):\r
-        # call DCNetwork method, not really datacenter specific API for now...\r
-        # provided dc name needs to be part of API endpoint\r
-        # no check if vnfs are really connected to this datacenter...\r
-        logging.debug("RPC CALL: network chain start")\r
-        try:\r
-            c = self.net.setChain(\r
-                vnf_src_name, vnf_dst_name)\r
-            return str(c)\r
-        except Exception as ex:\r
-            logging.exception("RPC error.")\r
-            return ex.message\r
-\r
-    def network_action_stop(self, vnf_src_name, vnf_dst_name):\r
-        # call DCNetwork method, not really datacenter specific API for now...\r
-        # provided dc name needs to be part of API endpoint\r
-        # no check if vnfs are really connected to this datacenter...\r
-        logging.debug("RPC CALL: network chain stop")\r
-        try:\r
-            c = self.net.setChain(\r
-                vnf_src_name, vnf_dst_name, cmd='del-flows')\r
-            return c\r
-        except Exception as ex:\r
-            logging.exception("RPC error.")\r
-            return ex.message\r
-\r
-    # get egress(default) or ingress rate of a vnf\r
-    def monitor_get_rate(self, vnf_name, direction):\r
-        logging.debug("RPC CALL: get rate")\r
-        try:\r
-            c = self.net.monitor_agent.get_rate(vnf_name, direction)\r
-            return c\r
-        except Exception as ex:\r
-            logging.exception("RPC error.")\r
-            return ex.message\r
-\r
-\r
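
The CLI modules below reach this endpoint through a plain zerorpc client. A minimal sketch of the same calls, assuming the monitoring endpoint listens on port 5151 (as in example_topology.py) and that containers named vnf1/vnf2 are running:

    import zerorpc

    c = zerorpc.Client()
    c.connect("tcp://127.0.0.1:5151")              # port used by ZeroRpcApiEndpointDCNetwork
    print(c.network_action_start("vnf1", "vnf2"))  # install a chain between the two VNFs
    print(c.monitor_get_rate("vnf1", "tx"))        # egress rate of vnf1
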
diff --git a/emuvim/cli/__init__.py b/emuvim/cli/__init__.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/emuvim/cli/compute.py b/emuvim/cli/compute.py
deleted file mode 100755 (executable)
index 70de20a..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-"""
-son-emu compute CLI
-(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>
-"""
-
-import argparse
-import pprint
-from tabulate import tabulate
-import zerorpc
-
-
-pp = pprint.PrettyPrinter(indent=4)
-
-
-class ZeroRpcClient(object):
-
-    def __init__(self):
-        self.c = zerorpc.Client()
-        self.c.connect("tcp://127.0.0.1:4242")  # TODO hard coded for now. we'll change this later
-        self.cmds = {}
-
-    def execute_command(self, args):
-        if getattr(self, args["command"], None) is not None:
-            # call the local method with the same name as the command arg
-            getattr(self, args["command"])(args)
-        else:
-            print "Command not implemented."
-
-    def start(self, args):
-        nw_list = list()
-        if args.get("network") is not None:
-            networks = args.get("network").split(",")
-            for nw in networks:
-                nw_list.append({"ip": nw})
-        r = self.c.compute_action_start(
-            args.get("datacenter"),
-            args.get("name"),
-            args.get("image"),
-            args.get("docker_command"),
-            nw_list)
-        pp.pprint(r)
-
-    def stop(self, args):
-        r = self.c.compute_action_stop(
-            args.get("datacenter"), args.get("name"))
-        pp.pprint(r)
-
-    def list(self, args):
-        r = self.c.compute_list(
-            args.get("datacenter"))
-        table = []
-        for c in r:
-            # for each container add a line to the output table
-            if len(c) > 1:
-                name = c[0]
-                status = c[1]
-                eth0ip = None
-                eth0status = "down"
-                if len(status.get("network")) > 0:
-                    eth0ip = status.get("network")[0][1]
-                    eth0status = "up" if status.get(
-                        "network")[0][3] else "down"
-                table.append([status.get("datacenter"),
-                              name,
-                              status.get("image"),
-                              eth0ip,
-                              eth0status,
-                              status.get("state").get("Status")])
-        headers = ["Datacenter",
-                   "Container",
-                   "Image",
-                   "eth0 IP",
-                   "eth0 status",
-                   "Status"]
-        print tabulate(table, headers=headers, tablefmt="grid")
-
-    def status(self, args):
-        r = self.c.compute_status(
-            args.get("datacenter"), args.get("name"))
-        pp.pprint(r)
-
-
-parser = argparse.ArgumentParser(description='son-emu compute')
-parser.add_argument(
-    "command",
-    choices=['start', 'stop', 'list', 'status'],
-    help="Action to be executed.")
-parser.add_argument(
-    "--datacenter", "-d", dest="datacenter",
-    help="Data center to in which the compute instance should be executed")
-parser.add_argument(
-    "--name", "-n", dest="name",
-    help="Name of compute instance e.g. 'vnf1'")
-parser.add_argument(
-    "--image","-i", dest="image",
-    help="Name of container image to be used e.g. 'ubuntu'")
-parser.add_argument(
-    "--dcmd", "-c", dest="docker_command",
-    help="Startup command of the container e.g. './start.sh'")
-parser.add_argument(
-    "--net", dest="network",
-    help="Network properties of compute instance e.g. \
-          '10.0.0.123/8' or '10.0.0.123/8,11.0.0.123/24' for multiple interfaces.")
-
-
-def main(argv):
-    args = vars(parser.parse_args(argv))
-    c = ZeroRpcClient()
-    c.execute_command(args)
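
Because the tool exposes main(argv), it can be driven programmatically as well as through the son-emu-cli wrapper. A sketch, equivalent to the README commands above (data center and VNF names are example values):

    from cli import compute

    compute.main(["start", "-d", "datacenter1", "-n", "vnf1"])  # like: ./son-emu-cli compute start ...
    compute.main(["list"])                                      # prints the tabulated container overview
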
diff --git a/emuvim/cli/datacenter.py b/emuvim/cli/datacenter.py
deleted file mode 100755 (executable)
index c3850fc..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-son-emu datacenter CLI
-(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>
-"""
-
-import argparse
-import pprint
-from tabulate import tabulate
-import zerorpc
-
-
-pp = pprint.PrettyPrinter(indent=4)
-
-
-class ZeroRpcClient(object):
-
-    def __init__(self):
-        self.c = zerorpc.Client()
-        self.c.connect("tcp://127.0.0.1:4242")  # TODO hard coded for now. we'll change this later
-        self.cmds = {}
-
-    def execute_command(self, args):
-        if getattr(self, args["command"], None) is not None:
-            # call the local method with the same name as the command arg
-            getattr(self, args["command"])(args)
-        else:
-            print "Command not implemented."
-
-    def list(self, args):
-        r = self.c.datacenter_list()
-        table = []
-        for d in r:
-            # for each dc add a line to the output table
-            if len(d) > 0:
-                table.append([d.get("label"),
-                              d.get("internalname"),
-                              d.get("switch"),
-                              d.get("n_running_containers"),
-                              len(d.get("metadata"))])
-        headers = ["Label",
-                   "Internal Name",
-                   "Switch",
-                   "# Containers",
-                   "# Metadata Items"]
-        print tabulate(table, headers=headers, tablefmt="grid")
-
-    def status(self, args):
-        r = self.c.datacenter_status(
-            args.get("datacenter"))
-        pp.pprint(r)
-
-
-parser = argparse.ArgumentParser(description='son-emu datacenter')
-parser.add_argument(
-    "command",
-    choices=['list', 'status'],
-    help="Action to be executed.")
-parser.add_argument(
-    "--datacenter", "-d", dest="datacenter",
-    help="Data center to which the command should be applied.")
-
-
-def main(argv):
-    args = vars(parser.parse_args(argv))
-    c = ZeroRpcClient()
-    c.execute_command(args)
diff --git a/emuvim/cli/monitor.py b/emuvim/cli/monitor.py
deleted file mode 100755 (executable)
index 6885a3c..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-"""\r
-son-emu monitor CLI\r
-(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>\r
-"""\r
-\r
-import argparse\r
-import pprint\r
-from tabulate import tabulate\r
-import zerorpc\r
-\r
-\r
-pp = pprint.PrettyPrinter(indent=4)\r
-\r
-class ZeroRpcClient(object):\r
-\r
-    def __init__(self):\r
-        self.c = zerorpc.Client()\r
-        # TODO connect to DCNetwork API\r
-        #self.c.connect("tcp://127.0.0.1:4242")  # TODO hard coded for now. we'll change this later\r
-        self.c.connect("tcp://127.0.0.1:5151")\r
-        self.cmds = {}\r
-\r
-    def execute_command(self, args):\r
-        if getattr(self, args["command"], None) is not None:\r
-            # call the local method with the same name as the command arg\r
-            getattr(self, args["command"])(args)\r
-        else:\r
-            print "Command not implemented."\r
-\r
-    def get_rate(self, args):\r
-        r = self.c.monitor_get_rate(\r
-            args.get("vnf_name"),\r
-            args.get("direction"))\r
-        pp.pprint(r)\r
-\r
-\r
-parser = argparse.ArgumentParser(description='son-emu monitor')\r
-parser.add_argument(\r
-    "command",\r
-    help="Action to be executed: get_rate")\r
-parser.add_argument(\r
-    "--vnf_name", "-vnf", dest="vnf_name",\r
-    help="vnf name to be monitored")\r
-parser.add_argument(\r
-    "--direction", "-d", dest="direction",\r
-    help="in (ingress rate) or out (egress rate)")\r
-\r
-def main(argv):\r
-    print "This is the son-emu monitor CLI."\r
-    print "Arguments: %s" % str(argv)\r
-    args = vars(parser.parse_args(argv))\r
-    c = ZeroRpcClient()\r
-    c.execute_command(args)\r
diff --git a/emuvim/cli/network.py b/emuvim/cli/network.py
deleted file mode 100755 (executable)
index 8d4219b..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-"""\r
-son-emu network CLI\r
-(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>\r
-"""\r
-\r
-import argparse\r
-import pprint\r
-from tabulate import tabulate\r
-import zerorpc\r
-\r
-\r
-pp = pprint.PrettyPrinter(indent=4)\r
-\r
-class ZeroRpcClient(object):\r
-\r
-    def __init__(self):\r
-        self.c = zerorpc.Client()\r
-        # TODO connect to DCNetwork API\r
-        #self.c.connect("tcp://127.0.0.1:4242")  # TODO hard coded for now. we'll change this later\r
-        self.c.connect("tcp://127.0.0.1:5151")\r
-        self.cmds = {}\r
-\r
-    def execute_command(self, args):\r
-        if getattr(self, args["command"], None) is not None:\r
-            # call the local method with the same name as the command arg\r
-            getattr(self, args["command"])(args)\r
-        else:\r
-            print "Command not implemented."\r
-\r
-    def add(self, args):\r
-        r = self.c.network_action_start(\r
-            #args.get("datacenter"),\r
-            args.get("source"),\r
-            args.get("destination"))\r
-        pp.pprint(r)\r
-\r
-    def remove(self, args):\r
-        r = self.c.network_action_stop(\r
-            #args.get("datacenter"),\r
-            args.get("source"),\r
-            args.get("destination"))\r
-        pp.pprint(r)\r
-\r
-\r
-parser = argparse.ArgumentParser(description='son-emu network')\r
-parser.add_argument(\r
-    "command",\r
-    help="Action to be executed: add|remove")\r
-parser.add_argument(\r
-    "--datacenter", "-d", dest="datacenter",\r
-    help="Data center to in which the network action should be initiated")\r
-parser.add_argument(\r
-    "--source", "-src", dest="source",\r
-    help="vnf name of the source of the chain")\r
-parser.add_argument(\r
-    "--destination", "-dst", dest="destination",\r
-    help="vnf name of the destination of the chain")\r
-\r
-def main(argv):\r
-    args = vars(parser.parse_args(argv))\r
-    c = ZeroRpcClient()\r
-    c.execute_command(args)\r
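
The network tool follows the same pattern; a sketch of adding and removing a chain (VNF names are example values):

    from cli import network

    network.main(["add", "--source", "vnf1", "--destination", "vnf2"])
    network.main(["remove", "--source", "vnf1", "--destination", "vnf2"])
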
diff --git a/emuvim/cli/son-emu-cli b/emuvim/cli/son-emu-cli
deleted file mode 100755 (executable)
index 61cbd43..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/python
-"""
- Simple CLI client to interact with a running emulator.
-
- (c) 2016 by Manuel Peuster <manuel.peuster@upb.de>
-
- The CLI offers different tools, e.g., compute, network, ...
- Each of these tools is implemented as an independent Python
- module.
-
- cli compute start dc1 my_name flavor_a
- cli network create dc1 11.0.0.0/24
-"""
-
-import sys
-import compute
-import network
-import datacenter
-import monitor
-
-def main():
-    if len(sys.argv) < 2:
-        print "Usage: son-emu-cli <toolname> <arguments>"
-        exit(0)
-    if sys.argv[1] == "compute":
-        compute.main(sys.argv[2:])
-    elif sys.argv[1] == "network":
-        network.main(sys.argv[2:])
-    elif sys.argv[1] == "datacenter":
-        datacenter.main(sys.argv[2:])
-    elif sys.argv[1] == "monitor":
-        monitor.main(sys.argv[2:])
-
-if __name__ == '__main__':
-    main()
diff --git a/emuvim/dcemulator/__init__.py b/emuvim/dcemulator/__init__.py
deleted file mode 100755 (executable)
index 64f6616..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-"""
-Distributed Cloud Emulator (dcemulator)
-(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
-"""
\ No newline at end of file
diff --git a/emuvim/dcemulator/link.py b/emuvim/dcemulator/link.py
deleted file mode 100755 (executable)
index e69de29..0000000
diff --git a/emuvim/dcemulator/monitoring.py b/emuvim/dcemulator/monitoring.py
deleted file mode 100755 (executable)
index 094c09b..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-__author__ = 'Administrator'\r
-\r
-import urllib2\r
-import logging\r
-from mininet.node import  OVSSwitch\r
-import ast\r
-logging.basicConfig(level=logging.INFO)\r
-\r
-"""\r
-class to read OpenFlow stats from the Ryu controller of the DCNetwork\r
-"""\r
-\r
-class DCNetworkMonitor():\r
-    def __init__(self, net):\r
-        self.net = net\r
-        # link to REST_API\r
-        self.ip = '0.0.0.0'\r
-        self.port = '8080'\r
-        self.REST_api = 'http://{0}:{1}'.format(self.ip,self.port)\r
-\r
-\r
-    def get_rate(self, vnf_name, direction='tx'):\r
-        try:\r
-            vnf_switch = self.net.DCNetwork_graph.neighbors(str(vnf_name))\r
-\r
-            if len(vnf_switch) > 1:\r
-                logging.info("vnf: {0} has multiple ports".format(vnf_name))\r
-                return\r
-            elif len(vnf_switch) == 0:\r
-                logging.info("vnf: {0} is not connected".format(vnf_name))\r
-                return\r
-            else:\r
-                vnf_switch = vnf_switch[0]\r
-            next_node = self.net.getNodeByName(vnf_switch)\r
-\r
-            if not isinstance( next_node, OVSSwitch ):\r
-                logging.info("vnf: {0} is not connected to switch".format(vnf_name))\r
-                return\r
-\r
-            mon_port = self.net.DCNetwork_graph[vnf_name][vnf_switch]['dst_port']\r
-            switch_dpid = int(str(next_node.dpid), 16)\r
-\r
-            ret = self.REST_cmd('stats/port', switch_dpid)\r
-            port_stat_dict = ast.literal_eval(ret)\r
-            for port_stat in port_stat_dict[str(switch_dpid)]:\r
-                if port_stat['port_no'] == mon_port:\r
-                    return port_stat\r
-                    break\r
-\r
-            return ret\r
-\r
-        except Exception as ex:\r
-            logging.exception("get_txrate error.")\r
-            return ex.message\r
-\r
-\r
-\r
-    def REST_cmd(self, prefix, dpid):\r
-        url = self.REST_api + '/' + str(prefix) + '/' + str(dpid)\r
-        req = urllib2.Request(url)\r
-        ret = urllib2.urlopen(req).read()\r
-        return ret
\ No newline at end of file
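
REST_cmd queries Ryu's ofctl_rest application, which DCNetwork.startRyu (net.py, below) launches alongside the controller. A sketch of the raw request, with 1001 as an assumed switch dpid:

    import urllib2
    import ast

    ret = urllib2.urlopen("http://0.0.0.0:8080/stats/port/1001").read()
    stats = ast.literal_eval(ret)  # {dpid: [port_stat, ...]}, parsed the same way as above
    for port_stat in stats["1001"]:
        print(port_stat["port_no"], port_stat["rx_bytes"], port_stat["tx_bytes"])
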
diff --git a/emuvim/dcemulator/net.py b/emuvim/dcemulator/net.py
deleted file mode 100755 (executable)
index 20ab33a..0000000
+++ /dev/null
@@ -1,206 +0,0 @@
-"""
-Distributed Cloud Emulator (dcemulator)
-(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
-"""
-import logging
-
-import site
-from subprocess import Popen
-import os
-
-from mininet.net import Dockernet
-from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController
-from mininet.cli import CLI
-from mininet.log import setLogLevel, info, debug
-from mininet.link import TCLink, Link
-import networkx as nx
-from monitoring import DCNetworkMonitor
-
-from node import Datacenter, EmulatorCompute
-
-
-class DCNetwork(Dockernet):
-    """
-    Wraps the original Mininet/Dockernet class and provides
-    methods to add data centers, switches, etc.
-
-    This class is used by topology definition scripts.
-    """
-
-    def __init__(self, **kwargs):
-        self.dcs = {}
-        # create a Mininet/Dockernet network
-        # call original Docker.__init__ and setup default controller
-        #Dockernet.__init__(
-        #    self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
-        Dockernet.__init__(
-            self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
-        self.addController('c0', controller=RemoteController)
-
-        # graph of the complete DC network
-        self.DCNetwork_graph=nx.DiGraph()
-
-        # monitoring agent
-        self.monitor_agent = DCNetworkMonitor(self)
-
-        # start Ryu controller
-        self.startRyu()
-
-
-    def addDatacenter(self, label, metadata={}):
-        """
-        Create and add a logical cloud data center to the network.
-        """
-        if label in self.dcs:
-            raise Exception("Data center label already exists: %s" % label)
-        dc = Datacenter(label, metadata=metadata)
-        dc.net = self  # set reference to network
-        self.dcs[label] = dc
-        dc.create()  # finally create the data center in our Mininet instance
-        logging.info("added data center: %s" % label)
-        return dc
-
-    def addLink(self, node1, node2, **params):
-        """
-        Able to handle Datacenter objects as link
-        end points.
-        """
-        assert node1 is not None
-        assert node2 is not None
-        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
-        # ensure type of node1
-        if isinstance( node1, basestring ):
-            if node1 in self.dcs:
-                node1 = self.dcs[node1].switch
-        if isinstance( node1, Datacenter ):
-            node1 = node1.switch
-        # ensure type of node2
-        if isinstance( node2, basestring ):
-            if node2 in self.dcs:
-                node2 = self.dcs[node2].switch
-        if isinstance( node2, Datacenter ):
-            node2 = node2.switch
-        # try to give containers a default IP
-        if isinstance( node1, Docker ):
-            if not "params1" in params:
-                params["params1"] = {}
-            if not "ip" in params["params1"]:
-                params["params1"]["ip"] = self.getNextIp()
-        if isinstance( node2, Docker ):
-            if not "params2" in params:
-                params["params2"] = {}
-            if not "ip" in params["params2"]:
-                params["params2"]["ip"] = self.getNextIp()
-
-        link = Dockernet.addLink(self, node1, node2, **params)  # TODO we need TCLinks with user defined performance here
-
-        # add edge and assigned port number to graph in both directions between node1 and node2
-        self.DCNetwork_graph.add_edge(node1.name, node2.name, \
-                                      {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]})
-        self.DCNetwork_graph.add_edge(node2.name, node1.name, \
-                                       {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]})
-
-        return link
-
-    def addDocker( self, label, **params ):
-        """
-        Wrapper for addDocker method to use custom container class.
-        """
-        self.DCNetwork_graph.add_node(label)
-        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)
-
-    def removeDocker( self, label, **params ):
-        """
-        Wrapper for removeDocker method to update graph.
-        """
-        self.DCNetwork_graph.remove_node(label)
-        return Dockernet.removeDocker(self, label, **params)
-
-    def addSwitch( self, name, add_to_graph=True, **params ):
-        """
-        Wrapper for addSwitch method to store switch also in graph.
-        """
-        if add_to_graph:
-            self.DCNetwork_graph.add_node(name)
-        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
-
-    def getAllContainers(self):
-        """
-        Returns a list with all containers within all data centers.
-        """
-        all_containers = []
-        for dc in self.dcs.itervalues():
-            all_containers += dc.listCompute()
-        return all_containers
-
-    def start(self):
-        # start
-        for dc in self.dcs.itervalues():
-            dc.start()
-        Dockernet.start(self)
-
-    def stop(self):
-        # stop Ryu controller
-        self.ryu_process.terminate()
-        #self.ryu_process.kill()
-        Dockernet.stop(self)
-
-    def CLI(self):
-        CLI(self)
-
-    # to remove chain do setChain( src, dst, cmd='del-flows')
-    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
-        # get shortest path
-        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
-        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
-
-        current_hop = vnf_src_name
-        for i in range(0,len(path)):
-            next_hop = path[path.index(current_hop)+1]
-            next_node = self.getNodeByName(next_hop)
-
-            if next_hop == vnf_dst_name:
-                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
-            elif not isinstance( next_node, OVSSwitch ):
-                logging.info("Next node: {0} is not a switch".format(next_hop))
-                return "Next node: {0} is not a switch".format(next_hop)
-
-
-            switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port']
-            next2_hop = path[path.index(current_hop)+2]
-            switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port']
-
-            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
-            # set flow entry via ovs-ofctl
-            # TODO use rest API of ryu to set flow entries to correct switch dpid
-            if isinstance( next_node, OVSSwitch ):
-                match = 'in_port=%s' % switch_inport
-
-                if cmd=='add-flow':
-                    action = 'action=%s' % switch_outport
-                    s = ','
-                    ofcmd = s.join([match,action])
-                elif cmd=='del-flows':
-                    ofcmd = match
-                else:
-                    ofcmd=''
-
-                next_node.dpctl(cmd, ofcmd)
-
-            current_hop = next_hop
-
-        return "destination node: {0} not reached".format(vnf_dst_name)
-
-    # start Ryu Openflow controller as Remote Controller for the DCNetwork
-    def startRyu(self):
-        # start Ryu controller with rest-API
-        python_install_path = site.getsitepackages()[0]
-        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
-        ryu_path2 =  python_install_path + '/ryu/app/ofctl_rest.py'
-        # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
-        # Ryu still uses 6633 as default
-        ryu_option = '--ofp-tcp-listen-port'
-        ryu_of_port = '6653'
-        ryu_cmd =  'ryu-manager'
-        FNULL = open(os.devnull, 'w')
-        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
\ No newline at end of file
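
From a topology script, the chaining API above is used like this (a sketch; vnf1/vnf2 must already be deployed, e.g. via startCompute):

    net.setChain("vnf1", "vnf2")                   # installs in_port=X,action=Y entries hop by hop
    net.setChain("vnf1", "vnf2", cmd="del-flows")  # removes them again (matches on in_port only)
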
diff --git a/emuvim/dcemulator/node.py b/emuvim/dcemulator/node.py
deleted file mode 100755 (executable)
index 336126c..0000000
+++ /dev/null
@@ -1,167 +0,0 @@
-"""
-Distributed Cloud Emulator (dcemulator)
-(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
-"""
-from mininet.node import Docker
-import logging
-
-
-DCDPID_BASE = 1000  # start of switch dpid's used for data center switches
-
-
-class EmulatorCompute(Docker):
-    """
-    Emulator specific compute node class.
-    Inherits from Dockernet's Docker host class.
-    Represents a single container connected to a (logical)
-    data center.
-    We can add emulator specific helper functions to it.
-    """
-
-    def __init__(
-            self, name, dimage, **kwargs):
-        logging.debug("Create EmulatorCompute instance: %s" % name)
-        self.datacenter = None  # pointer to current DC
-
-        # call original Docker.__init__
-        Docker.__init__(self, name, dimage, **kwargs)
-
-    def getNetworkStatus(self):
-        """
-        Helper method to receive information about the virtual networks
-        this compute instance is connected to.
-        """
-        # format list of tuples (name, Ip, MAC, isUp, status)
-        return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status())
-                for i in self.intfList()]
-
-    def getStatus(self):
-        """
-        Helper method to receive information about this compute instance.
-        """
-        status = {}
-        status["name"] = self.name
-        status["network"] = self.getNetworkStatus()
-        status["image"] = self.dimage
-        status["cpu_quota"] = self.cpu_quota
-        status["cpu_period"] = self.cpu_period
-        status["cpu_shares"] = self.cpu_shares
-        status["cpuset"] = self.cpuset
-        status["mem_limit"] = self.mem_limit
-        status["memswap_limit"] = self.memswap_limit
-        status["state"] = self.dcli.inspect_container(self.dc)["State"]
-        status["id"] = self.dcli.inspect_container(self.dc)["Id"]
-        status["datacenter"] = (None if self.datacenter is None
-                                else self.datacenter.label)
-        return status
-
-
-class Datacenter(object):
-    """
-    Represents a logical data center to which compute resources
-    (Docker containers) can be added at runtime.
-
-    Will also implement resource bookkeeping in later versions.
-    """
-
-    DC_COUNTER = 1
-
-    def __init__(self, label, metadata={}):
-        self.net = None  # DCNetwork to which we belong
-        # each node (DC) has a short internal name used by Mininet
-        # this is caused by Mininet's naming limitations for switches etc.
-        self.name = "dc%d" % Datacenter.DC_COUNTER
-        Datacenter.DC_COUNTER += 1
-        # use this for user defined names that can be longer than self.name
-        self.label = label  
-        # dict to store arbitrary metadata (e.g. latitude and longitude)
-        self.metadata = metadata
-        self.switch = None  # first prototype assumes one "bigswitch" per DC
-        self.containers = {}  # keep track of running containers
-
-    def _get_next_dc_dpid(self):
-        global DCDPID_BASE
-        DCDPID_BASE += 1
-        return DCDPID_BASE
-
-    def create(self):
-        """
-        Each data center is represented by a single switch to which
-        compute resources can be connected at run time.
-
-        TODO: This will be changed in the future to support multiple networks
-        per data center
-        """
-        self.switch = self.net.addSwitch(
-            "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:])
-        logging.debug("created data center switch: %s" % str(self.switch))
-
-    def start(self):
-        pass
-
-    def startCompute(self, name, image=None, command=None, network=None):
-        """
-        Create a new container as compute resource and connect it to this
-        data center.
-        :param name: name (string)
-        :param image: image name (string)
-        :param command: command (string)
-        :param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
-        :return: the created container (EmulatorCompute instance)
-        """
-        assert name is not None
-        # no duplications
-        if name in [c.name for c in self.net.getAllContainers()]:
-            raise Exception("Container with name %s already exists." % name)
-        # set default parameter
-        if image is None:
-            image = "ubuntu"
-        if network is None:
-            network = {}  # {"ip": "10.0.0.254/8"}
-        if isinstance(network, dict):
-            network = [network]  # if we have only one network, put it in a list
-        if isinstance(network, list):
-            if len(network) < 1:
-                network.append({})
-
-        # create the container
-        d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command)
-        # connect all given networks
-        for nw in network:
-            self.net.addLink(d, self.switch, params1=nw)
-        # do bookkeeping
-        self.containers[name] = d
-        d.datacenter = self
-        return d  # we might use UUIDs for naming later on
-
-    def stopCompute(self, name):
-        """
-        Stop and remove a container from this data center.
-        """
-        assert name is not None
-        if name not in self.containers:
-            raise Exception("Container with name %s not found." % name)
-        self.net.removeLink(
-            link=None, node1=self.containers[name], node2=self.switch)
-        self.net.removeDocker("%s" % (name))
-        del self.containers[name]
-        return True
-
-    def listCompute(self):
-        """
-        Return a list of all running containers assigned to this
-        data center.
-        """
-        return list(self.containers.itervalues())
-
-    def getStatus(self):
-        """
-        Return a dict with status information about this DC.
-        """
-        return {
-            "label": self.label,
-            "internalname": self.name,
-            "switch": self.switch.name,
-            "n_running_containers": len(self.containers),
-            "metadata": self.metadata
-        }
diff --git a/emuvim/example_topology.py b/emuvim/example_topology.py
deleted file mode 100755 (executable)
index eba751c..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-"""
-This is an example topology for the distributed cloud emulator (dcemulator).
-(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
-
-
-This is an example that shows how a user of the emulation tool can
-define network topologies with multiple emulated cloud data centers.
-
-The definition is done with a Python API which looks very similar to the
-Mininet API (in fact it is a wrapper for it).
-
-We only specify the topology *between* data centers not within a single
-data center (data center internal setups or placements are not of interest,
-we want to experiment with VNF chains deployed across multiple PoPs).
-
-The original Mininet API has to be completely hidden and not be used by this
-script.
-"""
-import logging
-from mininet.log import setLogLevel
-from dcemulator.net import DCNetwork
-from api.zerorpcapi import ZeroRpcApiEndpoint
-from api.zerorpcapi_DCNetwork import ZeroRpcApiEndpointDCNetwork
-
-logging.basicConfig(level=logging.INFO)
-
-
-def create_topology1():
-    """
-    1. Create a data center network object (DCNetwork)
-    """
-    net = DCNetwork()
-
-    """
-    1b. add a monitoring agent to the DCNetwork
-    """
-    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
-    mon_api.connectDCNetwork(net)
-    mon_api.start()
-    """
-    2. Add (logical) data centers to the topology
-       (each data center is one "bigswitch" in our simplified
-        first prototype)
-    """
-    dc1 = net.addDatacenter("datacenter1")
-    dc2 = net.addDatacenter("datacenter2")
-    dc3 = net.addDatacenter("long_data_center_name3")
-    dc4 = net.addDatacenter(
-        "datacenter4",
-        metadata={"mydata": "we can also add arbitrary metadata to each DC"})
-
-    """
-    3. You can add additional SDN switches for data center
-       interconnections to the network.
-    """
-    s1 = net.addSwitch("s1")
-
-    """
-    4. Add links between your data centers and additional switches
-       to define your topology.
-       These links can use Mininet's features to limit bw, add delay or jitter.
-    """
-    net.addLink(dc1, dc2)
-    net.addLink("datacenter1", s1)
-    net.addLink(s1, dc3)
-    net.addLink(s1, "datacenter4")
-
-    """
-    5. We want to access and control our data centers from the outside,
-       e.g., we want to connect an orchestrator to start/stop compute
-       resources aka VNFs (represented by Docker containers in the emulated network)
-
-       So we need to instantiate API endpoints (e.g. a zerorpc or REST
-       interface). Depending on the endpoint implementations, we can connect
-       one or more data centers to it, which can then be controlled through
-       this API, e.g., start/stop/list compute instances.
-    """
-    # create a new instance of an endpoint implementation
-    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
-    # connect data centers to this endpoint
-    zapi1.connectDatacenter(dc1)
-    zapi1.connectDatacenter(dc2)
-    zapi1.connectDatacenter(dc3)
-    zapi1.connectDatacenter(dc4)
-    # run API endpoint server (in another thread, don't block)
-    zapi1.start()
-
-    """
-    5.1. For our example, we create a second endpoint to illustrate that
-         this is supported by our design. This feature allows us to have
-         one API endpoint for each data center. This makes the emulation
-         environment more realistic because you can easily create one
-         OpenStack-like REST API endpoint for *each* data center.
-         This will look like a real-world multi PoP/data center deployment
-         from the perspective of an orchestrator.
-    """
-    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
-    zapi2.connectDatacenter(dc3)
-    zapi2.connectDatacenter(dc4)
-    zapi2.start()
-
-    """
-    6. Finally we are done and can start our network (the emulator).
-       We can also enter the Mininet CLI to interactively interact
-       with our compute resources (just like in default Mininet).
-       But we can also implement fully automated experiments that
-       can be executed again and again.
-    """
-    net.start()
-    net.CLI()
-    # when the user types exit in the CLI, we stop the emulator
-    net.stop()
-
-
-def main():
-    setLogLevel('info')  # set Mininet loglevel
-    create_topology1()
-
-
-if __name__ == '__main__':
-    main()
diff --git a/emuvim/test/__main__.py b/emuvim/test/__main__.py
deleted file mode 100755 (executable)
index f7fa66d..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-import runner
-import os
-
-
-if __name__ == '__main__':
-    thisdir = os.path.dirname( os.path.realpath( __file__ ) )
-    runner.main(thisdir)
diff --git a/emuvim/test/runner.py b/emuvim/test/runner.py
deleted file mode 100755 (executable)
index 469a99e..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Run all tests
- -v : verbose output
- -e : emulator test only (no API tests)
- -a : API tests only
-"""
-
-from unittest import defaultTestLoader, TextTestRunner, TestSuite
-import os
-import sys
-from mininet.util import ensureRoot
-from mininet.clean import cleanup
-from mininet.log import setLogLevel
-
-
-def runTests( testDir, verbosity=1, emuonly=False, apionly=False ):
-    "discover and run all tests in testDir"
-    # ensure import paths work
-    sys.path.append("%s/.." % testDir)
-    # ensure root and cleanup before starting tests
-    ensureRoot()
-    cleanup()
-    # discover all tests in testDir
-    testSuite = defaultTestLoader.discover( testDir )
-    if emuonly:
-        testSuiteFiltered = [s for s in testSuite if "Emulator" in str(s)]
-        testSuite = TestSuite()
-        testSuite.addTests(testSuiteFiltered)
-    if apionly:
-        testSuiteFiltered = [s for s in testSuite if "Api" in str(s)]
-        testSuite = TestSuite()
-        testSuite.addTests(testSuiteFiltered)
-
-    # run tests
-    TextTestRunner( verbosity=verbosity ).run( testSuite )
-
-
-def main(thisdir):
-    setLogLevel( 'warning' )
-    # get the directory containing example tests
-    vlevel = 2 if '-v' in sys.argv else 1
-    emuonly = ('-e' in sys.argv)
-    apionly = ('-a' in sys.argv)
-    runTests(
-        testDir=thisdir, verbosity=vlevel, emuonly=emuonly, apionly=apionly)
-
-
-if __name__ == '__main__':
-    thisdir = os.path.dirname( os.path.realpath( __file__ ) )
-    main(thisdir)
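
runTests can also be invoked directly, e.g. to run only the emulator suite with verbose output (a sketch, mirroring __main__.py above; the process must run as root because of ensureRoot):

    import os
    import runner  # importable from within src/emuvim/test

    thisdir = os.path.dirname(os.path.realpath(__file__))
    runner.runTests(testDir=thisdir, verbosity=2, emuonly=True)
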
diff --git a/emuvim/test/test_api_zerorpc.py b/emuvim/test/test_api_zerorpc.py
deleted file mode 100755 (executable)
index 2830872..0000000
+++ /dev/null
@@ -1 +0,0 @@
-# TODO we'll need this at some time. But I'm lazy. A good REST API seems to be more important.
diff --git a/emuvim/test/test_emulator.py b/emuvim/test/test_emulator.py
deleted file mode 100755 (executable)
index ef22a22..0000000
+++ /dev/null
@@ -1,360 +0,0 @@
-"""
-Test suite to automatically test emulator functionalities.
-Directly interacts with the emulator through the Mininet-like
-Python API.
-
-Does not test API endpoints. This is done in separate test suites.
-"""
-
-import unittest
-import os
-import time
-import subprocess
-import docker
-from dcemulator.net import DCNetwork
-from dcemulator.node import EmulatorCompute
-from mininet.node import Host, Controller, OVSSwitch, Docker
-from mininet.link import TCLink
-from mininet.topo import SingleSwitchTopo, LinearTopo
-from mininet.log import setLogLevel
-from mininet.util import quietRun
-from mininet.clean import cleanup
-
-
-class simpleTestTopology( unittest.TestCase ):
-    """
-        Helper class to do basic test setups.
-        s1 -- s2 -- s3 -- ... -- sN
-    """
-
-    def __init__(self, *args, **kwargs):
-        self.net = None
-        self.s = []   # list of switches
-        self.h = []   # list of hosts
-        self.d = []   # list of docker containers
-        self.dc = []  # list of data centers
-        self.docker_cli = None
-        super(simpleTestTopology, self).__init__(*args, **kwargs)
-
-    def createNet(
-            self,
-            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
-            autolinkswitches=False):
-        """
-        Creates a Mininet instance and automatically adds some
-        nodes to it.
-        """
-        self.net = net = DCNetwork()
-
-        # add some switches
-        for i in range(0, nswitches):
-            self.s.append(self.net.addSwitch('s%d' % i))
-        # if specified, chain all switches
-        if autolinkswitches:
-            for i in range(0, len(self.s) - 1):
-                self.net.addLink(self.s[i], self.s[i + 1])
-        # add some data centers
-        for i in range(0, ndatacenter):
-            self.dc.append(
-                self.net.addDatacenter(
-                    'datacenter%d' % i,
-                    metadata={"unittest_dc": i}))
-        # add some hosts
-        for i in range(0, nhosts):
-            self.h.append(self.net.addHost('h%d' % i))
-        # add some dockers
-        for i in range(0, ndockers):
-            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu"))
-
-    def startNet(self):
-        self.net.start()
-
-    def stopNet(self):
-        self.net.stop()
-
-    def getDockerCli(self):
-        """
-        Helper to interact with local docker instance.
-        """
-        if self.docker_cli is None:
-            self.docker_cli = docker.Client(
-                base_url='unix://var/run/docker.sock')
-        return self.docker_cli
-
-    def getDockernetContainers(self):
-        """
-        List the containers managed by dockernet
-        """
-        return self.getDockerCli().containers(filters={"label": "com.dockernet"})
-
-    @staticmethod
-    def setUp():
-        pass
-
-    @staticmethod
-    def tearDown():
-        cleanup()
-        # make sure that all pending docker containers are killed
-        with open(os.devnull, 'w') as devnull:
-            subprocess.call(
-                "sudo docker rm -f $(sudo docker ps --filter 'label=com.dockernet' -a -q)",
-                stdout=devnull,
-                stderr=devnull,
-                shell=True)
-
-
-#@unittest.skip("disabled topology tests for development")
-class testEmulatorTopology( simpleTestTopology ):
-    """
-    Tests to check the topology API of the emulator.
-    """
-
-    def testSingleDatacenter(self):
-        """
-        Create a single data center and check if its switch is up
-        by using manually added hosts. Especially tests the
-        data center specific addLink method.
-        """
-        # create network
-        self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
-        # setup links
-        self.net.addLink(self.dc[0], self.h[0])
-        self.net.addLink(self.h[1], self.dc[0])
-        # start Mininet network
-        self.startNet()
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 0)
-        assert(len(self.net.hosts) == 2)
-        assert(len(self.net.switches) == 1)
-        # check connectivity by using ping
-        assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
-        # stop Mininet network
-        self.stopNet()
-
-    def testMultipleDatacenterDirect(self):
-        """
-        Create two data centers and interconnect them.
-        """
-        # create network
-        self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
-        # setup links
-        self.net.addLink(self.dc[0], self.h[0])
-        self.net.addLink(self.h[1], self.dc[1])
-        self.net.addLink(self.dc[0], self.dc[1])
-        # start Mininet network
-        self.startNet()
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 0)
-        assert(len(self.net.hosts) == 2)
-        assert(len(self.net.switches) == 2)
-        # check connectivity by using ping
-        assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
-        # stop Mininet network
-        self.stopNet()
-
-    def testMultipleDatacenterWithIntermediateSwitches(self):
-        """
-        Create two data centers and interconnect them with additional
-        switches between them.
-        """
-        # create network
-        self.createNet(
-            nswitches=3, ndatacenter=2, nhosts=2, ndockers=0,
-            autolinkswitches=True)
-        # setup links
-        self.net.addLink(self.dc[0], self.h[0])
-        self.net.addLink(self.h[1], self.dc[1])
-        self.net.addLink(self.dc[0], self.s[0])
-        self.net.addLink(self.s[2], self.dc[1])
-        # start Mininet network
-        self.startNet()
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 0)
-        assert(len(self.net.hosts) == 2)
-        assert(len(self.net.switches) == 5)
-        # check connectivity by using ping
-        assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
-        # stop Mininet network
-        self.stopNet()
-
-
-#@unittest.skip("disabled compute tests for development")
-class testEmulatorCompute( simpleTestTopology ):
-    """
-    Tests to check the emulator's API to add and remove
-    compute resources at runtime.
-    """
-
-    def testAddSingleComputeSingleDC(self):
-        """
-        Adds a single compute instance to
-        a single DC and checks its connectivity with a
-        manually added host.
-        """
-        # create network
-        self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
-        # setup links
-        self.net.addLink(self.dc[0], self.h[0])
-        # start Mininet network
-        self.startNet()
-        # add compute resources
-        vnf1 = self.dc[0].startCompute("vnf1")
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 1)
-        assert(len(self.net.hosts) == 2)
-        assert(len(self.net.switches) == 1)
-        # check compute list result
-        assert(len(self.dc[0].listCompute()) == 1)
-        assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
-        assert(self.dc[0].listCompute()[0].name == "vnf1")
-        # check connectivity by using ping
-        assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
-        # stop Mininet network
-        self.stopNet()
-
-    def testRemoveSingleComputeSingleDC(self):
-        """
-        Test stop method for compute instances.
-        Check that the instance is really removed.
-        """
-        # create network
-        self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
-        # setup links
-        self.net.addLink(self.dc[0], self.h[0])
-        # start Mininet network
-        self.startNet()
-        # add compute resources
-        vnf1 = self.dc[0].startCompute("vnf1")
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 1)
-        assert(len(self.net.hosts) == 2)
-        assert(len(self.net.switches) == 1)
-        # check compute list result
-        assert(len(self.dc[0].listCompute()) == 1)
-        # check connectivity by using ping
-        assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
-        # remove compute resources
-        self.dc[0].stopCompute("vnf1")
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 0)
-        assert(len(self.net.hosts) == 1)
-        assert(len(self.net.switches) == 1)
-        # check compute list result
-        assert(len(self.dc[0].listCompute()) == 0)
-        # stop Mininet network
-        self.stopNet()
-
-    def testGetStatusSingleComputeSingleDC(self):
-        """
-        Check if the getStatus functionality of EmulatorCompute
-        objects works well.
-        """
-        # create network
-        self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
-        # setup links
-        self.net.addLink(self.dc[0], self.h[0])
-        # start Mininet network
-        self.startNet()
-        # add compute resources
-        vnf1 = self.dc[0].startCompute("vnf1")
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 1)
-        assert(len(self.net.hosts) == 2)
-        assert(len(self.net.switches) == 1)
-        # check compute list result
-        assert(len(self.dc[0].listCompute()) == 1)
-        assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
-        assert(self.dc[0].listCompute()[0].name == "vnf1")
-        # check connectivity by using ping
-        assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
-        # check get status
-        s = self.dc[0].containers.get("vnf1").getStatus()
-        assert(s["name"] == "vnf1")
-        assert(s["state"]["Running"])
-        # stop Mininet network
-        self.stopNet()
-
-    def testConnectivityMultiDC(self):
-        """
-        Test if compute instances started in different data centers
-        are able to talk to each other.
-        """
-        # create network
-        self.createNet(
-            nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
-            autolinkswitches=True)
-        # setup links
-        self.net.addLink(self.dc[0], self.s[0])
-        self.net.addLink(self.dc[1], self.s[2])
-        # start Mininet network
-        self.startNet()
-        # add compute resources
-        vnf1 = self.dc[0].startCompute("vnf1")
-        vnf2 = self.dc[1].startCompute("vnf2")
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 2)
-        assert(len(self.net.hosts) == 2)
-        assert(len(self.net.switches) == 5)
-        # check compute list result
-        assert(len(self.dc[0].listCompute()) == 1)
-        assert(len(self.dc[1].listCompute()) == 1)
-        # check connectivity by using ping
-        assert(self.net.ping([vnf1, vnf2]) <= 0.0)
-        # stop Mininet network
-        self.stopNet()
-
-    def testInterleavedAddRemoveMultiDC(self):
-        """
-        Test multiple, interleaved add and remove operations and ensure
-        that always all expected compute instances are reachable.
-        """
-                # create network
-        self.createNet(
-            nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
-            autolinkswitches=True)
-        # setup links
-        self.net.addLink(self.dc[0], self.s[0])
-        self.net.addLink(self.dc[1], self.s[2])
-        # start Mininet network
-        self.startNet()
-        # add compute resources
-        vnf1 = self.dc[0].startCompute("vnf1")
-        vnf2 = self.dc[1].startCompute("vnf2")
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 2)
-        assert(len(self.net.hosts) == 2)
-        assert(len(self.net.switches) == 5)
-        # check compute list result
-        assert(len(self.dc[0].listCompute()) == 1)
-        assert(len(self.dc[1].listCompute()) == 1)
-        # check connectivity by using ping
-        assert(self.net.ping([vnf1, vnf2]) <= 0.0)
-        # remove compute resources
-        self.dc[0].stopCompute("vnf1")
-        # check number of running nodes
-        assert(len(self.getDockernetContainers()) == 1)
-        assert(len(self.net.hosts) == 1)
-        assert(len(self.net.switches) == 5)
-        # check compute list result
-        assert(len(self.dc[0].listCompute()) == 0)
-        assert(len(self.dc[1].listCompute()) == 1)
-        # add compute resources
-        vnf3 = self.dc[0].startCompute("vnf3")
-        vnf4 = self.dc[0].startCompute("vnf4")
-        # check compute list result
-        assert(len(self.dc[0].listCompute()) == 2)
-        assert(len(self.dc[1].listCompute()) == 1)
-        assert(self.net.ping([vnf3, vnf2]) <= 0.0)
-        assert(self.net.ping([vnf4, vnf2]) <= 0.0)
-        # remove compute resources
-        self.dc[0].stopCompute("vnf3")
-        self.dc[0].stopCompute("vnf4")
-        self.dc[1].stopCompute("vnf2")
-        # check compute list result
-        assert(len(self.dc[0].listCompute()) == 0)
-        assert(len(self.dc[1].listCompute()) == 0)
-        # stop Mininet network
-        self.stopNet()
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/src/emuvim/api/__init__.py b/src/emuvim/api/__init__.py
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/src/emuvim/api/zerorpcapi.py b/src/emuvim/api/zerorpcapi.py
new file mode 100755 (executable)
index 0000000..59b960c
--- /dev/null
@@ -0,0 +1,128 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+
+import logging
+import threading
+import zerorpc
+
+logging.basicConfig(level=logging.INFO)
+
+
+class ZeroRpcApiEndpoint(object):
+    """
+    Simple API endpoint that offers a zerorpc-based
+    interface. This interface will be used by the
+    default command line client.
+    It can be used as a reference to implement
+    REST interfaces providing the same semantics,
+    such as the OpenStack compute API.
+    """
+
+    def __init__(self, listenip, port):
+        self.dcs = {}
+        self.ip = listenip
+        self.port = port
+        logging.debug("Created API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def connectDatacenter(self, dc):
+        self.dcs[dc.label] = dc
+        logging.info("Connected DC(%s) to API endpoint %s(%s:%d)" % (
+            dc.label, self.__class__.__name__, self.ip, self.port))
+
+    def start(self):
+        thread = threading.Thread(target=self._api_server_thread, args=())
+        thread.daemon = True
+        thread.start()
+        logging.debug("Started API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def _api_server_thread(self):
+        s = zerorpc.Server(MultiDatacenterApi(self.dcs))
+        s.bind("tcp://%s:%d" % (self.ip, self.port))
+        s.run()
+
+
+class MultiDatacenterApi(object):
+    """
+        Just pass through the corresponding request to the
+        selected data center. Do not implement provisioning
+        logic here because we will have multiple API
+        endpoint implementations in the end.
+    """
+
+    def __init__(self, dcs):
+        self.dcs = dcs
+
+    def compute_action_start(self, dc_label, compute_name, image, command, network):
+        """
+        Start a new compute instance: A docker container
+        :param dc_label: name of the DC
+        :param compute_name: compute container name
+        :param image: image name
+        :param command: command to execute
+        :param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
+        :return: name of the started compute instance (string)
+        """
+        # TODO what to return UUID / given name / internal name ?
+        logging.debug("RPC CALL: compute start")
+        try:
+            c = self.dcs.get(dc_label).startCompute(
+                compute_name, image=image, command=command, network=network)
+            return str(c.name)
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def compute_action_stop(self, dc_label, compute_name):
+        logging.debug("RPC CALL: compute stop")
+        try:
+            return self.dcs.get(dc_label).stopCompute(compute_name)
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def compute_list(self, dc_label):
+        logging.debug("RPC CALL: compute list")
+        try:
+            if dc_label is None:
+                # return list with all compute nodes in all DCs
+                all_containers = []
+                for dc in self.dcs.itervalues():
+                    all_containers += dc.listCompute()
+                return [(c.name, c.getStatus())
+                        for c in all_containers]
+            else:
+                # return list of compute nodes for specified DC
+                return [(c.name, c.getStatus())
+                        for c in self.dcs.get(dc_label).listCompute()]
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def compute_status(self, dc_label, compute_name):
+        logging.debug("RPC CALL: compute status")
+        try:
+            return self.dcs.get(
+                dc_label).containers.get(compute_name).getStatus()
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def datacenter_list(self):
+        logging.debug("RPC CALL: datacenter list")
+        try:
+            return [d.getStatus() for d in self.dcs.itervalues()]
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def datacenter_status(self, dc_label):
+        logging.debug("RPC CALL: datacenter status")
+        try:
+            return self.dcs.get(dc_label).getStatus()
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
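
For reference, a client for this endpoint only needs a zerorpc proxy. The following is a minimal sketch (illustrative, not part of this commit), assuming an emulator with a `ZeroRpcApiEndpoint` bound to `127.0.0.1:4242`, as set up by `example_topology.py`:

```python
import zerorpc

c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")
# start a container with one interface in datacenter1
print(c.compute_action_start(
    "datacenter1", "vnf1", "ubuntu", None, [{"ip": "10.0.0.3/8"}]))
print(c.compute_list("datacenter1"))  # [(name, status_dict), ...]
c.compute_action_stop("datacenter1", "vnf1")
```
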
diff --git a/src/emuvim/api/zerorpcapi_DCNetwork.py b/src/emuvim/api/zerorpcapi_DCNetwork.py
new file mode 100755 (executable)
index 0000000..27527aa
--- /dev/null
@@ -0,0 +1,104 @@
+"""\r
+Distributed Cloud Emulator (dcemulator)\r
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>\r
+"""\r
+\r
+import logging\r
+import threading\r
+import zerorpc\r
+\r
+\r
+logging.basicConfig(level=logging.INFO)\r
+\r
+\r
+class ZeroRpcApiEndpointDCNetwork(object):
+    """
+    Simple API endpoint that offers a zerorpc-based
+    interface. This interface will be used by the
+    default command line client.
+    It can be used as a reference to implement
+    REST interfaces providing the same semantics,
+    such as the OpenStack compute API.
+    """
+
+    def __init__(self, listenip, port, DCNetwork=None):
+        self.ip = listenip
+        self.port = port
+        if DCNetwork is not None:
+            self.connectDCNetwork(DCNetwork)
+        logging.debug("Created monitoring API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def connectDCNetwork(self, net):
+        self.net = net
+        logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def start(self):
+        thread = threading.Thread(target=self._api_server_thread, args=())
+        thread.daemon = True
+        thread.start()
+        logging.debug("Started API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def _api_server_thread(self):
+        s = zerorpc.Server(DCNetworkApi(self.net))
+        s.bind("tcp://%s:%d" % (self.ip, self.port))
+        s.run()
+
+    def stop(self):
+        logging.info("Stop the monitoring API endpoint")
+        return
+
+
+class DCNetworkApi(object):
+    """
+        The networking and monitoring commands need the scope of the
+        whole DC network to find the requested vnf. So this API is intended
+        to work with a DCNetwork.
+        Just pass through the corresponding request to the
+        selected data center network. Do not implement provisioning
+        logic here because we will have multiple API
+        endpoint implementations in the end.
+    """
+
+    def __init__(self, net):
+        self.net = net
+
+    def network_action_start(self, vnf_src_name, vnf_dst_name):
+        # call DCNetwork method, not really datacenter specific API for now...
+        # provided dc name needs to be part of API endpoint
+        # no check if vnfs are really connected to this datacenter...
+        logging.debug("RPC CALL: network chain start")
+        try:
+            c = self.net.setChain(
+                vnf_src_name, vnf_dst_name)
+            return str(c)
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def network_action_stop(self, vnf_src_name, vnf_dst_name):
+        # call DCNetwork method, not really datacenter specific API for now...
+        # provided dc name needs to be part of API endpoint
+        # no check if vnfs are really connected to this datacenter...
+        logging.debug("RPC CALL: network chain stop")
+        try:
+            c = self.net.setChain(
+                vnf_src_name, vnf_dst_name, cmd='del-flows')
+            return c
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    # get egress (default) or ingress rate of a vnf
+    def monitor_get_rate(self, vnf_name, direction):
+        logging.debug("RPC CALL: get rate")
+        try:
+            c = self.net.monitor_agent.get_rate(vnf_name, direction)
+            return c
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+
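
The monitoring endpoint can be exercised the same way. A sketch, assuming the DCNetwork endpoint from `example_topology.py` on port 5151 and two already running VNFs named `vnf1` and `vnf2`:

```python
import zerorpc

m = zerorpc.Client()
m.connect("tcp://127.0.0.1:5151")
print(m.network_action_start("vnf1", "vnf2"))  # install chain flow entries
print(m.monitor_get_rate("vnf1", "tx"))        # port counters of vnf1's switch port
m.network_action_stop("vnf1", "vnf2")          # remove the flow entries again
```
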
diff --git a/src/emuvim/cli/__init__.py b/src/emuvim/cli/__init__.py
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/src/emuvim/cli/compute.py b/src/emuvim/cli/compute.py
new file mode 100755 (executable)
index 0000000..70de20a
--- /dev/null
@@ -0,0 +1,109 @@
+"""
+son-emu compute CLI
+(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+
+import argparse
+import pprint
+from tabulate import tabulate
+import zerorpc
+
+
+pp = pprint.PrettyPrinter(indent=4)
+
+
+class ZeroRpcClient(object):
+
+    def __init__(self):
+        self.c = zerorpc.Client()
+        self.c.connect("tcp://127.0.0.1:4242")  # TODO hard coded for now. we'll change this later
+        self.cmds = {}
+
+    def execute_command(self, args):
+        if getattr(self, args["command"], None) is not None:
+            # call the local method with the same name as the command arg
+            getattr(self, args["command"])(args)
+        else:
+            print "Command not implemented."
+
+    def start(self, args):
+        nw_list = list()
+        if args.get("network") is not None:
+            networks = args.get("network").split(",")
+            for nw in networks:
+                nw_list.append({"ip": nw})
+        r = self.c.compute_action_start(
+            args.get("datacenter"),
+            args.get("name"),
+            args.get("image"),
+            args.get("docker_command"),
+            nw_list)
+        pp.pprint(r)
+
+    def stop(self, args):
+        r = self.c.compute_action_stop(
+            args.get("datacenter"), args.get("name"))
+        pp.pprint(r)
+
+    def list(self, args):
+        r = self.c.compute_list(
+            args.get("datacenter"))
+        table = []
+        for c in r:
+            # for each container add a line to the output table
+            if len(c) > 1:
+                name = c[0]
+                status = c[1]
+                eth0ip = None
+                eth0status = "down"
+                if len(status.get("network")) > 0:
+                    eth0ip = status.get("network")[0][1]
+                    eth0status = "up" if status.get(
+                        "network")[0][3] else "down"
+                table.append([status.get("datacenter"),
+                              name,
+                              status.get("image"),
+                              eth0ip,
+                              eth0status,
+                              status.get("state").get("Status")])
+        headers = ["Datacenter",
+                   "Container",
+                   "Image",
+                   "eth0 IP",
+                   "eth0 status",
+                   "Status"]
+        print tabulate(table, headers=headers, tablefmt="grid")
+
+    def status(self, args):
+        r = self.c.compute_status(
+            args.get("datacenter"), args.get("name"))
+        pp.pprint(r)
+
+
+parser = argparse.ArgumentParser(description='son-emu compute')
+parser.add_argument(
+    "command",
+    choices=['start', 'stop', 'list', 'status'],
+    help="Action to be executed.")
+parser.add_argument(
+    "--datacenter", "-d", dest="datacenter",
+    help="Data center to in which the compute instance should be executed")
+parser.add_argument(
+    "--name", "-n", dest="name",
+    help="Name of compute instance e.g. 'vnf1'")
+parser.add_argument(
+    "--image","-i", dest="image",
+    help="Name of container image to be used e.g. 'ubuntu'")
+parser.add_argument(
+    "--dcmd", "-c", dest="docker_command",
+    help="Startup command of the container e.g. './start.sh'")
+parser.add_argument(
+    "--net", dest="network",
+    help="Network properties of compute instance e.g. \
+          '10.0.0.123/8' or '10.0.0.123/8,11.0.0.123/24' for multiple interfaces.")
+
+
+def main(argv):
+    args = vars(parser.parse_args(argv))
+    c = ZeroRpcClient()
+    c.execute_command(args)
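
Besides the `son-emu-cli` wrapper, this module can be driven directly from Python, which is handy for scripting. A sketch, assuming `src/` is on the `PYTHONPATH` and the zerorpc endpoint is reachable:

```python
from emuvim.cli import compute

# same as: ./son-emu-cli compute start -d datacenter1 -n vnf1 --net "10.0.0.3/8"
compute.main(["start", "-d", "datacenter1", "-n", "vnf1",
              "--net", "10.0.0.3/8"])
compute.main(["list"])  # prints the tabulated container overview
```
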
diff --git a/src/emuvim/cli/datacenter.py b/src/emuvim/cli/datacenter.py
new file mode 100755 (executable)
index 0000000..c3850fc
--- /dev/null
@@ -0,0 +1,66 @@
+"""
+son-emu datacenter CLI
+(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+
+import argparse
+import pprint
+from tabulate import tabulate
+import zerorpc
+
+
+pp = pprint.PrettyPrinter(indent=4)
+
+
+class ZeroRpcClient(object):
+
+    def __init__(self):
+        self.c = zerorpc.Client()
+        self.c.connect("tcp://127.0.0.1:4242")  # TODO hard coded for now. we'll change this later
+        self.cmds = {}
+
+    def execute_command(self, args):
+        if getattr(self, args["command"], None) is not None:
+            # call the local method with the same name as the command arg
+            getattr(self, args["command"])(args)
+        else:
+            print "Command not implemented."
+
+    def list(self, args):
+        r = self.c.datacenter_list()
+        table = []
+        for d in r:
+            # for each dc add a line to the output table
+            if len(d) > 0:
+                table.append([d.get("label"),
+                              d.get("internalname"),
+                              d.get("switch"),
+                              d.get("n_running_containers"),
+                              len(d.get("metadata"))])
+        headers = ["Label",
+                   "Internal Name",
+                   "Switch",
+                   "# Containers",
+                   "# Metadata Items"]
+        print tabulate(table, headers=headers, tablefmt="grid")
+
+    def status(self, args):
+        r = self.c.datacenter_status(
+            args.get("datacenter"))
+        pp.pprint(r)
+
+
+parser = argparse.ArgumentParser(description='son-emu datacenter')
+parser.add_argument(
+    "command",
+    choices=['list', 'status'],
+    help="Action to be executed.")
+parser.add_argument(
+    "--datacenter", "-d", dest="datacenter",
+    help="Data center to which the command should be applied.")
+
+
+def main(argv):
+    args = vars(parser.parse_args(argv))
+    c = ZeroRpcClient()
+    c.execute_command(args)
diff --git a/src/emuvim/cli/monitor.py b/src/emuvim/cli/monitor.py
new file mode 100755 (executable)
index 0000000..6885a3c
--- /dev/null
@@ -0,0 +1,53 @@
+"""\r
+son-emu network CLI\r
+(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>\r
+"""\r
+\r
+import argparse\r
+import pprint\r
+from tabulate import tabulate\r
+import zerorpc\r
+\r
+\r
+pp = pprint.PrettyPrinter(indent=4)\r
+\r
+class ZeroRpcClient(object):\r
+\r
+    def __init__(self):\r
+        self.c = zerorpc.Client()\r
+        # TODO connect to DCNetwork API\r
+        #self.c.connect("tcp://127.0.0.1:4242")  # TODO hard coded for now. we'll change this later\r
+        self.c.connect("tcp://127.0.0.1:5151")\r
+        self.cmds = {}\r
+\r
+    def execute_command(self, args):\r
+        if getattr(self, args["command"]) is not None:\r
+            # call the local method with the same name as the command arg\r
+            getattr(self, args["command"])(args)\r
+        else:\r
+            print "Command not implemented."\r
+\r
+    def get_rate(self, args):\r
+        r = self.c.monitor_get_rate(\r
+            args.get("vnf_name"),\r
+            args.get("direction"))\r
+        pp.pprint(r)\r
+\r
+\r
+parser = argparse.ArgumentParser(description='son-emu network')\r
+parser.add_argument(\r
+    "command",\r
+    help="Action to be executed: get_rate")\r
+parser.add_argument(\r
+    "--vnf_name", "-vnf", dest="vnf_name",\r
+    help="vnf name to be monitored")\r
+parser.add_argument(\r
+    "--direction", "-d", dest="direction",\r
+    help="in (ingress rate) or out (egress rate)")\r
+\r
+def main(argv):\r
+    print "This is the son-emu monitor CLI."\r
+    print "Arguments: %s" % str(argv)\r
+    args = vars(parser.parse_args(argv))\r
+    c = ZeroRpcClient()\r
+    c.execute_command(args)\r
diff --git a/src/emuvim/cli/network.py b/src/emuvim/cli/network.py
new file mode 100755 (executable)
index 0000000..8d4219b
--- /dev/null
@@ -0,0 +1,62 @@
+"""\r
+son-emu network CLI\r
+(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>\r
+"""\r
+\r
+import argparse\r
+import pprint\r
+from tabulate import tabulate\r
+import zerorpc\r
+\r
+\r
+pp = pprint.PrettyPrinter(indent=4)\r
+\r
+class ZeroRpcClient(object):\r
+\r
+    def __init__(self):\r
+        self.c = zerorpc.Client()\r
+        # TODO connect to DCNetwork API\r
+        #self.c.connect("tcp://127.0.0.1:4242")  # TODO hard coded for now. we'll change this later\r
+        self.c.connect("tcp://127.0.0.1:5151")\r
+        self.cmds = {}\r
+\r
+    def execute_command(self, args):\r
+        if getattr(self, args["command"]) is not None:\r
+            # call the local method with the same name as the command arg\r
+            getattr(self, args["command"])(args)\r
+        else:\r
+            print "Command not implemented."\r
+\r
+    def add(self, args):\r
+        r = self.c.network_action_start(\r
+            #args.get("datacenter"),\r
+            args.get("source"),\r
+            args.get("destination"))\r
+        pp.pprint(r)\r
+\r
+    def remove(self, args):\r
+        r = self.c.network_action_stop(\r
+            #args.get("datacenter"),\r
+            args.get("source"),\r
+            args.get("destination"))\r
+        pp.pprint(r)\r
+\r
+\r
+parser = argparse.ArgumentParser(description='son-emu network')\r
+parser.add_argument(\r
+    "command",\r
+    help="Action to be executed: add|remove")\r
+parser.add_argument(\r
+    "--datacenter", "-d", dest="datacenter",\r
+    help="Data center to in which the network action should be initiated")\r
+parser.add_argument(\r
+    "--source", "-src", dest="source",\r
+    help="vnf name of the source of the chain")\r
+parser.add_argument(\r
+    "--destination", "-dst", dest="destination",\r
+    help="vnf name of the destination of the chain")\r
+\r
+def main(argv):\r
+    args = vars(parser.parse_args(argv))\r
+    c = ZeroRpcClient()\r
+    c.execute_command(args)\r
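
Chain setup and teardown can be scripted through the same pattern. An illustrative sketch, again assuming two running VNFs and the endpoint on port 5151:

```python
from emuvim.cli import network

network.main(["add", "-src", "vnf1", "-dst", "vnf2"])     # install the chain
network.main(["remove", "-src", "vnf1", "-dst", "vnf2"])  # remove it again
```
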
diff --git a/src/emuvim/cli/son-emu-cli b/src/emuvim/cli/son-emu-cli
new file mode 100755 (executable)
index 0000000..61cbd43
--- /dev/null
@@ -0,0 +1,35 @@
+#!/usr/bin/python
+"""
+ Simple CLI client to interact with a running emulator.
+
+ (c) 2016 by Manuel Peuster <manuel.peuster@upb.de>
+
+ The CLI offers different tools, e.g., compute, network, ...
+ Each of these tools is implemented as an independent Python
+ module.
+
+ cli compute start dc1 my_name flavor_a
+ cli network create dc1 11.0.0.0/24
+"""
+
+import sys
+import compute
+import network
+import datacenter
+import monitor
+
+def main():
+    if len(sys.argv) < 2:
+        print "Usage: son-emu-cli <toolname> <arguments>"
+        exit(0)
+    if sys.argv[1] == "compute":
+        compute.main(sys.argv[2:])
+    elif sys.argv[1] == "network":
+        network.main(sys.argv[2:])
+    elif sys.argv[1] == "datacenter":
+        datacenter.main(sys.argv[2:])
+    elif sys.argv[1] == "monitor":
+        monitor.main(sys.argv[2:])
+
+if __name__ == '__main__':
+    main()
diff --git a/src/emuvim/dcemulator/__init__.py b/src/emuvim/dcemulator/__init__.py
new file mode 100755 (executable)
index 0000000..64f6616
--- /dev/null
@@ -0,0 +1,4 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+"""
\ No newline at end of file
diff --git a/src/emuvim/dcemulator/link.py b/src/emuvim/dcemulator/link.py
new file mode 100755 (executable)
index 0000000..e69de29
diff --git a/src/emuvim/dcemulator/monitoring.py b/src/emuvim/dcemulator/monitoring.py
new file mode 100755 (executable)
index 0000000..094c09b
--- /dev/null
@@ -0,0 +1,62 @@
+__author__ = 'Administrator'
+
+import urllib2
+import logging
+from mininet.node import OVSSwitch
+import ast
+logging.basicConfig(level=logging.INFO)
+
+"""
+class to read openflow stats from the Ryu controller of the DCNetwork
+"""
+
+class DCNetworkMonitor():
+    def __init__(self, net):
+        self.net = net
+        # link to the REST API of the Ryu controller
+        self.ip = '0.0.0.0'
+        self.port = '8080'
+        self.REST_api = 'http://{0}:{1}'.format(self.ip, self.port)
+
+    def get_rate(self, vnf_name, direction='tx'):
+        try:
+            vnf_switch = self.net.DCNetwork_graph.neighbors(str(vnf_name))
+
+            if len(vnf_switch) > 1:
+                logging.info("vnf: {0} has multiple ports".format(vnf_name))
+                return
+            elif len(vnf_switch) == 0:
+                logging.info("vnf: {0} is not connected".format(vnf_name))
+                return
+            else:
+                vnf_switch = vnf_switch[0]
+            next_node = self.net.getNodeByName(vnf_switch)
+
+            if not isinstance(next_node, OVSSwitch):
+                logging.info("vnf: {0} is not connected to a switch".format(vnf_name))
+                return
+
+            mon_port = self.net.DCNetwork_graph[vnf_name][vnf_switch]['dst_port']
+            switch_dpid = int(str(next_node.dpid), 16)
+
+            ret = self.REST_cmd('stats/port', switch_dpid)
+            port_stat_dict = ast.literal_eval(ret)
+            for port_stat in port_stat_dict[str(switch_dpid)]:
+                if port_stat['port_no'] == mon_port:
+                    return port_stat
+
+            return ret
+
+        except Exception as ex:
+            logging.exception("get_rate error.")
+            return ex.message
+
+    def REST_cmd(self, prefix, dpid):
+        url = self.REST_api + '/' + str(prefix) + '/' + str(dpid)
+        req = urllib2.Request(url)
+        ret = urllib2.urlopen(req).read()
+        return ret
\ No newline at end of file
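
`get_rate` parses the reply of Ryu's `ofctl_rest` application, which maps the queried dpid to a list of per-port counter dicts. A toy example of the parsing step (the reply string below is illustrative, not captured from a real run):

```python
import ast

ret = "{'1001': [{'port_no': 1, 'rx_bytes': 1500, 'tx_bytes': 4200}]}"
port_stat_dict = ast.literal_eval(ret)
print(port_stat_dict['1001'][0]['tx_bytes'])  # -> 4200
```
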
diff --git a/src/emuvim/dcemulator/net.py b/src/emuvim/dcemulator/net.py
new file mode 100755 (executable)
index 0000000..20ab33a
--- /dev/null
@@ -0,0 +1,206 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+import logging
+
+import site
+from subprocess import Popen
+import os
+
+from mininet.net import Dockernet
+from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController
+from mininet.cli import CLI
+from mininet.log import setLogLevel, info, debug
+from mininet.link import TCLink, Link
+import networkx as nx
+from monitoring import DCNetworkMonitor
+
+from node import Datacenter, EmulatorCompute
+
+
+class DCNetwork(Dockernet):
+    """
+    Wraps the original Mininet/Dockernet class and provides
+    methods to add data centers, switches, etc.
+
+    This class is used by topology definition scripts.
+    """
+
+    def __init__(self, **kwargs):
+        self.dcs = {}
+        # create a Mininet/Dockernet network
+        # call original Dockernet.__init__ and set up the default controller
+        Dockernet.__init__(
+            self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
+        self.addController('c0', controller=RemoteController)
+
+        # graph of the complete DC network
+        self.DCNetwork_graph = nx.DiGraph()
+
+        # monitoring agent
+        self.monitor_agent = DCNetworkMonitor(self)
+
+        # start Ryu controller
+        self.startRyu()
+
+
+    def addDatacenter(self, label, metadata={}):
+        """
+        Create and add a logical cloud data center to the network.
+        """
+        if label in self.dcs:
+            raise Exception("Data center label already exists: %s" % label)
+        dc = Datacenter(label, metadata=metadata)
+        dc.net = self  # set reference to network
+        self.dcs[label] = dc
+        dc.create()  # finally create the data center in our Mininet instance
+        logging.info("added data center: %s" % label)
+        return dc
+
+    def addLink(self, node1, node2, **params):
+        """
+        Like Mininet's addLink, but able to handle Datacenter
+        objects as link endpoints.
+        """
+        assert node1 is not None
+        assert node2 is not None
+        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
+        # ensure type of node1
+        if isinstance( node1, basestring ):
+            if node1 in self.dcs:
+                node1 = self.dcs[node1].switch
+        if isinstance( node1, Datacenter ):
+            node1 = node1.switch
+        # ensure type of node2
+        if isinstance( node2, basestring ):
+            if node2 in self.dcs:
+                node2 = self.dcs[node2].switch
+        if isinstance( node2, Datacenter ):
+            node2 = node2.switch
+        # try to give containers a default IP
+        if isinstance( node1, Docker ):
+            if not "params1" in params:
+                params["params1"] = {}
+            if not "ip" in params["params1"]:
+                params["params1"]["ip"] = self.getNextIp()
+        if isinstance( node2, Docker ):
+            if not "params2" in params:
+                params["params2"] = {}
+            if not "ip" in params["params2"]:
+                params["params2"]["ip"] = self.getNextIp()
+
+        link = Dockernet.addLink(self, node1, node2, **params)  # TODO we need TCLinks with user defined performance here
+
+        # add edge and assigned port number to graph in both directions between node1 and node2
+        self.DCNetwork_graph.add_edge(node1.name, node2.name, \
+                                      {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]})
+        self.DCNetwork_graph.add_edge(node2.name, node1.name, \
+                                       {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]})
+
+        return link
+
+    def addDocker( self, label, **params ):
+        """
+        Wrapper for addDocker method to use custom container class.
+        """
+        self.DCNetwork_graph.add_node(label)
+        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)
+
+    def removeDocker( self, label, **params ):
+        """
+        Wrapper for removeDocker method to update graph.
+        """
+        self.DCNetwork_graph.remove_node(label)
+        return Dockernet.removeDocker(self, label, **params)
+
+    def addSwitch( self, name, add_to_graph=True, **params ):
+        """
+        Wrapper for addSwitch method to store switch also in graph.
+        """
+        if add_to_graph:
+            self.DCNetwork_graph.add_node(name)
+        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
+
+    def getAllContainers(self):
+        """
+        Returns a list with all containers within all data centers.
+        """
+        all_containers = []
+        for dc in self.dcs.itervalues():
+            all_containers += dc.listCompute()
+        return all_containers
+
+    def start(self):
+        # start
+        for dc in self.dcs.itervalues():
+            dc.start()
+        Dockernet.start(self)
+
+    def stop(self):
+        # stop Ryu controller
+        self.ryu_process.terminate()
+        #self.ryu_process.kill()
+        Dockernet.stop(self)
+
+    def CLI(self):
+        CLI(self)
+
+    # to remove chain do setChain( src, dst, cmd='del-flows')
+    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
+        # get shortest path
+        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
+        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
+
+        current_hop = vnf_src_name
+        for i in range(0,len(path)):
+            next_hop = path[path.index(current_hop)+1]
+            next_node = self.getNodeByName(next_hop)
+
+            if next_hop == vnf_dst_name:
+                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
+            elif not isinstance( next_node, OVSSwitch ):
+                logging.info("Next node: {0} is not a switch".format(next_hop))
+                return "Next node: {0} is not a switch".format(next_hop)
+
+
+            switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port']
+            next2_hop = path[path.index(current_hop)+2]
+            switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port']
+
+            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
+            # set of entry via ovs-ofctl
+            # TODO use the REST API of Ryu to set flow entries for the correct switch dpid
+            if isinstance( next_node, OVSSwitch ):
+                match = 'in_port=%s' % switch_inport
+
+                if cmd=='add-flow':
+                    action = 'action=%s' % switch_outport
+                    s = ','
+                    ofcmd = s.join([match,action])
+                elif cmd=='del-flows':
+                    ofcmd = match
+                else:
+                    ofcmd=''
+
+                next_node.dpctl(cmd, ofcmd)
+
+            current_hop = next_hop
+
+        return "destination node: {0} not reached".format(vnf_dst_name)
+
+    # start Ryu Openflow controller as Remote Controller for the DCNetwork
+    def startRyu(self):
+        # start Ryu controller with rest-API
+        python_install_path = site.getsitepackages()[0]
+        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
+        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
+        # change the default OpenFlow controller port to 6653 (official IANA-assigned port number), as used by Mininet
+        # Ryu still uses 6633 as default
+        ryu_option = '--ofp-tcp-listen-port'
+        ryu_of_port = '6653'
+        ryu_cmd = 'ryu-manager'
+        FNULL = open(os.devnull, 'w')
+        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
\ No newline at end of file
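
`setChain` walks the shortest path in `DCNetwork_graph` and uses the port numbers stored on each edge to build per-switch flow entries. A toy sketch of that bookkeeping (node names and port numbers are made up for illustration):

```python
import networkx as nx

g = nx.DiGraph()
g.add_edge("vnf1", "dc1.s1", src_port=0, dst_port=1)
g.add_edge("dc1.s1", "vnf2", src_port=2, dst_port=0)
path = nx.shortest_path(g, "vnf1", "vnf2")
print(path)  # ['vnf1', 'dc1.s1', 'vnf2']
# on dc1.s1: in_port  = dst_port of edge (vnf1 -> dc1.s1)  = 1
#            out_port = src_port of edge (dc1.s1 -> vnf2)  = 2
# setChain then issues roughly: dc1.s1.dpctl('add-flow', 'in_port=1,action=2')
```
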
diff --git a/src/emuvim/dcemulator/node.py b/src/emuvim/dcemulator/node.py
new file mode 100755 (executable)
index 0000000..336126c
--- /dev/null
@@ -0,0 +1,167 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+from mininet.node import Docker
+import logging
+
+
+DCDPID_BASE = 1000  # start of switch dpid's used for data center switches
+
+
+class EmulatorCompute(Docker):
+    """
+    Emulator specific compute node class.
+    Inherits from Dockernet's Docker host class.
+    Represents a single container connected to a (logical)
+    data center.
+    We can add emulator specific helper functions to it.
+    """
+
+    def __init__(
+            self, name, dimage, **kwargs):
+        logging.debug("Create EmulatorCompute instance: %s" % name)
+        self.datacenter = None  # pointer to current DC
+
+        # call original Docker.__init__
+        Docker.__init__(self, name, dimage, **kwargs)
+
+    def getNetworkStatus(self):
+        """
+        Helper method to receive information about the virtual networks
+        this compute instance is connected to.
+        """
+        # format list of tuples (name, Ip, MAC, isUp, status)
+        return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status())
+                for i in self.intfList()]
+
+    def getStatus(self):
+        """
+        Helper method to receive information about this compute instance.
+        """
+        status = {}
+        status["name"] = self.name
+        status["network"] = self.getNetworkStatus()
+        status["image"] = self.dimage
+        status["cpu_quota"] = self.cpu_quota
+        status["cpu_period"] = self.cpu_period
+        status["cpu_shares"] = self.cpu_shares
+        status["cpuset"] = self.cpuset
+        status["mem_limit"] = self.mem_limit
+        status["memswap_limit"] = self.memswap_limit
+        status["state"] = self.dcli.inspect_container(self.dc)["State"]
+        status["id"] = self.dcli.inspect_container(self.dc)["Id"]
+        status["datacenter"] = (None if self.datacenter is None
+                                else self.datacenter.label)
+        return status
+
+
+class Datacenter(object):
+    """
+    Represents a logical data center to which compute resources
+    (Docker containers) can be added at runtime.
+
+    Will also implement resource bookkeeping in later versions.
+    """
+
+    DC_COUNTER = 1
+
+    def __init__(self, label, metadata={}):
+        self.net = None  # DCNetwork to which we belong
+        # each node (DC) has a short internal name used by Mininet
+        # this is caused by Mininet's naming limitations for switches etc.
+        self.name = "dc%d" % Datacenter.DC_COUNTER
+        Datacenter.DC_COUNTER += 1
+        # use this for user-defined names that can be longer than self.name
+        self.label = label
+        # dict to store arbitrary metadata (e.g. latitude and longitude)
+        self.metadata = metadata
+        self.switch = None  # first prototype assumes one "bigswitch" per DC
+        self.containers = {}  # keep track of running containers
+
+    def _get_next_dc_dpid(self):
+        global DCDPID_BASE
+        DCDPID_BASE += 1
+        return DCDPID_BASE
+
+    def create(self):
+        """
+        Each data center is represented by a single switch to which
+        compute resources can be connected at run time.
+
+        TODO: This will be changed in the future to support multiple networks
+        per data center
+        """
+        self.switch = self.net.addSwitch(
+            "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:])
+        logging.debug("created data center switch: %s" % str(self.switch))
+
+    def start(self):
+        pass
+
+    def startCompute(self, name, image=None, command=None, network=None):
+        """
+        Create a new container as compute resource and connect it to this
+        data center.
+        :param name: name (string)
+        :param image: image name (string)
+        :param command: command (string)
+        :param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
+        :return:
+        """
+        assert name is not None
+        # no duplications
+        if name in [c.name for c in self.net.getAllContainers()]:
+            raise Exception("Container with name %s already exists." % name)
+        # set default parameter
+        if image is None:
+            image = "ubuntu"
+        if network is None:
+            network = {}  # {"ip": "10.0.0.254/8"}
+        if isinstance(network, dict):
+            network = [network]  # if we have only one network, put it in a list
+        if isinstance(network, list):
+            if len(network) < 1:
+                network.append({})
+
+        # create the container
+        d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command)
+        # connect all given networks
+        for nw in network:
+            self.net.addLink(d, self.switch, params1=nw)
+        # do bookkeeping
+        self.containers[name] = d
+        d.datacenter = self
+        return d  # we might use UUIDs for naming later on
+
+    def stopCompute(self, name):
+        """
+        Stop and remove a container from this data center.
+        """
+        assert name is not None
+        if name not in self.containers:
+            raise Exception("Container with name %s not found." % name)
+        self.net.removeLink(
+            link=None, node1=self.containers[name], node2=self.switch)
+        self.net.removeDocker("%s" % (name))
+        del self.containers[name]
+        return True
+
+    def listCompute(self):
+        """
+        Return a list of all running containers assigned to this
+        data center.
+        """
+        return list(self.containers.itervalues())
+
+    def getStatus(self):
+        """
+        Return a dict with status information about this DC.
+        """
+        return {
+            "label": self.label,
+            "internalname": self.name,
+            "switch": self.switch.name,
+            "n_running_containers": len(self.containers),
+            "metadata": self.metadata
+        }
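
For completeness, a minimal usage sketch of this class from within a topology script (it assumes `net = DCNetwork()` and `dc1 = net.addDatacenter("datacenter1")` as in `example_topology.py`, run with root privileges):

```python
vnf1 = dc1.startCompute("vnf1", image="ubuntu",
                        network=[{"ip": "10.0.0.3/8"}])
print(vnf1.getStatus()["state"]["Running"])  # True while the container runs
print([c.name for c in dc1.listCompute()])   # ['vnf1']
dc1.stopCompute("vnf1")
```
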
diff --git a/src/emuvim/example_topology.py b/src/emuvim/example_topology.py
new file mode 100755 (executable)
index 0000000..eba751c
--- /dev/null
@@ -0,0 +1,121 @@
+"""
+This is an example topology for the distributed cloud emulator (dcemulator).
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+
+
+This is an example that shows how a user of the emulation tool can
+define network topologies with multiple emulated cloud data centers.
+
+The definition is done with a Python API which looks very similar to the
+Mininet API (in fact it is a wrapper for it).
+
+We only specify the topology *between* data centers, not within a single
+data center (data center internal setups or placements are not of interest;
+we want to experiment with VNF chains deployed across multiple PoPs).
+
+The original Mininet API should be completely hidden and not be used by this
+script.
+"""
+import logging
+from mininet.log import setLogLevel
+from dcemulator.net import DCNetwork
+from api.zerorpcapi import ZeroRpcApiEndpoint
+from api.zerorpcapi_DCNetwork import ZeroRpcApiEndpointDCNetwork
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+    """
+    1. Create a data center network object (DCNetwork)
+    """
+    net = DCNetwork()
+
+    """
+    1b. add a monitoring agent to the DCNetwork
+    """
+    mon_api = ZeroRpcApiEndpointDCNetwork("0.0.0.0", 5151)
+    mon_api.connectDCNetwork(net)
+    mon_api.start()
+    """
+    2. Add (logical) data centers to the topology
+       (each data center is one "bigswitch" in our simplified
+        first prototype)
+    """
+    dc1 = net.addDatacenter("datacenter1")
+    dc2 = net.addDatacenter("datacenter2")
+    dc3 = net.addDatacenter("long_data_center_name3")
+    dc4 = net.addDatacenter(
+        "datacenter4",
+        metadata={"mydata": "we can also add arbitrary metadata to each DC"})
+
+    """
+    3. You can add additional SDN switches for data center
+       interconnections to the network.
+    """
+    s1 = net.addSwitch("s1")
+
+    """
+    4. Add links between your data centers and additional switches
+       to define your topology.
+       These links can use Mininet's features to limit bw, add delay or jitter.
+    """
+    net.addLink(dc1, dc2)
+    net.addLink("datacenter1", s1)
+    net.addLink(s1, dc3)
+    net.addLink(s1, "datacenter4")
+
+    """
+    5. We want to access and control our data centers from the outside,
+       e.g., we want to connect an orchestrator to start/stop compute
+       resources, a.k.a. VNFs (represented by Docker containers in the emulated network).
+
+       So we need to instantiate API endpoints (e.g. a zerorpc or REST
+       interface). Depending on the endpoint implementations, we can connect
+       one or more data centers to it, which can then be controlled through
+       this API, e.g., start/stop/list compute instances.
+    """
+    # create a new instance of an endpoint implementation
+    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
+    # connect data centers to this endpoint
+    zapi1.connectDatacenter(dc1)
+    zapi1.connectDatacenter(dc2)
+    zapi1.connectDatacenter(dc3)
+    zapi1.connectDatacenter(dc4)
+    # run API endpoint server (in another thread, don't block)
+    zapi1.start()
+
+    """
+    5.1. For our example, we create a second endpoint to illustrate that
+         this is supported by our design. This feature allows us to have
+         one API endpoint for each data center. This makes the emulation
+         environment more realistic because you can easily create one
+         OpenStack-like REST API endpoint for *each* data center.
+         This will look like a real-world multi PoP/data center deployment
+         from the perspective of an orchestrator.
+    """
+    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
+    zapi2.connectDatacenter(dc3)
+    zapi2.connectDatacenter(dc4)
+    zapi2.start()
+
+    """
+    6. Finally we are done and can start our network (the emulator).
+       We can also enter the Mininet CLI to interactively interact
+       with our compute resources (just like in default Mininet).
+       But we can also implement fully automated experiments that
+       can be executed again and again.
+    """
+    net.start()
+    net.CLI()
+    # when the user types exit in the CLI, we stop the emulator
+    net.stop()
+
+
+def main():
+    setLogLevel('info')  # set Mininet loglevel
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/src/emuvim/test/__main__.py b/src/emuvim/test/__main__.py
new file mode 100755 (executable)
index 0000000..f7fa66d
--- /dev/null
@@ -0,0 +1,7 @@
+import runner
+import os
+
+
+if __name__ == '__main__':
+    thisdir = os.path.dirname( os.path.realpath( __file__ ) )
+    runner.main(thisdir)
diff --git a/src/emuvim/test/runner.py b/src/emuvim/test/runner.py
new file mode 100755 (executable)
index 0000000..469a99e
--- /dev/null
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+"""
+Run all tests
+ -v : verbose output
+ -e : emulator test only (no API tests)
+ -a : API tests only
+"""
+
+from unittest import defaultTestLoader, TextTestRunner, TestSuite
+import os
+import sys
+from mininet.util import ensureRoot
+from mininet.clean import cleanup
+from mininet.log import setLogLevel
+
+
+def runTests( testDir, verbosity=1, emuonly=False, apionly=False ):
+    "discover and run all tests in testDir"
+    # ensure import paths work
+    sys.path.append("%s/.." % testDir)
+    # ensure root and cleanup before starting tests
+    ensureRoot()
+    cleanup()
+    # discover all tests in testDir
+    testSuite = defaultTestLoader.discover( testDir )
+    if emuonly:
+        testSuiteFiltered = [s for s in testSuite if "Emulator" in str(s)]
+        testSuite = TestSuite()
+        testSuite.addTests(testSuiteFiltered)
+    if apionly:
+        testSuiteFiltered = [s for s in testSuite if "Api" in str(s)]
+        testSuite = TestSuite()
+        testSuite.addTests(testSuiteFiltered)
+
+    # run tests
+    TextTestRunner( verbosity=verbosity ).run( testSuite )
+
+
+def main(thisdir):
+    setLogLevel( 'warning' )
+    # get the directory containing example tests
+    vlevel = 2 if '-v' in sys.argv else 1
+    emuonly = ('-e' in sys.argv)
+    apionly = ('-a' in sys.argv)
+    runTests(
+        testDir=thisdir, verbosity=vlevel, emuonly=emuonly, apionly=apionly)
+
+
+if __name__ == '__main__':
+    thisdir = os.path.dirname( os.path.realpath( __file__ ) )
+    main(thisdir)
diff --git a/src/emuvim/test/test_api_zerorpc.py b/src/emuvim/test/test_api_zerorpc.py
new file mode 100755 (executable)
index 0000000..2830872
--- /dev/null
@@ -0,0 +1 @@
+# TODO we'll need this at some point. But I'm lazy. A good REST API seems to be more important.
diff --git a/src/emuvim/test/test_emulator.py b/src/emuvim/test/test_emulator.py
new file mode 100755 (executable)
index 0000000..ef22a22
--- /dev/null
@@ -0,0 +1,360 @@
+"""
+Test suite to automatically test emulator functionalities.
+Directly interacts with the emulator through the Mininet-like
+Python API.
+
+Does not test API endpoints. This is done in separated test suites.
+"""
+
+import unittest
+import os
+import time
+import subprocess
+import docker
+from dcemulator.net import DCNetwork
+from dcemulator.node import EmulatorCompute
+from mininet.node import Host, Controller, OVSSwitch, Docker
+from mininet.link import TCLink
+from mininet.topo import SingleSwitchTopo, LinearTopo
+from mininet.log import setLogLevel
+from mininet.util import quietRun
+from mininet.clean import cleanup
+
+
+class simpleTestTopology( unittest.TestCase ):
+    """
+        Helper class to do basic test setups.
+        s1 -- s2 -- s3 -- ... -- sN
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.net = None
+        self.s = []   # list of switches
+        self.h = []   # list of hosts
+        self.d = []   # list of docker containers
+        self.dc = []  # list of data centers
+        self.docker_cli = None
+        super(simpleTestTopology, self).__init__(*args, **kwargs)
+
+    def createNet(
+            self,
+            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
+            autolinkswitches=False):
+        """
+        Creates a Mininet instance and automatically adds some
+        nodes to it.
+        """
+        self.net = net = DCNetwork()
+
+        # add some switches
+        for i in range(0, nswitches):
+            self.s.append(self.net.addSwitch('s%d' % i))
+        # if specified, chain all switches
+        if autolinkswitches:
+            for i in range(0, len(self.s) - 1):
+                self.net.addLink(self.s[i], self.s[i + 1])
+        # add some data centers
+        for i in range(0, ndatacenter):
+            self.dc.append(
+                self.net.addDatacenter(
+                    'datacenter%d' % i,
+                    metadata={"unittest_dc": i}))
+        # add some hosts
+        for i in range(0, nhosts):
+            self.h.append(self.net.addHost('h%d' % i))
+        # add some dockers
+        for i in range(0, ndockers):
+            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu"))
+
+    def startNet(self):
+        self.net.start()
+
+    def stopNet(self):
+        self.net.stop()
+
+    def getDockerCli(self):
+        """
+        Helper to interact with local docker instance.
+        """
+        if self.docker_cli is None:
+            self.docker_cli = docker.Client(
+                base_url='unix://var/run/docker.sock')
+        return self.docker_cli
+
+    def getDockernetContainers(self):
+        """
+        List the containers managed by dockernet
+        """
+        return self.getDockerCli().containers(filters={"label": "com.dockernet"})
+
+    @staticmethod
+    def setUp():
+        pass
+
+    @staticmethod
+    def tearDown():
+        cleanup()
+        # make sure that all pending docker containers are killed
+        with open(os.devnull, 'w') as devnull:
+            subprocess.call(
+                "sudo docker rm -f $(sudo docker ps --filter 'label=com.dockernet' -a -q)",
+                stdout=devnull,
+                stderr=devnull,
+                shell=True)
+
+
+#@unittest.skip("disabled topology tests for development")
+class testEmulatorTopology( simpleTestTopology ):
+    """
+    Tests to check the topology API of the emulator.
+    """
+
+    def testSingleDatacenter(self):
+        """
+        Create a single data center and check if its switch is up
+        by using manually added hosts. Tests especially the
+        data center specific addLink method.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[0])
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        assert(len(self.getDockernetContainers()) == 0)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 1)
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testMultipleDatacenterDirect(self):
+        """
+        Create two data centers and interconnect them.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[1])
+        self.net.addLink(self.dc[0], self.dc[1])
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        assert(len(self.getDockernetContainers()) == 0)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 2)
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testMultipleDatacenterWithIntermediateSwitches(self):
+        """
+        Create a two data centers and interconnect them with additional
+        switches between them.
+        """
+        # create network
+        self.createNet(
+            nswitches=3, ndatacenter=2, nhosts=2, ndockers=0,
+            autolinkswitches=True)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[1])
+        self.net.addLink(self.dc[0], self.s[0])
+        self.net.addLink(self.s[2], self.dc[1])
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        assert(len(self.getDockernetContainers()) == 0)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 5)
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+
+#@unittest.skip("disabled compute tests for development")
+class testEmulatorCompute( simpleTestTopology ):
+    """
+    Tests to check the emulator's API to add and remove
+    compute resources at runtime.
+    """
+
+    def testAddSingleComputeSingleDC(self):
+        """
+        Adds a single compute instance to
+        a single DC and checks its connectivity with a
+        manually added host.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockernetContainers()) == 1)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 1)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+        assert(self.dc[0].listCompute()[0].name == "vnf1")
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testRemoveSingleComputeSingleDC(self):
+        """
+        Test stop method for compute instances.
+        Check that the instance is really removed.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockernetContainers()) == 1)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 1)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
+        # remove compute resources
+        self.dc[0].stopCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockernetContainers()) == 0)
+        assert(len(self.net.hosts) == 1)
+        assert(len(self.net.switches) == 1)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testGetStatusSingleComputeSingleDC(self):
+        """
+        Check if the getStatus functionality of EmulatorCompute
+        objects works well.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockernetContainers()) == 1)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 1)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+        assert(self.dc[0].listCompute()[0].name == "vnf1")
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
+        # check get status
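+        # (getStatus returns a dict of container metadata; the "state" entry presumably mirrors Docker's inspect state)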
+        s = self.dc[0].containers.get("vnf1").getStatus()
+        assert(s["name"] == "vnf1")
+        assert(s["state"]["Running"])
+        # stop Mininet network
+        self.stopNet()
+
+    def testConnectivityMultiDC(self):
+        """
+        Test whether compute instances started in different data centers
+        can reach each other.
+        """
+        # create network
+        self.createNet(
+            nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
+            autolinkswitches=True)
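+        # (autolinkswitches is assumed to chain s[0]-s[1]-s[2], so the two DCs can reach each other through them)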
+        # setup links
+        self.net.addLink(self.dc[0], self.s[0])
+        self.net.addLink(self.dc[1], self.s[2])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        vnf2 = self.dc[1].startCompute("vnf2")
+        # check number of running nodes
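+        # (5 switches = the 3 chained switches plus one switch per data center)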
+        assert(len(self.getDockernetContainers()) == 2)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 5)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        assert(len(self.dc[1].listCompute()) == 1)
+        # check connectivity by using ping
+        assert(self.net.ping([vnf1, vnf2]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testInterleavedAddRemoveMultiDC(self):
+        """
+        Test multiple, interleaved add and remove operations and ensure
+        that all expected compute instances remain reachable at all times.
+        """
+        # create network
+        self.createNet(
+            nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
+            autolinkswitches=True)
+        # setup links
+        self.net.addLink(self.dc[0], self.s[0])
+        self.net.addLink(self.dc[1], self.s[2])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        vnf2 = self.dc[1].startCompute("vnf2")
+        # check number of running nodes
+        assert(len(self.getDockernetContainers()) == 2)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 5)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        assert(len(self.dc[1].listCompute()) == 1)
+        # check connectivity by using ping
+        assert(self.net.ping([vnf1, vnf2]) <= 0.0)
+        # remove compute resources
+        self.dc[0].stopCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockernetContainers()) == 1)
+        assert(len(self.net.hosts) == 1)
+        assert(len(self.net.switches) == 5)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 0)
+        assert(len(self.dc[1].listCompute()) == 1)
+        # add compute resources
+        vnf3 = self.dc[0].startCompute("vnf3")
+        vnf4 = self.dc[0].startCompute("vnf4")
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 2)
+        assert(len(self.dc[1].listCompute()) == 1)
+        assert(self.net.ping([vnf3, vnf2]) <= 0.0)
+        assert(self.net.ping([vnf4, vnf2]) <= 0.0)
+        # remove compute resources
+        self.dc[0].stopCompute("vnf3")
+        self.dc[0].stopCompute("vnf4")
+        self.dc[1].stopCompute("vnf2")
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 0)
+        assert(len(self.dc[1].listCompute()) == 0)
+        # stop Mininet network
+        self.stopNet()
+
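+# note: running these tests requires root privileges, since Mininet
+# creates network namespaces and virtual interfaces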
+if __name__ == '__main__':
+    unittest.main()