Merge pull request #1 from mpeuster/master
author    peusterm <manuel.peuster@uni-paderborn.de>
Tue, 2 Feb 2016 16:07:53 +0000 (17:07 +0100)
committer peusterm <manuel.peuster@uni-paderborn.de>
Tue, 2 Feb 2016 16:07:53 +0000 (17:07 +0100)
Initial emulator code from old private repository.

19 files changed:
.gitignore
LICENSE [new file with mode: 0644]
README.md
ansible/install.yml [new file with mode: 0644]
emuvim/api/__init__.py [new file with mode: 0644]
emuvim/api/zerorpcapi.py [new file with mode: 0644]
emuvim/cli/__init__.py [new file with mode: 0644]
emuvim/cli/compute.py [new file with mode: 0644]
emuvim/cli/network.py [new file with mode: 0644]
emuvim/cli/son-emu-cli [new file with mode: 0755]
emuvim/dcemulator/__init__.py [new file with mode: 0644]
emuvim/dcemulator/link.py [new file with mode: 0644]
emuvim/dcemulator/net.py [new file with mode: 0644]
emuvim/dcemulator/node.py [new file with mode: 0644]
emuvim/example_topology.py [new file with mode: 0644]
emuvim/test/__main__.py [new file with mode: 0644]
emuvim/test/runner.py [new file with mode: 0644]
emuvim/test/test_api_zerorpc.py [new file with mode: 0644]
emuvim/test/test_emulator.py [new file with mode: 0644]

index 1dbc687..5cbc2c6 100644 (file)
@@ -3,6 +3,7 @@ __pycache__/
 *.py[cod]
 *$py.class
 
+
 # C extensions
 *.so
 
@@ -60,3 +61,4 @@ target/
 
 #Ipython Notebook
 .ipynb_checkpoints
+
diff --git a/LICENSE b/LICENSE
new file mode 100644 (file)
index 0000000..8f71f43
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,202 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
index 4cd094a..6309f7c 100644 (file)
--- a/README.md
+++ b/README.md
@@ -1 +1,87 @@
-# son-emu
+# Distributed Cloud Emulator
+
+Contributors:
+
+* Manuel Peuster <manuel.peuster@upb.de>
+
+
+### Requirements
+* needs the latest Dockernet to be installed on the system
+ * the wrapper uses standard Python imports to use the Dockernet modules
+* Uses ZeroMQ-based RPC to expose a cloud-like interface that can be used by a demo CLI client
+ * `pip install zerorpc`
+ * This will be replaced/extended by a REST API later
+
+### Project structure
+* **emuvim/** all emulator code 
+ * **api/** Data center API endpoint implementations (zerorpc, OpenStack REST, ...)
+ * **cli/** CLI client to interact with a running emulator
+ * **dcemulator/** Dockernet wrapper that introduces the notion of data centers and API endpoints
+ * **test/** Unit tests
+ * **example_topology.py** An example topology script to show how topologies can be specified
+
+### Installation
+Automatic installation is provided through an Ansible playbook.
+* Requires: Ubuntu 14.04 LTS
+* `sudo apt-get install ansible git`
+* `sudo vim /etc/ansible/hosts`
+* Add: `localhost ansible_connection=local`
+
+#### 1. Dockernet
+* `git clone https://github.com/mpeuster/dockernet.git`
+* `cd dockernet/ansible`
+* `sudo ansible-playbook install.yml`
+* Wait (and have a coffee) ...
+
+#### 2. Emulator
+* Fork the repository.
+* `cd`
+* `git clone https://github.com/<user>/son-emu.git`
+* `cd son-emu/ansible`
+* `sudo ansible-playbook install.yml`
+
+
+### Run
+* First terminal:
+ * `cd son-emu/emuvim`
+ * `sudo python example_topology.py`
+* Second terminal:
+ * `cd son-emu/emuvim/cli`
+ * `./son-emu-cli compute start -d dc1 -n vnf1`
+ * `./son-emu-cli compute start -d dc1 -n vnf2`
+ * `./son-emu-cli compute list`
+* First terminal:
+ * `dockernet> vnf1 ping -c 2 vnf2`
+
+
+### TODO
+* DCemulator
+ * Advanced network model
+  * improve network management, multiple interfaces per container
+  * API to create multiple networks (per DC?)
+
+
+* Add resource constraints to datacenters
+* Check whether we can use the Mininet GUI to visualize our DCs
+* (Unit tests for zerorpc API endpoint)
+
+
+### Features / Done
+* Define a topology (Python script)
+ * Add data centers
+ * Add switches and links between them
+* Define API endpoints in topology
+ * call startAPI from the topology definition and start it in its own thread
+ * make it possible to start different API endpoints for different DCs
+* DCemulator
+ * correctly start and connect new compute resources at runtime
+ * remove and disconnect compute resources at runtime
+ * do IP management for new containers
+ * list active compute resources
+* Cloud-like reference API with CLI for demonstrations
+ * Write CLI client
+ * Start compute (name, DC, image, network)
+ * Stop compute
+* Create an Ansible-based automatic installation routine
+* Unit tests
+
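The CLI above is only a thin wrapper around the zerorpc endpoint, so the same interaction can also be scripted directly. A minimal sketch (illustration only, not part of this commit; it assumes the example topology is running and dc1 is connected to the endpoint on 127.0.0.1:4242 as configured in emuvim/example_topology.py):

```python
# Sketch only: drive the emulator's zerorpc endpoint without son-emu-cli.
# Assumes the example topology is running and dc1 is connected to the
# endpoint on tcp://127.0.0.1:4242 (see emuvim/example_topology.py).
import zerorpc

c = zerorpc.Client()
c.connect("tcp://127.0.0.1:4242")

# start two containers in dc1 (same effect as the son-emu-cli calls above)
c.compute_action_start("dc1", "vnf1", "ubuntu", {})
c.compute_action_start("dc1", "vnf2", "ubuntu", {})

# list all compute instances known to this endpoint: [(name, status), ...]
print c.compute_list(None)

# stop and remove them again
c.compute_action_stop("dc1", "vnf1")
c.compute_action_stop("dc1", "vnf2")
```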
diff --git a/ansible/install.yml b/ansible/install.yml
new file mode 100644 (file)
index 0000000..6e73908
--- /dev/null
@@ -0,0 +1,25 @@
+- hosts: localhost
+  tasks:
+   - name: updates apt
+     apt: update_cache=yes
+     
+   - name: install python-dev
+     apt: pkg=python-dev state=installed
+
+   - name: install python-zmq
+     apt: pkg=python-zmq state=installed
+
+   - name: install libzmq-dev
+     apt: pkg=libzmq-dev state=installed
+
+   - name: install pip
+     apt: pkg=python-pip state=installed
+
+   - name: install zerorpc
+     pip: name=zerorpc
+
+   - name: install tabulate
+     pip: name=tabulate
+
+   - name: install argparse
+     pip: name=argparse
diff --git a/emuvim/api/__init__.py b/emuvim/api/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/emuvim/api/zerorpcapi.py b/emuvim/api/zerorpcapi.py
new file mode 100644 (file)
index 0000000..7aecba4
--- /dev/null
@@ -0,0 +1,104 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+
+import logging
+import threading
+import zerorpc
+
+logging.basicConfig(level=logging.INFO)
+
+
+class ZeroRpcApiEndpoint(object):
+    """
+    Simple API endpoint that offers a zerorpc-based
+    interface. This interface will be used by the
+    default command line client.
+    It can be used as a reference to implement
+    REST interfaces providing the same semantics,
+    like e.g. OpenStack compute API.
+    """
+
+    def __init__(self, listenip, port):
+        self.dcs = {}
+        self.ip = listenip
+        self.port = port
+        logging.debug("Created API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def connectDatacenter(self, dc):
+        self.dcs[dc.name] = dc
+        logging.info("Connected DC(%s) to API endpoint %s(%s:%d)" % (
+            dc.name, self.__class__.__name__, self.ip, self.port))
+
+    def start(self):
+        thread = threading.Thread(target=self._api_server_thread, args=())
+        thread.daemon = True
+        thread.start()
+        logging.debug("Started API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def _api_server_thread(self):
+        s = zerorpc.Server(MultiDatacenterApi(self.dcs))
+        s.bind("tcp://%s:%d" % (self.ip, self.port))
+        s.run()
+
+
+class MultiDatacenterApi(object):
+    """
+        Just pass through the corresponding request to the
+        selected data center. Do not implement provisioning
+        logic here because we will have multiple API
+        endpoint implementations at the end.
+    """
+
+    def __init__(self, dcs):
+        self.dcs = dcs
+
+    def compute_action_start(self, dc_name, compute_name, image, network):
+        # network e.g. {"ip": "10.0.0.254/8"}
+        # TODO: what to return? UUID / given name / internal name?
+        logging.debug("RPC CALL: compute start")
+        try:
+            c = self.dcs.get(dc_name).startCompute(
+                compute_name, image=image, network=network)
+            return str(c.name)
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def compute_action_stop(self, dc_name, compute_name):
+        logging.debug("RPC CALL: compute stop")
+        try:
+            return self.dcs.get(dc_name).stopCompute(compute_name)
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def compute_list(self, dc_name):
+        logging.debug("RPC CALL: compute list")
+        try:
+            if dc_name is None:
+                # return list with all compute nodes in all DCs
+                all_containers = []
+                for dc in self.dcs.itervalues():
+                    all_containers += dc.listCompute()
+                return [(c.name, c.getStatus())
+                        for c in all_containers]
+            else:
+                # return list of compute nodes for specified DC
+                return [(c.name, c.getStatus())
+                        for c in self.dcs.get(dc_name).listCompute()]
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def compute_status(self, dc_name, compute_name):
+        logging.debug("RPC CALL: compute status")
+        try:
+            return self.dcs.get(
+                dc_name).containers.get(compute_name).getStatus()
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
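ZeroRpcApiEndpoint is meant to be instantiated from a topology script. A condensed sketch of the wiring, following the same pattern as emuvim/example_topology.py further below (requires Dockernet and root privileges):

```python
# Sketch: minimal topology that exposes one data center via zerorpc.
from dcemulator.net import DCNetwork
from api.zerorpcapi import ZeroRpcApiEndpoint

net = DCNetwork()
dc1 = net.addDatacenter("dc1")

api = ZeroRpcApiEndpoint("0.0.0.0", 4242)  # listen address and port
api.connectDatacenter(dc1)                 # make dc1 controllable via RPC
api.start()                                # non-blocking, runs in a daemon thread

net.start()  # start the emulated network
net.CLI()    # interactive Mininet CLI; type 'exit' to continue
net.stop()
```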
diff --git a/emuvim/cli/__init__.py b/emuvim/cli/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/emuvim/cli/compute.py b/emuvim/cli/compute.py
new file mode 100644 (file)
index 0000000..df40814
--- /dev/null
@@ -0,0 +1,101 @@
+"""
+son-emu compute CLI
+(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+
+import argparse
+import pprint
+from tabulate import tabulate
+import zerorpc
+
+
+pp = pprint.PrettyPrinter(indent=4)
+
+
+class ZeroRpcClient(object):
+
+    def __init__(self):
+        self.c = zerorpc.Client()
+        self.c.connect("tcp://127.0.0.1:4242")  # TODO hard coded for now. we'll change this later
+        self.cmds = {}
+
+    def execute_command(self, args):
+        if getattr(self, args["command"], None) is not None:
+            # call the local method with the same name as the command arg
+            getattr(self, args["command"])(args)
+        else:
+            print "Command not implemented."
+
+    def start(self, args):
+        network = {}
+        if args.get("network") is not None:
+            network = {"ip": args.get("network")}
+        r = self.c.compute_action_start(
+            args.get("datacenter"),
+            args.get("name"),
+            args.get("image"),
+            network)
+        pp.pprint(r)
+
+    def stop(self, args):
+        r = self.c.compute_action_stop(
+            args.get("datacenter"), args.get("name"))
+        pp.pprint(r)
+
+    def list(self, args):
+        r = self.c.compute_list(
+            args.get("datacenter"))
+        table = []
+        for c in r:
+            # for each container add a line to the output table
+            if len(c) > 1:
+                name = c[0]
+                status = c[1]
+                eth0ip = None
+                eth0status = "down"
+                if len(status.get("network")) > 0:
+                    eth0ip = status.get("network")[0][1]
+                    eth0status = "up" if status.get(
+                        "network")[0][3] else "down"
+                table.append([status.get("datacenter"),
+                              name,
+                              status.get("image"),
+                              eth0ip,
+                              eth0status,
+                              status.get("state").get("Status")])
+        headers = ["Datacenter",
+                   "Container",
+                   "Image",
+                   "eth0 IP",
+                   "eth0 status",
+                   "Status"]
+        print tabulate(table, headers=headers, tablefmt="grid")
+
+    def status(self, args):
+        r = self.c.compute_status(
+            args.get("datacenter"), args.get("name"))
+        pp.pprint(r)
+
+
+parser = argparse.ArgumentParser(description='son-emu compute')
+parser.add_argument(
+    "command",
+    help="Action to be executed: start|stop|list")
+parser.add_argument(
+    "--datacenter", "-d", dest="datacenter",
+    help="Data center to in which the compute instance should be executed")
+parser.add_argument(
+    "--name", "-n", dest="name",
+    help="Name of compute instance e.g. 'vnf1'")
+parser.add_argument(
+    "--image", dest="image",
+    help="Name of container image to be used e.g. 'ubuntu'")
+parser.add_argument(
+    "--net", dest="network",
+    help="Network properties of compute instance e.g. '10.0.0.123/8'")
+
+
+def main(argv):
+    args = vars(parser.parse_args(argv))
+    c = ZeroRpcClient()
+    c.execute_command(args)
diff --git a/emuvim/cli/network.py b/emuvim/cli/network.py
new file mode 100644 (file)
index 0000000..080b0ac
--- /dev/null
@@ -0,0 +1,9 @@
+"""
+son-emu network CLI
+(c) 2016 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+
+
+def main(argv):
+    print "This is the son-emu network CLI."
+    print "Arguments: %s" % str(argv)
diff --git a/emuvim/cli/son-emu-cli b/emuvim/cli/son-emu-cli
new file mode 100755 (executable)
index 0000000..56fe58b
--- /dev/null
@@ -0,0 +1,30 @@
+#!/usr/bin/python
+"""
+ Simple CLI client to interact with a running emulator.
+
+ (c) 2016 by Manuel Peuster <manuel.peuster@upb.de>
+
+ The CLI offers different tools, e.g., compute, network, ...
+ Each of these tools is implemented as an independent Python
+ module.
+
+ cli compute start dc1 my_name flavor_a
+ cli network create dc1 11.0.0.0/24
+"""
+
+import sys
+import compute
+import network
+
+
+def main():
+    if len(sys.argv) < 2:
+        print "Usage: son-emu-cli <toolname> <arguments>"
+        exit(0)
+    if sys.argv[1] == "compute":
+        compute.main(sys.argv[2:])
+    elif sys.argv[1] == "network":
+        network.main(sys.argv[2:])
+
+if __name__ == '__main__':
+    main()
diff --git a/emuvim/dcemulator/__init__.py b/emuvim/dcemulator/__init__.py
new file mode 100644 (file)
index 0000000..64f6616
--- /dev/null
@@ -0,0 +1,4 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+"""
\ No newline at end of file
diff --git a/emuvim/dcemulator/link.py b/emuvim/dcemulator/link.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/emuvim/dcemulator/net.py b/emuvim/dcemulator/net.py
new file mode 100644 (file)
index 0000000..eace03a
--- /dev/null
@@ -0,0 +1,104 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+import logging
+
+from mininet.net import Dockernet
+from mininet.node import Controller, OVSKernelSwitch, Switch, Docker, Host
+from mininet.cli import CLI
+from mininet.log import setLogLevel, info
+from mininet.link import TCLink, Link
+
+from node import Datacenter, EmulatorCompute
+
+
+class DCNetwork(Dockernet):
+    """
+    Wraps the original Mininet/Dockernet class and provides
+    methods to add data centers, switches, etc.
+
+    This class is used by topology definition scripts.
+    """
+
+    def __init__(self, **kwargs):
+        self.dcs = {}
+        # create a Mininet/Dockernet network
+        # call original Dockernet.__init__ and set up the default controller
+        Dockernet.__init__(
+            self, controller=Controller, switch=OVSKernelSwitch, **kwargs)
+        self.addController('c0')
+
+    def addDatacenter(self, name):
+        """
+        Create and add a logical cloud data center to the network.
+        """
+        if name in self.dcs:
+            raise Exception("Data center name already exists: %s" % name)
+        dc = Datacenter(name)
+        dc.net = self  # set reference to network
+        self.dcs[name] = dc
+        dc.create()  # finally create the data center in our Mininet instance
+        logging.info("added data center: %s" % name)
+        return dc
+
+    def addLink(self, node1, node2, **params):
+        """
+        Able to handle Datacenter objects as link
+        end points.
+        """
+        assert node1 is not None
+        assert node2 is not None
+        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
+        # ensure type of node1
+        if isinstance( node1, basestring ):
+            if node1 in self.dcs:
+                node1 = self.dcs[node1].switch
+        if isinstance( node1, Datacenter ):
+            node1 = node1.switch
+        # ensure type of node2
+        if isinstance( node2, basestring ):
+            if node2 in self.dcs:
+                node2 = self.dcs[node2].switch
+        if isinstance( node2, Datacenter ):
+            node2 = node2.switch
+        # try to give containers a default IP
+        if isinstance( node1, Docker ):
+            if not "params1" in params:
+                params["params1"] = {}
+            if not "ip" in params["params1"]:
+                params["params1"]["ip"] = self.getNextIp()
+        if isinstance( node2, Docker ):
+            if not "params2" in params:
+                params["params2"] = {}
+            if not "ip" in params["params2"]:
+                params["params2"]["ip"] = self.getNextIp()
+
+        return Dockernet.addLink(self, node1, node2, **params)  # TODO we need TCLinks with user defined performance here
+
+    def addDocker( self, name, **params ):
+        """
+        Wrapper for addDocker method to use custom container class.
+        """
+        return Dockernet.addDocker(self, name, cls=EmulatorCompute, **params)
+
+    def getAllContainers(self):
+        """
+        Returns a list with all containers within all data centers.
+        """
+        all_containers = []
+        for dc in self.dcs.itervalues():
+            all_containers += dc.listCompute()
+        return all_containers
+
+    def start(self):
+        # start all data centers, then the underlying Dockernet network
+        for dc in self.dcs.itervalues():
+            dc.start()
+        Dockernet.start(self)
+
+    def stop(self):
+        Dockernet.stop(self)
+
+    def CLI(self):
+        CLI(self)
diff --git a/emuvim/dcemulator/node.py b/emuvim/dcemulator/node.py
new file mode 100644 (file)
index 0000000..0e6eae8
--- /dev/null
@@ -0,0 +1,136 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+from mininet.node import Docker
+import logging
+
+
+DCDPID_BASE = 1000  # start of switch dpid's used for data center switches
+
+
+class EmulatorCompute(Docker):
+    """
+    Emulator specific compute node class.
+    Inherits from Dockernet's Docker host class.
+    Represents a single container connected to a (logical)
+    data center.
+    We can add emulator specific helper functions to it.
+    """
+
+    def __init__(
+            self, name, dimage, **kwargs):
+        logging.debug("Create EmulatorCompute instance: %s" % name)
+        self.datacenter = None  # pointer to current DC
+
+        # call original Docker.__init__
+        Docker.__init__(self, name, dimage, **kwargs)
+
+    def getNetworkStatus(self):
+        """
+        Helper method to receive information about the virtual networks
+        this compute instance is connected to.
+        """
+        # format: list of tuples (name, IP, MAC, isUp, status)
+        return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status())
+                for i in self.intfList()]
+
+    def getStatus(self):
+        """
+        Helper method to receive information about this compute instance.
+        """
+        status = {}
+        status["name"] = self.name
+        status["network"] = self.getNetworkStatus()
+        status["image"] = self.dimage
+        status["cpu_quota"] = self.cpu_quota
+        status["cpu_period"] = self.cpu_period
+        status["cpu_shares"] = self.cpu_shares
+        status["cpuset"] = self.cpuset
+        status["mem_limit"] = self.mem_limit
+        status["memswap_limit"] = self.memswap_limit
+        status["state"] = self.dcli.inspect_container(self.dc)["State"]
+        status["id"] = self.dcli.inspect_container(self.dc)["Id"]
+        status["datacenter"] = (None if self.datacenter is None
+                                else self.datacenter.name)
+        return status
+
+
+class Datacenter(object):
+    """
+    Represents a logical data center to which compute resources
+    (Docker containers) can be added at runtime.
+
+    Will also implement resource bookkeeping in later versions.
+    """
+
+    def __init__(self, name):
+        self.net = None  # DCNetwork to which we belong
+        self.name = name
+        self.switch = None  # first prototype assumes one "bigswitch" per DC
+        self.containers = {}  # keep track of running containers
+
+    def _get_next_dc_dpid(self):
+        global DCDPID_BASE
+        DCDPID_BASE += 1
+        return DCDPID_BASE
+
+    def create(self):
+        """
+        Each data center is represented by a single switch to which
+        compute resources can be connected at run time.
+
+        TODO: This will be changed in the future to support multiple networks
+        per data center
+        """
+        self.switch = self.net.addSwitch(
+            "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:])
+        logging.debug("created data center switch: %s" % str(self.switch))
+
+    def start(self):
+        pass
+
+    def startCompute(self, name, image=None, network=None):
+        """
+        Create a new container as compute resource and connect it to this
+        data center.
+
+        TODO: This interface will change to support multiple networks to which
+        a single container can be connected.
+        """
+        assert name is not None
+        # no duplications
+        if name in [c.name for c in self.net.getAllContainers()]:
+            raise Exception("Container with name %s already exists." % name)
+        # set default parameter
+        if image is None:
+            image = "ubuntu"
+        if network is None:
+            network = {}  # {"ip": "10.0.0.254/8"}
+        # create the container and connect it to the given network
+        d = self.net.addDocker("%s" % (name), dimage=image)
+        self.net.addLink(d, self.switch, params1=network)
+        # do bookkeeping
+        self.containers[name] = d
+        d.datacenter = self
+        return d  # we might use UUIDs for naming later on
+
+    def stopCompute(self, name):
+        """
+        Stop and remove a container from this data center.
+        """
+        assert name is not None
+        if name not in self.containers:
+            raise Exception("Container with name %s not found." % name)
+        self.net.removeLink(
+            link=None, node1=self.containers[name], node2=self.switch)
+        self.net.removeDocker("%s" % (name))
+        del self.containers[name]
+        return True
+
+    def listCompute(self):
+        """
+        Return a list of all running containers assigned to this
+        data center.
+        """
+        return list(self.containers.itervalues())
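For completeness, a hedged sketch of how the Datacenter compute API is used directly from Python; the unit tests in emuvim/test/test_emulator.py exercise exactly this path (requires Dockernet and root privileges):

```python
# Sketch: add and remove a compute resource at runtime via the Python API.
from dcemulator.net import DCNetwork

net = DCNetwork()
dc = net.addDatacenter("dc1")
net.start()

vnf1 = dc.startCompute("vnf1", image="ubuntu")   # new container, default IP
print vnf1.getStatus()["name"]                   # -> "vnf1"
print [c.name for c in dc.listCompute()]         # -> ["vnf1"]

dc.stopCompute("vnf1")                           # disconnect and remove it
net.stop()
```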
diff --git a/emuvim/example_topology.py b/emuvim/example_topology.py
new file mode 100644 (file)
index 0000000..91285f8
--- /dev/null
@@ -0,0 +1,110 @@
+"""
+This is an example topology for the distributed cloud emulator (dcemulator).
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+
+
+This is an example that shows how a user of the emulation tool can
+define network topologies with multiple emulated cloud data centers.
+
+The definition is done with a Python API which looks very similar to the
+Mininet API (in fact it is a wrapper for it).
+
+We only specify the topology *between* data centers, not within a single
+data center (data center internal setups or placements are not of interest;
+we want to experiment with VNF chains deployed across multiple PoPs).
+
+The original Mininet API has to be completely hidden and not be used by this
+script.
+"""
+import logging
+from mininet.log import setLogLevel
+from dcemulator.net import DCNetwork
+from api.zerorpcapi import ZeroRpcApiEndpoint
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+    """
+    1. Create a data center network object (DCNetwork)
+    """
+    net = DCNetwork()
+
+    """
+    2. Add (logical) data centers to the topology
+       (each data center is one "bigswitch" in our simplified
+        first prototype)
+    """
+    dc1 = net.addDatacenter("dc1")
+    dc2 = net.addDatacenter("dc2")
+    dc3 = net.addDatacenter("dc3")
+    dc4 = net.addDatacenter("dc4")
+
+    """
+    3. You can add additional SDN switches for data center
+       interconnections to the network.
+    """
+    s1 = net.addSwitch("s1")
+
+    """
+    4. Add links between your data centers and additional switches
+       to define your topology.
+       These links can use Mininet's features to limit bw, add delay or jitter.
+    """
+    net.addLink(dc1, dc2)
+    net.addLink("dc1", s1)
+    net.addLink(s1, "dc3")
+    net.addLink(s1, dc4)
+
+    """
+    5. We want to access and control our data centers from the outside,
+       e.g., we want to connect an orchestrator to start/stop compute
+       resources, aka VNFs (represented by Docker containers in the emulator).
+
+       So we need to instantiate API endpoints (e.g. a zerorpc or REST
+       interface). Depending on the endpoint implementations, we can connect
+       one or more data centers to it, which can then be controlled through
+       this API, e.g., start/stop/list compute instances.
+    """
+    # create a new instance of an endpoint implementation
+    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
+    # connect data centers to this endpoint
+    zapi1.connectDatacenter(dc1)
+    zapi1.connectDatacenter(dc2)
+    # run API endpoint server (in another thread, don't block)
+    zapi1.start()
+
+    """
+    5.1. For our example, we create a second endpoint to illustrate that
+         this is supported by our design. This feature allows us to have
+         one API endpoint for each data center. This makes the emulation
+         environment more realistic because you can easily create one
+         OpenStack-like REST API endpoint for *each* data center.
+         This will look like a real-world multi PoP/data center deployment
+         from the perspective of an orchestrator.
+    """
+    zapi2 = ZeroRpcApiEndpoint("0.0.0.0", 4343)
+    zapi2.connectDatacenter(dc3)
+    zapi2.connectDatacenter(dc4)
+    zapi2.start()
+
+    """
+    6. Finally we are done and can start our network (the emulator).
+       We can also enter the Mininet CLI to interactively interact
+       with our compute resources (just like in default Mininet).
+       But we can also implement fully automated experiments that
+       can be executed again and again.
+    """
+    net.start()
+    net.CLI()
+    # when the user types exit in the CLI, we stop the emulator
+    net.stop()
+
+
+def main():
+    setLogLevel('info')  # set Mininet loglevel
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/emuvim/test/__main__.py b/emuvim/test/__main__.py
new file mode 100644 (file)
index 0000000..f7fa66d
--- /dev/null
@@ -0,0 +1,7 @@
+import runner
+import os
+
+
+if __name__ == '__main__':
+    thisdir = os.path.dirname( os.path.realpath( __file__ ) )
+    runner.main(thisdir)
diff --git a/emuvim/test/runner.py b/emuvim/test/runner.py
new file mode 100644 (file)
index 0000000..93b0822
--- /dev/null
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+"""
+Run all tests
+ -v : verbose output
+ -e : emulator test only (no API tests)
+ -a : API tests only
+"""
+
+from unittest import defaultTestLoader, TextTestRunner, TestSuite
+import os
+import sys
+from mininet.util import ensureRoot
+from mininet.clean import cleanup
+from mininet.log import setLogLevel
+
+
+def runTests( testDir, verbosity=1, emuonly=False, apionly=False ):
+    "discover and run all tests in testDir"
+    # ensure root and cleanup before starting tests
+    ensureRoot()
+    cleanup()
+    # discover all tests in testDir
+    testSuite = defaultTestLoader.discover( testDir )
+    if emuonly:
+        testSuiteFiltered = [s for s in testSuite if "Emulator" in str(s)]
+        testSuite = TestSuite()
+        testSuite.addTests(testSuiteFiltered)
+    if apionly:
+        testSuiteFiltered = [s for s in testSuite if "Api" in str(s)]
+        testSuite = TestSuite()
+        testSuite.addTests(testSuiteFiltered)
+
+    # run tests
+    TextTestRunner( verbosity=verbosity ).run( testSuite )
+
+
+def main(thisdir):
+    setLogLevel( 'warning' )
+    # get the directory containing example tests
+    vlevel = 2 if '-v' in sys.argv else 1
+    emuonly = ('-e' in sys.argv)
+    apionly = ('-a' in sys.argv)
+    runTests(
+        testDir=thisdir, verbosity=vlevel, emuonly=emuonly, apionly=apionly)
+
+
+if __name__ == '__main__':
+    thisdir = os.path.dirname( os.path.realpath( __file__ ) )
+    main(thisdir)
diff --git a/emuvim/test/test_api_zerorpc.py b/emuvim/test/test_api_zerorpc.py
new file mode 100644 (file)
index 0000000..2830872
--- /dev/null
@@ -0,0 +1 @@
+# TODO we'll need this at some point. But I'm lazy. A good REST API seems to be more important.
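A hedged sketch of what such a test could eventually look like (illustration only, not part of this commit; it combines the endpoint, DCNetwork, and a zerorpc client from the code above and needs the same root/Dockernet environment as the emulator tests):

```python
# Sketch: possible shape of a zerorpc API test. Names follow the emulator
# code above; the 1-second sleep is a simple way to wait for the server
# thread to bind before connecting.
import time
import unittest
import zerorpc
from dcemulator.net import DCNetwork
from api.zerorpcapi import ZeroRpcApiEndpoint


class testZeroRpcApi(unittest.TestCase):

    def testComputeStartListStop(self):
        net = DCNetwork()
        dc = net.addDatacenter("dc1")
        api = ZeroRpcApiEndpoint("0.0.0.0", 4242)
        api.connectDatacenter(dc)
        api.start()
        net.start()
        time.sleep(1)  # give the server thread a moment to bind
        c = zerorpc.Client()
        c.connect("tcp://127.0.0.1:4242")
        self.assertEqual(c.compute_action_start("dc1", "vnf1", "ubuntu", {}), "vnf1")
        self.assertEqual(len(c.compute_list("dc1")), 1)
        self.assertTrue(c.compute_action_stop("dc1", "vnf1"))
        net.stop()


if __name__ == '__main__':
    unittest.main()
```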
diff --git a/emuvim/test/test_emulator.py b/emuvim/test/test_emulator.py
new file mode 100644 (file)
index 0000000..7da7aaf
--- /dev/null
@@ -0,0 +1,351 @@
+"""
+Test suite to automatically test emulator functionalities.
+Directly interacts with the emulator through the Mininet-like
+Python API.
+
+Does not test API endpoints. This is done in separate test suites.
+"""
+
+import unittest
+import os
+import time
+import subprocess
+import docker
+from dcemulator.net import DCNetwork
+from dcemulator.node import EmulatorCompute
+from mininet.node import Host, Controller, OVSSwitch, Docker
+from mininet.link import TCLink
+from mininet.topo import SingleSwitchTopo, LinearTopo
+from mininet.log import setLogLevel
+from mininet.util import quietRun
+from mininet.clean import cleanup
+
+
+class simpleTestTopology( unittest.TestCase ):
+    """
+        Helper class to do basic test setups.
+        s1 -- s2 -- s3 -- ... -- sN
+    """
+
+    def __init__(self, *args, **kwargs):
+        self.net = None
+        self.s = []   # list of switches
+        self.h = []   # list of hosts
+        self.d = []   # list of docker containers
+        self.dc = []  # list of data centers
+        self.docker_cli = None
+        super(simpleTestTopology, self).__init__(*args, **kwargs)
+
+    def createNet(
+            self,
+            nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
+            autolinkswitches=False):
+        """
+        Creates a Mininet instance and automatically adds some
+        nodes to it.
+        """
+        self.net = net = DCNetwork()
+
+        # add some switches
+        for i in range(0, nswitches):
+            self.s.append(self.net.addSwitch('s%d' % i))
+        # if specified, chain all switches
+        if autolinkswitches:
+            for i in range(0, len(self.s) - 1):
+                self.net.addLink(self.s[i], self.s[i + 1])
+        # add some data centers
+        for i in range(0, ndatacenter):
+            self.dc.append(self.net.addDatacenter('dc%d' % i))
+        # add some hosts
+        for i in range(0, nhosts):
+            self.h.append(self.net.addHost('h%d' % i))
+        # add some dockers
+        for i in range(0, ndockers):
+            self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu"))
+
+    def startNet(self):
+        self.net.start()
+
+    def stopNet(self):
+        self.net.stop()
+
+    def getDockerCli(self):
+        """
+        Helper to interact with local docker instance.
+        """
+        if self.docker_cli is None:
+            self.docker_cli = docker.Client(
+                base_url='unix://var/run/docker.sock')
+        return self.docker_cli
+
+    @staticmethod
+    def setUp():
+        pass
+
+    @staticmethod
+    def tearDown():
+        cleanup()
+        # make sure that all pending docker containers are killed
+        with open(os.devnull, 'w') as devnull:
+            subprocess.call(
+                "sudo docker rm -f $(sudo docker ps -a -q)",
+                stdout=devnull,
+                stderr=devnull,
+                shell=True)
+
+
+#@unittest.skip("disabled topology tests for development")
+class testEmulatorTopology( simpleTestTopology ):
+    """
+    Tests to check the topology API of the emulator.
+    """
+
+    def testSingleDatacenter(self):
+        """
+        Create a single data center and check if its switch is up
+        by using manually added hosts. Especially tests the
+        data center specific addLink method.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[0])
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 0)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 1)
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testMultipleDatacenterDirect(self):
+        """
+        Create two data centers and interconnect them.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[1])
+        self.net.addLink(self.dc[0], self.dc[1])
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 0)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 2)
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testMultipleDatacenterWithIntermediateSwitches(self):
+        """
+        Create two data centers and interconnect them with additional
+        switches between them.
+        """
+        # create network
+        self.createNet(
+            nswitches=3, ndatacenter=2, nhosts=2, ndockers=0,
+            autolinkswitches=True)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[1])
+        self.net.addLink(self.dc[0], self.s[0])
+        self.net.addLink(self.s[2], self.dc[1])
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 0)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 5)
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+
+#@unittest.skip("disabled compute tests for development")
+class testEmulatorCompute( simpleTestTopology ):
+    """
+    Tests to check the emulator's API to add and remove
+    compute resources at runtime.
+    """
+
+    def testAddSingleComputeSingleDC(self):
+        """
+        Adds a single compute instance to
+        a single DC and checks its connectivity with a
+        manually added host.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 1)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 1)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+        assert(self.dc[0].listCompute()[0].name == "vnf1")
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testRemoveSingleComputeSingleDC(self):
+        """
+        Test stop method for compute instances.
+        Check that the instance is really removed.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 1)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 1)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
+        # remove compute resources
+        self.dc[0].stopCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 0)
+        assert(len(self.net.hosts) == 1)
+        assert(len(self.net.switches) == 1)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testGetStatusSingleComputeSingleDC(self):
+        """
+        Check if the getStatus functionality of EmulatorCompute
+        objects works well.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 1)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 1)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+        assert(self.dc[0].listCompute()[0].name == "vnf1")
+        # check connectivity by using ping
+        assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
+        # check get status
+        s = self.dc[0].containers.get("vnf1").getStatus()
+        assert(s["name"] == "vnf1")
+        assert(s["state"]["Running"])
+        # stop Mininet network
+        self.stopNet()
+
+    def testConnectivityMultiDC(self):
+        """
+        Test if compute instances started in different data centers
+        are able to talk to each other.
+        """
+        # create network
+        self.createNet(
+            nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
+            autolinkswitches=True)
+        # setup links
+        self.net.addLink(self.dc[0], self.s[0])
+        self.net.addLink(self.dc[1], self.s[2])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        vnf2 = self.dc[1].startCompute("vnf2")
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 2)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 5)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        assert(len(self.dc[1].listCompute()) == 1)
+        # check connectivity by using ping
+        assert(self.net.ping([vnf1, vnf2]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testInterleavedAddRemoveMultiDC(self):
+        """
+        Test multiple, interleaved add and remove operations and ensure
+        that always all expected compute instances are reachable.
+        """
+        # create network
+        self.createNet(
+            nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
+            autolinkswitches=True)
+        # setup links
+        self.net.addLink(self.dc[0], self.s[0])
+        self.net.addLink(self.dc[1], self.s[2])
+        # start Mininet network
+        self.startNet()
+        # add compute resources
+        vnf1 = self.dc[0].startCompute("vnf1")
+        vnf2 = self.dc[1].startCompute("vnf2")
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 2)
+        assert(len(self.net.hosts) == 2)
+        assert(len(self.net.switches) == 5)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 1)
+        assert(len(self.dc[1].listCompute()) == 1)
+        # check connectivity by using ping
+        assert(self.net.ping([vnf1, vnf2]) <= 0.0)
+        # remove compute resources
+        self.dc[0].stopCompute("vnf1")
+        # check number of running nodes
+        assert(len(self.getDockerCli().containers()) == 1)
+        assert(len(self.net.hosts) == 1)
+        assert(len(self.net.switches) == 5)
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 0)
+        assert(len(self.dc[1].listCompute()) == 1)
+        # add compute resources
+        vnf3 = self.dc[0].startCompute("vnf3")
+        vnf4 = self.dc[0].startCompute("vnf4")
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 2)
+        assert(len(self.dc[1].listCompute()) == 1)
+        assert(self.net.ping([vnf3, vnf2]) <= 0.0)
+        assert(self.net.ping([vnf4, vnf2]) <= 0.0)
+        # remove compute resources
+        self.dc[0].stopCompute("vnf3")
+        self.dc[0].stopCompute("vnf4")
+        self.dc[1].stopCompute("vnf2")
+        # check compute list result
+        assert(len(self.dc[0].listCompute()) == 0)
+        assert(len(self.dc[1].listCompute()) == 0)
+        # stop Mininet network
+        self.stopNet()
+
+if __name__ == '__main__':
+    unittest.main()