Merge pull request #41 from mpeuster/master
author peusterm <manuel.peuster@uni-paderborn.de>
Tue, 8 Mar 2016 13:40:08 +0000 (14:40 +0100)
committer peusterm <manuel.peuster@uni-paderborn.de>
Tue, 8 Mar 2016 13:40:08 +0000 (14:40 +0100)
Added initial prototype of SONATA 'fake/dummy' gatekeeper API

15 files changed:
ansible/install.yml
setup.py
src/emuvim/api/openstack/README.md [new file with mode: 0644]
src/emuvim/api/sonata/README.md [new file with mode: 0644]
src/emuvim/api/sonata/__init__.py [new file with mode: 0644]
src/emuvim/api/sonata/dummygatekeeper.py [new file with mode: 0644]
src/emuvim/api/zerorpc/__init__.py [new file with mode: 0644]
src/emuvim/api/zerorpc/compute.py [new file with mode: 0644]
src/emuvim/api/zerorpc/network.py [new file with mode: 0644]
src/emuvim/api/zerorpcapi.py [deleted file]
src/emuvim/api/zerorpcapi_DCNetwork.py [deleted file]
src/emuvim/dcemulator/net.py
src/emuvim/dcemulator/node.py
src/emuvim/examples/simple_topology.py
src/emuvim/examples/sonata_y1_demo_topology_1.py

index dd6ef4c..cd43336 100755 (executable)
@@ -41,3 +41,9 @@
 
    - name: install pytest
      pip: name=pytest state=latest
+
+   - name: install Flask
+     pip: name=Flask state=latest
+
+   - name: install flask_restful
+     pip: name=flask_restful state=latest
index f2a6ce9..1973326 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,9 @@ setup(name='emuvim',
           'six>=1.9',
           'ryu',
           'oslo.config',
-          'pytest'
+          'pytest',
+          'Flask',
+          'flask_restful'
       ],
       zip_safe=False,
       entry_points={
diff --git a/src/emuvim/api/openstack/README.md b/src/emuvim/api/openstack/README.md
new file mode 100644 (file)
index 0000000..596390e
--- /dev/null
@@ -0,0 +1 @@
+This is a placeholder. This folder will contain an OpenStack/HEAT like interface to the emulator.
\ No newline at end of file
diff --git a/src/emuvim/api/sonata/README.md b/src/emuvim/api/sonata/README.md
new file mode 100644 (file)
index 0000000..969ba22
--- /dev/null
@@ -0,0 +1,71 @@
+# SONATA dummy gatekeeper API:
+
+## Upload a package (*.son) file:
+
+To upload the file `simplest-example.son` do:
+
+* `curl -i -X POST -F file=@simplest-example.son http://127.0.0.1:8000/api/packages/uploads`
+
+To list all uploaded packages do:
+
+* `curl http://127.0.0.1:8000/api/packages/uploads`
+
+To instantiate (start) a service do:
+
+* `curl -X POST http://127.0.0.1:8000/api/instantiations -d "{\"service_uuid\":\"59446b64-f941-40a8-b511-effb0512c21b\"}"`
+
+To list all running services do:
+
+* `curl http://127.0.0.1:8000/api/instantiations`
+
+
+## API definition
+
+This page describes the dummy gatekeeper API. This gatekeeper fakes the original platform gatekeeper during the development of SDK tools like son-push.
+
+It is also able to deploy our example service package (not any arbitrary service package!) in the emulator for the Y1 demo.
+
+_Note: This API should converge to the API of the original GK as much as possible!_
+
+## REST API:
+<table>
+<tr>
+<th>Endpoint:</th>
+<th>Method:</th>
+<th>Header:</th>
+<th>Body:</th>
+<th>Response:</th>
+</tr>
+<tr>
+<td>/api/packages/uploads</td>
+<td>POST</td>
+<td>-</td>
+<td>{file-content} as enctype=multipart/form-data</td>
+<td>{"service_uuid": "c880aaab-f3b9-43ac-ac6b-3d27b46146b7", "size": 456, "sha1": "49ee6468dfa4ecbad440d669b249d523a38651be", "error": null}</td>
+</tr>
+<tr>
+<td>/api/packages/uploads</td>
+<td>GET</td>
+<td>-</td>
+<td></td>
+<td>{service_uuid_list: ["c880aaab-f3b9-43ac-ac6b-3d27b46146b7", "c880aaab-f3b9-43ac-ac6b-3d27b46146b8", "c880aaab-f3b9-43ac-ac6b-3d27b46146b9"]}</td>
+</tr>
+<tr>
+<td>/api/instantiations</td>
+<td>POST</td>
+<td>-</td>
+<td>{service_uuid: "c880aaab-f3b9-43ac-ac6b-3d27b46146b7"}</td>
+<td>{service_instance_uuid: "de4567-f3b9-43ac-ac6b-3d27b461123"}</td>
+</tr>
+<tr>
+<td>/api/instantiations</td>
+<td>GET</td>
+<td>-</td>
+<td></td>
+<td>{service_instance_uuid_list: ["de4567-f3b9-43ac-ac6b-3d27b461123", "de4567-f3b9-43ac-ac6b-3d27b461124", "de4567-f3b9-43ac-ac6b-3d27b461125"]}</td>
+</tr>
+</table>
+
+## Run REST API in standalone mode (without emulator):
+
+* `sudo python src/emuvim/api/sonata/dummygatekeeper.py`
\ No newline at end of file
diff --git a/src/emuvim/api/sonata/__init__.py b/src/emuvim/api/sonata/__init__.py
new file mode 100644 (file)
index 0000000..14182c5
--- /dev/null
@@ -0,0 +1,43 @@
+"""
+This module implements a simple REST API that behaves like SONATA's gatekeeper.
+
+It is only used to support the development of SONATA's SDK tools and to demonstrate
+the year 1 version of the emulator until the integration with WP4's orchestrator is done.
+"""
+
+import logging
+import threading
+import dummygatekeeper as dgk
+
+
+class SonataDummyGatekeeperEndpoint(object):
+    """
+    Creates and starts a REST API based on Flask in an
+    additional thread.
+
+    Can connect this API to data centers defined in an emulator
+    topology.
+    """
+
+    def __init__(self, listenip, port):
+        self.dcs = {}
+        self.ip = listenip
+        self.port = port
+        logging.debug("Created API endpoint %s" % self)
+
+    def __repr__(self):
+        return "%s(%s:%d)" % (self.__class__.__name__, self.ip, self.port)
+
+    def connectDatacenter(self, dc):
+        self.dcs[dc.label] = dc
+        logging.info("Connected DC(%s) to API endpoint %s" % (
+            dc, self))
+
+    def start(self):
+        thread = threading.Thread(target=self._api_server_thread, args=())
+        thread.daemon = True
+        thread.start()
+        logging.debug("Started API endpoint %s" % self)
+
+    def _api_server_thread(self):
+        dgk.start_rest_api(self.ip, self.port)
diff --git a/src/emuvim/api/sonata/dummygatekeeper.py b/src/emuvim/api/sonata/dummygatekeeper.py
new file mode 100644 (file)
index 0000000..f258e49
--- /dev/null
@@ -0,0 +1,142 @@
+"""
+This module implements a simple REST API that behaves like SONATA's gatekeeper.
+
+It is only used to support the development of SONATA's SDK tools and to demonstrate
+the year 1 version of the emulator until the integration with WP4's orchestrator is done.
+"""
+
+import logging
+import os
+import uuid
+import hashlib
+import json
+from flask import Flask, request
+import flask_restful as fr
+
+logging.getLogger("werkzeug").setLevel(logging.WARNING)
+
+
+UPLOAD_FOLDER = "/tmp/son-dummy-gk/uploads/"
+CATALOG_FOLDER = "/tmp/son-dummy-gk/catalog/"
+
+
+class Gatekeeper(object):
+
+    def __init__(self):
+        self.packages = dict()
+        self.instantiations = dict()
+        logging.info("Create SONATA dummy gatekeeper.")
+
+    def unpack_service_package(self, service_uuid):
+        # TODO implement method
+        # 1. unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
+        pass
+
+    def start_service(self, service_uuid):
+        # TODO implement method
+        # 1. parse descriptors
+        # 2. do the corresponding dc.startCompute(name="foobar") calls
+        # 3. store references to the compute objects in self.instantiations
+        pass
+
+
+"""
+Resource definitions and API endpoints
+"""
+
+
+class Packages(fr.Resource):
+
+    def post(self):
+        """
+        Upload a *.son service package to the dummy gatekeeper.
+
+        We expect request with a *.son file and store it in UPLOAD_FOLDER
+        :return: UUID
+        """
+        try:
+            # get file contents
+            file = request.files['file']
+            # generate a uuid to reference this package
+            service_uuid = str(uuid.uuid4())
+            hash = hashlib.sha1(str(file)).hexdigest()
+            # ensure that upload folder exists
+            ensure_dir(UPLOAD_FOLDER)
+            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
+            # store *.son file to disk
+            file.save(upload_path)
+            size = os.path.getsize(upload_path)
+            # store a reference to the uploaded package in our gatekeeper
+            GK.packages[service_uuid] = upload_path
+            # generate the JSON result
+            return {"service_uuid": service_uuid, "size": size, "sha1": hash, "error": None}
+        except Exception as ex:
+            logging.exception("Service package upload failed:")
+            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}
+
+    def get(self):
+        """
+        Return a list of UUID's of uploaded service packages.
+        :return: dict/list
+        """
+        return {"service_uuid_list": list(GK.packages.iterkeys())}
+
+
+class Instantiations(fr.Resource):
+
+    def post(self):
+        """
+        Instantiate a service specified by its UUID.
+        Will return a new UUID to identify the running service instance.
+        :return: UUID
+        """
+        # TODO implement method (start real service)
+        json_data = request.get_json(force=True)
+        service_uuid = json_data.get("service_uuid")
+        if service_uuid is not None:
+            service_instance_uuid = str(uuid.uuid4())
+            GK.instantiations[service_instance_uuid] = service_uuid
+            logging.info("Starting service %r" % service_uuid)
+            return {"service_instance_uuid": service_instance_uuid}
+        return None
+
+    def get(self):
+        """
+        Returns a list of UUIDs containing all running services.
+        :return: dict / list
+        """
+        # TODO implement method
+        return {"service_instance_uuid_list": list(GK.instantiations.iterkeys())}
+
+# create a single, global GK object
+GK = Gatekeeper()
+# setup Flask
+app = Flask(__name__)
+app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
+api = fr.Api(app)
+# define endpoints
+api.add_resource(Packages, '/api/packages/uploads')
+api.add_resource(Instantiations, '/api/instantiations')
+
+
+def start_rest_api(host, port):
+    # start the Flask server (not the best performance but ok for our use case)
+    app.run(host=host,
+            port=port,
+            debug=True,
+            use_reloader=False  # this is needed to run Flask in a non-main thread
+            )
+
+
+def ensure_dir(name):
+    if not os.path.exists(name):
+       os.makedirs(name)
+
+
+if __name__ == '__main__':
+    """
+    Allow running the API in standalone mode.
+    """
+    logging.getLogger("werkzeug").setLevel(logging.INFO)
+    start_rest_api("0.0.0.0", 8000)
+
diff --git a/src/emuvim/api/zerorpc/__init__.py b/src/emuvim/api/zerorpc/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/src/emuvim/api/zerorpc/compute.py b/src/emuvim/api/zerorpc/compute.py
new file mode 100644 (file)
index 0000000..59b960c
--- /dev/null
@@ -0,0 +1,128 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
+"""
+
+import logging
+import threading
+import zerorpc
+
+logging.basicConfig(level=logging.INFO)
+
+
+class ZeroRpcApiEndpoint(object):
+    """
+    Simple API endpoint that offers a zerorpc-based
+    interface. This interface will be used by the
+    default command line client.
+    It can be used as a reference to implement
+    REST interfaces providing the same semantics,
+    like e.g. OpenStack compute API.
+    """
+
+    def __init__(self, listenip, port):
+        self.dcs = {}
+        self.ip = listenip
+        self.port = port
+        logging.debug("Created API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def connectDatacenter(self, dc):
+        self.dcs[dc.label] = dc
+        logging.info("Connected DC(%s) to API endpoint %s(%s:%d)" % (
+            dc.label, self.__class__.__name__, self.ip, self.port))
+
+    def start(self):
+        thread = threading.Thread(target=self._api_server_thread, args=())
+        thread.daemon = True
+        thread.start()
+        logging.debug("Started API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def _api_server_thread(self):
+        s = zerorpc.Server(MultiDatacenterApi(self.dcs))
+        s.bind("tcp://%s:%d" % (self.ip, self.port))
+        s.run()
+
+
+class MultiDatacenterApi(object):
+    """
+        Just pass through the corresponding request to the
+        selected data center. Do not implement provisioning
+        logic here because we will have multiple API
+        endpoint implementations at the end.
+    """
+
+    def __init__(self, dcs):
+        self.dcs = dcs
+
+    def compute_action_start(self, dc_label, compute_name, image, command, network):
+        """
+        Start a new compute instance: A docker container
+        :param dc_label: name of the DC
+        :param compute_name: compute container name
+        :param image: image name
+        :param command: command to execute
+        :param network:
+        :return: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
+        """
+        # TODO what to return UUID / given name / internal name ?
+        logging.debug("RPC CALL: compute start")
+        try:
+            c = self.dcs.get(dc_label).startCompute(
+                compute_name, image=image, command=command, network=network)
+            return str(c.name)
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def compute_action_stop(self, dc_label, compute_name):
+        logging.debug("RPC CALL: compute stop")
+        try:
+            return self.dcs.get(dc_label).stopCompute(compute_name)
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def compute_list(self, dc_label):
+        logging.debug("RPC CALL: compute list")
+        try:
+            if dc_label is None:
+                # return list with all compute nodes in all DCs
+                all_containers = []
+                for dc in self.dcs.itervalues():
+                    all_containers += dc.listCompute()
+                return [(c.name, c.getStatus())
+                        for c in all_containers]
+            else:
+                # return list of compute nodes for specified DC
+                return [(c.name, c.getStatus())
+                        for c in self.dcs.get(dc_label).listCompute()]
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def compute_status(self, dc_label, compute_name):
+        logging.debug("RPC CALL: compute status")
+        try:
+            return self.dcs.get(
+                dc_label).containers.get(compute_name).getStatus()
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def datacenter_list(self):
+        logging.debug("RPC CALL: datacenter list")
+        try:
+            return [d.getStatus() for d in self.dcs.itervalues()]
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def datacenter_status(self, dc_label):
+        logging.debug("RPC CALL: datacenter status")
+        try:
+                return self.dcs.get(dc_label).getStatus()
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
diff --git a/src/emuvim/api/zerorpc/network.py b/src/emuvim/api/zerorpc/network.py
new file mode 100644 (file)
index 0000000..f232166
--- /dev/null
@@ -0,0 +1,103 @@
+"""
+Distributed Cloud Emulator (dcemulator)
+"""
+
+import logging
+import threading
+import zerorpc
+
+
+logging.basicConfig(level=logging.INFO)
+
+
+class ZeroRpcApiEndpointDCNetwork(object):
+    """
+    Simple API endpoint that offers a zerorpc-based
+    interface. This interface will be used by the
+    default command line client.
+    It can be used as a reference to implement
+    REST interfaces providing the same semantics,
+    like e.g. OpenStack compute API.
+    """
+
+    def __init__(self, listenip, port, DCNetwork=None):
+        if DCNetwork :
+            self.connectDCNetwork(DCNetwork)
+        self.ip = listenip
+        self.port = port
+        logging.debug("Created monitoring API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def connectDCNetwork(self, net):
+        self.net = net
+        logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def start(self):
+        thread = threading.Thread(target=self._api_server_thread, args=())
+        thread.daemon = True
+        thread.start()
+        logging.debug("Started API endpoint %s(%s:%d)" % (
+            self.__class__.__name__, self.ip, self.port))
+
+    def _api_server_thread(self):
+        s = zerorpc.Server(DCNetworkApi(self.net))
+        s.bind("tcp://%s:%d" % (self.ip, self.port))
+        s.run()
+
+    def stop(self):
+        logging.info("Stop the monitoring API endpoint")
+        return
+
+
+class DCNetworkApi(object):
+    """
+        The networking and monitoring commands need the scope of the
+        whole DC network to find the requested vnf. So this API is intended
+        to work with a DCNetwork.
+        Just pass through the corresponding request to the
+        selected data center network. Do not implement provisioning
+        logic here because we will have multiple API
+        endpoint implementations at the end.
+    """
+
+    def __init__(self, net):
+        self.net = net
+
+    def network_action_start(self, vnf_src_name, vnf_dst_name):
+        # call DCNetwork method, not really datacenter specific API for now...
+        # provided dc name needs to be part of API endpoint
+        # no check if vnfs are really connected to this datacenter...
+        logging.debug("RPC CALL: network chain start")
+        try:
+            c = self.net.setChain(
+                vnf_src_name, vnf_dst_name)
+            return str(c)
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    def network_action_stop(self, vnf_src_name, vnf_dst_name):
+        # call DCNetwork method, not really datacenter specific API for now...
+        # provided dc name needs to be part of API endpoint
+        # no check if vnfs are really connected to this datacenter...
+        logging.debug("RPC CALL: network chain stop")
+        try:
+            c = self.net.setChain(
+                vnf_src_name, vnf_dst_name, cmd='del-flows')
+            return c
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+    # get egress(default) or ingress rate of a vnf
+    def monitor_get_rate(self, vnf_name, direction):
+        logging.debug("RPC CALL: get rate")
+        try:
+            c = self.net.monitor_agent.get_rate(vnf_name, direction)
+            return c
+        except Exception as ex:
+            logging.exception("RPC error.")
+            return ex.message
+
+
diff --git a/src/emuvim/api/zerorpcapi.py b/src/emuvim/api/zerorpcapi.py
deleted file mode 100755 (executable)
index 59b960c..0000000
+++ /dev/null
@@ -1,128 +0,0 @@
-"""
-Distributed Cloud Emulator (dcemulator)
-(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
-"""
-
-import logging
-import threading
-import zerorpc
-
-logging.basicConfig(level=logging.INFO)
-
-
-class ZeroRpcApiEndpoint(object):
-    """
-    Simple API endpoint that offers a zerorpc-based
-    interface. This interface will be used by the
-    default command line client.
-    It can be used as a reference to implement
-    REST interfaces providing the same semantics,
-    like e.g. OpenStack compute API.
-    """
-
-    def __init__(self, listenip, port):
-        self.dcs = {}
-        self.ip = listenip
-        self.port = port
-        logging.debug("Created API endpoint %s(%s:%d)" % (
-            self.__class__.__name__, self.ip, self.port))
-
-    def connectDatacenter(self, dc):
-        self.dcs[dc.label] = dc
-        logging.info("Connected DC(%s) to API endpoint %s(%s:%d)" % (
-            dc.label, self.__class__.__name__, self.ip, self.port))
-
-    def start(self):
-        thread = threading.Thread(target=self._api_server_thread, args=())
-        thread.daemon = True
-        thread.start()
-        logging.debug("Started API endpoint %s(%s:%d)" % (
-            self.__class__.__name__, self.ip, self.port))
-
-    def _api_server_thread(self):
-        s = zerorpc.Server(MultiDatacenterApi(self.dcs))
-        s.bind("tcp://%s:%d" % (self.ip, self.port))
-        s.run()
-
-
-class MultiDatacenterApi(object):
-    """
-        Just pass through the corresponding request to the
-        selected data center. Do not implement provisioning
-        logic here because will will have multiple API
-        endpoint implementations at the end.
-    """
-
-    def __init__(self, dcs):
-        self.dcs = dcs
-
-    def compute_action_start(self, dc_label, compute_name, image, command, network):
-        """
-        Start a new compute instance: A docker container
-        :param dc_label: name of the DC
-        :param compute_name: compute container name
-        :param image: image name
-        :param command: command to execute
-        :param network:
-        :return: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
-        """
-        # TODO what to return UUID / given name / internal name ?
-        logging.debug("RPC CALL: compute start")
-        try:
-            c = self.dcs.get(dc_label).startCompute(
-                compute_name, image=image, command=command, network=network)
-            return str(c.name)
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def compute_action_stop(self, dc_label, compute_name):
-        logging.debug("RPC CALL: compute stop")
-        try:
-            return self.dcs.get(dc_label).stopCompute(compute_name)
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def compute_list(self, dc_label):
-        logging.debug("RPC CALL: compute list")
-        try:
-            if dc_label is None:
-                # return list with all compute nodes in all DCs
-                all_containers = []
-                for dc in self.dcs.itervalues():
-                    all_containers += dc.listCompute()
-                return [(c.name, c.getStatus())
-                        for c in all_containers]
-            else:
-                # return list of compute nodes for specified DC
-                return [(c.name, c.getStatus())
-                        for c in self.dcs.get(dc_label).listCompute()]
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def compute_status(self, dc_label, compute_name):
-        logging.debug("RPC CALL: compute status")
-        try:
-            return self.dcs.get(
-                dc_label).containers.get(compute_name).getStatus()
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def datacenter_list(self):
-        logging.debug("RPC CALL: datacenter list")
-        try:
-            return [d.getStatus() for d in self.dcs.itervalues()]
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
-
-    def datacenter_status(self, dc_label):
-        logging.debug("RPC CALL: datacenter status")
-        try:
-                return self.dcs.get(dc_label).getStatus()
-        except Exception as ex:
-            logging.exception("RPC error.")
-            return ex.message
diff --git a/src/emuvim/api/zerorpcapi_DCNetwork.py b/src/emuvim/api/zerorpcapi_DCNetwork.py
deleted file mode 100755 (executable)
index 27527aa..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-"""\r
-Distributed Cloud Emulator (dcemulator)\r
-(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>\r
-"""\r
-\r
-import logging\r
-import threading\r
-import zerorpc\r
-\r
-\r
-logging.basicConfig(level=logging.INFO)\r
-\r
-\r
-class ZeroRpcApiEndpointDCNetwork(object):\r
-    """\r
-    Simple API endpoint that offers a zerorpc-based\r
-    interface. This interface will be used by the\r
-    default command line client.\r
-    It can be used as a reference to implement\r
-    REST interfaces providing the same semantics,\r
-    like e.g. OpenStack compute API.\r
-    """\r
-\r
-    def __init__(self, listenip, port, DCNetwork=None):\r
-        if DCNetwork :\r
-            self.connectDCNetwork(DCNetwork)\r
-        self.ip = listenip\r
-        self.port = port\r
-        logging.debug("Created monitoring API endpoint %s(%s:%d)" % (\r
-            self.__class__.__name__, self.ip, self.port))\r
-\r
-    def connectDCNetwork(self, net):\r
-        self.net = net\r
-        logging.info("Connected DCNetwork to API endpoint %s(%s:%d)" % (\r
-            self.__class__.__name__, self.ip, self.port))\r
-\r
-    def start(self):\r
-        thread = threading.Thread(target=self._api_server_thread, args=())\r
-        thread.daemon = True\r
-        thread.start()\r
-        logging.debug("Started API endpoint %s(%s:%d)" % (\r
-            self.__class__.__name__, self.ip, self.port))\r
-\r
-    def _api_server_thread(self):\r
-        s = zerorpc.Server(DCNetworkApi(self.net))\r
-        s.bind("tcp://%s:%d" % (self.ip, self.port))\r
-        s.run()\r
-\r
-    def stop(self):\r
-        logging.info("Stop the monitoring API endpoint")\r
-        return\r
-\r
-\r
-class DCNetworkApi(object):\r
-    """\r
-        The networking and monitoring commands need the scope of the\r
-        whole DC network to find the requested vnf. So this API is intended\r
-        to work with a DCNetwork.\r
-        Just pass through the corresponding request to the\r
-        selected data center network. Do not implement provisioning\r
-        logic here because will will have multiple API\r
-        endpoint implementations at the end.\r
-    """\r
-\r
-    def __init__(self, net):\r
-        self.net = net\r
-\r
-    def network_action_start(self, vnf_src_name, vnf_dst_name):\r
-        # call DCNetwork method, not really datacenter specific API for now...\r
-        # provided dc name needs to be part of API endpoint\r
-        # no check if vnfs are really connected to this datacenter...\r
-        logging.debug("RPC CALL: network chain start")\r
-        try:\r
-            c = self.net.setChain(\r
-                vnf_src_name, vnf_dst_name)\r
-            return str(c)\r
-        except Exception as ex:\r
-            logging.exception("RPC error.")\r
-            return ex.message\r
-\r
-    def network_action_stop(self, vnf_src_name, vnf_dst_name):\r
-        # call DCNetwork method, not really datacenter specific API for now...\r
-        # provided dc name needs to be part of API endpoint\r
-        # no check if vnfs are really connected to this datacenter...\r
-        logging.debug("RPC CALL: network chain stop")\r
-        try:\r
-            c = self.net.setChain(\r
-                vnf_src_name, vnf_dst_name, cmd='del-flows')\r
-            return c\r
-        except Exception as ex:\r
-            logging.exception("RPC error.")\r
-            return ex.message\r
-\r
-    # get egress(default) or ingress rate of a vnf\r
-    def monitor_get_rate(self, vnf_name, direction):\r
-        logging.debug("RPC CALL: get rate")\r
-        try:\r
-            c = self.net.monitor_agent.get_rate(vnf_name, direction)\r
-            return c\r
-        except Exception as ex:\r
-            logging.exception("RPC error.")\r
-            return ex.message\r
-\r
-\r
index e01e950..00bb65c 100755 (executable)
@@ -12,7 +12,7 @@ from mininet.net import Dockernet
 from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController
 from mininet.cli import CLI
 from mininet.log import setLogLevel, info, debug
-from mininet.link import TCLink, Link
+from mininet.link import TCLink
 import networkx as nx
 from emuvim.dcemulator.monitoring import DCNetworkMonitor
 
@@ -82,17 +82,22 @@ class DCNetwork(Dockernet):
             node2 = node2.switch
         # try to give containers a default IP
         if isinstance( node1, Docker ):
-            if not "params1" in params:
+            if "params1" not in params:
                 params["params1"] = {}
-            if not "ip" in params["params1"]:
+            if "ip" not in params["params1"]:
                 params["params1"]["ip"] = self.getNextIp()
         if isinstance( node2, Docker ):
-            if not "params2" in params:
+            if "params2" not in params:
                 params["params2"] = {}
-            if not "ip" in params["params2"]:
+            if "ip" not in params["params2"]:
                 params["params2"]["ip"] = self.getNextIp()
+        # ensure that we allow TCLinks between data centers
+        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
+        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
+        if "cls" not in params:
+            params["cls"] = TCLink
 
-        link = Dockernet.addLink(self, node1, node2, **params)  # TODO we need TCLinks with user defined performance here
+        link = Dockernet.addLink(self, node1, node2, **params)
 
         # add edge and assigned port number to graph in both directions between node1 and node2
         self.DCNetwork_graph.add_edge(node1.name, node2.name, \
index 336126c..6030153 100755 (executable)
@@ -3,6 +3,7 @@ Distributed Cloud Emulator (dcemulator)
 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
 """
 from mininet.node import Docker
+from mininet.link import Link
 import logging
 
 
@@ -79,6 +80,9 @@ class Datacenter(object):
         self.switch = None  # first prototype assumes one "bigswitch" per DC
         self.containers = {}  # keep track of running containers
 
+    def __repr__(self):
+        return self.label
+
     def _get_next_dc_dpid(self):
         global DCDPID_BASE
         DCDPID_BASE += 1
@@ -128,7 +132,8 @@ class Datacenter(object):
         d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command)
         # connect all given networks
         for nw in network:
-            self.net.addLink(d, self.switch, params1=nw)
+            # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
+            self.net.addLink(d, self.switch, params1=nw, cls=Link)
         # do bookkeeping
         self.containers[name] = d
         d.datacenter = self
index 8f14b69..eb52cb1 100755 (executable)
@@ -19,8 +19,8 @@ script.
 import logging
 from mininet.log import setLogLevel
 from emuvim.dcemulator.net import DCNetwork
-from emuvim.api.zerorpcapi import ZeroRpcApiEndpoint
-from emuvim.api.zerorpcapi_DCNetwork import ZeroRpcApiEndpointDCNetwork
+from emuvim.api.zerorpc.compute import ZeroRpcApiEndpoint
+from emuvim.api.zerorpc.network import ZeroRpcApiEndpointDCNetwork
 
 logging.basicConfig(level=logging.INFO)
 
index 1588190..7731fd2 100644 (file)
@@ -7,7 +7,8 @@ A simple topology with two PoPs for the y1 demo story board.
 import logging
 from mininet.log import setLogLevel
 from emuvim.dcemulator.net import DCNetwork
-from emuvim.api.zerorpcapi import ZeroRpcApiEndpoint
+from emuvim.api.zerorpc.compute import ZeroRpcApiEndpoint
+from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
 
 logging.basicConfig(level=logging.INFO)
 
@@ -18,18 +19,22 @@ def create_topology1():
     dc1 = net.addDatacenter("dc1")
     dc2 = net.addDatacenter("dc2")
     s1 = net.addSwitch("s1")
-    net.addLink(dc1, s1)
-    net.addLink(dc2, s1)
+    net.addLink(dc1, s1, delay="10ms")
+    net.addLink(dc2, s1, delay="20ms")
 
-    # create a new instance of a endpoint implementation
+    # add the command line interface endpoint to each DC
     zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
-    # connect data centers to this endpoint
     zapi1.connectDatacenter(dc1)
     zapi1.connectDatacenter(dc2)
     # run API endpoint server (in another thread, don't block)
     zapi1.start()
 
-    # TODO add "fake gatekeeper" api endpoint and connect it to both dcs
+    # add the SONATA dummy gatekeeper to each DC
+    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 8000)
+    sdkg1.connectDatacenter(dc1)
+    sdkg1.connectDatacenter(dc2)
+    # run the dummy gatekeeper (in another thread, don't block)
+    sdkg1.start()
 
     # start the emulation platform
     net.start()
@@ -43,4 +48,4 @@ def main():
 
 
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()