Merge pull request #54 from mpeuster/master
author    peusterm <manuel.peuster@uni-paderborn.de>
Mon, 14 Mar 2016 17:22:02 +0000 (18:22 +0100)
committer peusterm <manuel.peuster@uni-paderborn.de>
Mon, 14 Mar 2016 17:22:02 +0000 (18:22 +0100)
SONATA dummy gatekeeper

.gitignore
setup.py
src/emuvim/api/sonata/README.md
src/emuvim/api/sonata/dummygatekeeper.py
src/emuvim/dcemulator/net.py
src/emuvim/test/base.py
src/emuvim/test/test_emulator.py

index fc2456c..7cd83d2 100755 (executable)
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,8 @@ __pycache__/
 *.py[cod]
 *$py.class
 
+#IDE
+.idea/
 
 # C extensions
 *.so
@@ -66,4 +68,4 @@ target/
 .pytest.restart
 
 # JUnit xml
-utils/ci/junit-xml/*.xml
\ No newline at end of file
+utils/ci/junit-xml/*.xml
index cde045b..0634761 100644 (file)
--- a/setup.py
+++ b/setup.py
@@ -20,7 +20,8 @@ setup(name='emuvim',
           'ryu',
           'pytest',
           'Flask',
-          'flask_restful'
+          'flask_restful',
+          'docker-py'
       ],
       zip_safe=False,
       entry_points={
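
`docker-py` (imported as `docker`) is the client library the dummy gatekeeper now uses to build container images. A minimal check that the dependency resolves, assuming a local Docker daemon is reachable on its default socket:

```python
# Sanity check for the new docker-py dependency; the import matches the one
# used in dummygatekeeper.py below. Assumes a reachable local Docker daemon.
from docker import Client as DockerClient

dc = DockerClient()
print(dc.version())  # dict with daemon and API version information
```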
index 6ecb5de..d44ed27 100644 (file)
--- a/src/emuvim/api/sonata/README.md
+++ b/src/emuvim/api/sonata/README.md
@@ -2,13 +2,13 @@
 
 ## Upload a package (*.son) file:
 
-To upload the file `simplest-example.son` do:
+To upload the file `sonata-demo.son` (from the son-schema repo) do:
 
-* `curl -i -X POST -F file=@simplest-example.son http://127.0.0.1:8000/api/packages/uploads`
+* `curl -i -X POST -F file=@sonata-demo.son http://127.0.0.1:8000/api/packages`
 
 To list all uploaded packages do:
 
-* `curl http://127.0.0.1:8000/api/packages/uploads`
+* `curl http://127.0.0.1:8000/api/packages`
 
 To instantiate (start) a service do:
 
@@ -37,14 +37,14 @@ _Note: This API should converge to the API of the original GK as much as possibl
 <th>Response:</th>
 </tr>
 <tr>
-<td>/api/packages/uploads</td>
+<td>/api/packages</td>
 <td>POST</td>
 <td>-</td>
 <td>{file-content} as enctype=multipart/form-data</td>
 <td>{"service_uuid": "c880aaab-f3b9-43ac-ac6b-3d27b46146b7", size=456, sha1=49ee6468dfa4ecbad440d669b249d523a38651be, error: null}</td>
 </tr>
 <tr>
-<td>/api/packages/uploads</td>
+<td>/api/packages</td>
 <td>GET</td>
 <td>-</td>
 <td></td>
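
For scripting, the same API can be driven from Python. A sketch using the third-party `requests` library (an assumption, not a project dependency), with the dummy gatekeeper listening on 127.0.0.1:8000:

```python
# Python equivalent of the curl calls above; 'requests' must be installed
# separately (it is not listed in setup.py).
import requests

BASE = "http://127.0.0.1:8000"

# upload a *.son package; the multipart form field must be named "file"
with open("sonata-demo.son", "rb") as f:
    r = requests.post(BASE + "/api/packages", files={"file": f})
service_uuid = r.json()["service_uuid"]
print(r.json())  # {"service_uuid": "...", "size": ..., "sha1": "...", "error": None}

# list all uploaded packages
print(requests.get(BASE + "/api/packages").json())  # {"service_uuid_list": [...]}

# instantiate (start) the on-boarded service, assuming the endpoint
# accepts a JSON body as the resource code suggests
r = requests.post(BASE + "/api/instantiations", json={"service_uuid": service_uuid})
print(r.json())  # {"service_instance_uuid": "..."}
```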
index f258e49..4241e79 100644 (file)
--- a/src/emuvim/api/sonata/dummygatekeeper.py
+++ b/src/emuvim/api/sonata/dummygatekeeper.py
@@ -9,10 +9,14 @@ import logging
 import os
 import uuid
 import hashlib
-import json
+import zipfile
+import yaml
+from docker import Client as DockerClient
 from flask import Flask, request
 import flask_restful as fr
 
+LOG = logging.getLogger("sonata-dummy-gatekeeper")
+LOG.setLevel(logging.DEBUG)
 logging.getLogger("werkzeug").setLevel(logging.WARNING)
 
 
@@ -23,14 +27,40 @@ CATALOG_FOLDER = "/tmp/son-dummy-gk/catalog/"
 class Gatekeeper(object):
 
     def __init__(self):
-        self.packages = dict()
-        self.instantiations = dict()
-        logging.info("Create SONATA dummy gatekeeper.")
+        self.services = dict()
+        LOG.info("Create SONATA dummy gatekeeper.")
 
-    def unpack_service_package(self, service_uuid):
-        # TODO implement method
-        # 1. unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
-        pass
+    def register_service_package(self, service_uuid, service):
+        """
+        register new service package
+        :param service_uuid
+        :param service object
+        """
+        self.services[service_uuid] = service
+        # perform all steps needed to on-board the service
+        service.onboard()
+
+
+class Service(object):
+    """
+    This class represents a NS uploaded as a *.son package to the
+    dummy gatekeeper.
+    Can have multiple running instances of this service.
+    """
+
+    def __init__(self,
+                 service_uuid,
+                 package_file_hash,
+                 package_file_path):
+        self.uuid = service_uuid
+        self.package_file_hash = package_file_hash
+        self.package_file_path = package_file_path
+        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
+        self.manifest = None
+        self.nsd = None
+        self.vnfds = dict()
+        self.local_docker_files = dict()
+        self.instances = dict()
 
     def start_service(self, service_uuid):
         # TODO implement method
@@ -39,6 +69,100 @@ class Gatekeeper(object):
         # 3. store references to the compute objects in self.instantiations
         pass
 
+    def onboard(self):
+        """
+        Do all steps to prepare this service to be instantiated
+        :return:
+        """
+        # 1. extract the contents of the package and store them in our catalog
+        self._unpack_service_package()
+        # 2. read in all descriptor files
+        self._load_package_descriptor()
+        self._load_nsd()
+        self._load_vnfd()
+        self._load_docker_files()
+        # 3. prepare container images (e.g. download or build Dockerfile)
+        self._build_images_from_dockerfiles()
+        self._download_predefined_dockerimages()
+
+        LOG.info("On-boarded service: %r" % self.manifest.get("package_name"))
+
+    def _unpack_service_package(self):
+        """
+        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
+        """
+        with zipfile.ZipFile(self.package_file_path, "r") as z:
+            z.extractall(self.package_content_path)
+
+    def _load_package_descriptor(self):
+        """
+        Load the main package descriptor YAML and keep it as dict.
+        :return:
+        """
+        self.manifest = load_yaml(
+            os.path.join(
+                self.package_content_path, "META-INF/MANIFEST.MF"))
+
+    def _load_nsd(self):
+        """
+        Load the entry NSD YAML and keep it as dict.
+        :return:
+        """
+        if "entry_service_template" in self.manifest:
+            nsd_path = os.path.join(
+                self.package_content_path,
+                make_relative_path(self.manifest.get("entry_service_template")))
+            self.nsd = load_yaml(nsd_path)
+            LOG.debug("Loaded NSD: %r" % self.nsd.get("ns_name"))
+
+    def _load_vnfd(self):
+        """
+        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
+        :return:
+        """
+        if "package_content" in self.manifest:
+            for pc in self.manifest.get("package_content"):
+                if pc.get("content-type") == "application/sonata.function_descriptor":
+                    vnfd_path = os.path.join(
+                        self.package_content_path,
+                        make_relative_path(pc.get("name")))
+                    vnfd = load_yaml(vnfd_path)
+                    self.vnfds[vnfd.get("vnf_name")] = vnfd
+                    LOG.debug("Loaded VNFD: %r" % vnfd.get("vnf_name"))
+
+    def _load_docker_files(self):
+        """
+        Get all paths to Dockerfiles from MANIFEST.MF and store them in dict.
+        :return:
+        """
+        if "package_content" in self.manifest:
+            for df in self.manifest.get("package_content"):
+                if df.get("content-type") == "application/sonata.docker_files":
+                    docker_path = os.path.join(
+                        self.package_content_path,
+                        make_relative_path(df.get("name")))
+                    # FIXME: Mapping to docker image names is hardcoded because of the missing mapping in the example package
+                    self.local_docker_files[helper_map_docker_name(df.get("name"))] = docker_path
+                    LOG.debug("Found Dockerfile: %r" % docker_path)
+
+    def _build_images_from_dockerfiles(self):
+        """
+        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
+        """
+        dc = DockerClient()
+        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
+        for k, v in self.local_docker_files.iteritems():
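+            # v is a path ending in "Dockerfile"; stripping the file name yields the build context dir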
+            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
+                LOG.debug("DOCKER BUILD: %s" % line)
+            LOG.info("Docker image created: %s" % k)
+
+    def _download_predefined_dockerimages(self):
+        """
+        If the package contains URLs to pre-built Docker images, we download them with this method.
+        """
+        # TODO implement
+        pass
+
 
 """
 Resource definitions and API endpoints
@@ -56,22 +180,23 @@ class Packages(fr.Resource):
         """
         try:
             # get file contents
-            file = request.files['file']
+            son_file = request.files['file']
             # generate a uuid to reference this package
             service_uuid = str(uuid.uuid4())
-            hash = hashlib.sha1(str(file)).hexdigest()
+            # hash the real file content; str(son_file) would only hash the object's repr
+            file_hash = hashlib.sha1(son_file.read()).hexdigest()
+            son_file.seek(0)  # rewind so save() below stores the whole file
             # ensure that upload folder exists
             ensure_dir(UPLOAD_FOLDER)
             upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
             # store *.son file to disk
-            file.save(upload_path)
+            son_file.save(upload_path)
             size = os.path.getsize(upload_path)
-            # store a reference to the uploaded package in our gatekeeper
-            GK.packages[service_uuid] = upload_path
+            # create a service object and register it
+            s = Service(service_uuid, file_hash, upload_path)
+            GK.register_service_package(service_uuid, s)
             # generate the JSON result
-            return {"service_uuid": service_uuid, "size": size, "sha1": hash, "error": None}
+            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
         except Exception as ex:
-            logging.exception("Service package upload failed:")
+            LOG.exception("Service package upload failed:")
             return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}
 
     def get(self):
@@ -79,7 +204,7 @@ class Packages(fr.Resource):
         Return a list of UUID's of uploaded service packages.
         :return: dict/list
         """
-        return {"service_uuid_list": list(GK.packages.iterkeys())}
+        return {"service_uuid_list": list(GK.services.iterkeys())}
 
 
 class Instantiations(fr.Resource):
@@ -95,8 +220,7 @@ class Instantiations(fr.Resource):
         service_uuid = json_data.get("service_uuid")
         if service_uuid is not None:
             service_instance_uuid = str(uuid.uuid4())
-            GK.instantiations[service_instance_uuid] = service_uuid
-            logging.info("Starting service %r" % service_uuid)
+            LOG.info("Starting service %r" % service_uuid)
             return {"service_instance_uuid": service_instance_uuid}
         return None
 
@@ -106,7 +230,8 @@ class Instantiations(fr.Resource):
         :return: dict / list
         """
         # TODO implement method
-        return {"service_instance_uuid_list": list(GK.instantiations.iterkeys())}
+        return {"service_instance_uuid_list": list()}
+
 
 # create a single, global GK object
 GK = Gatekeeper()
@@ -115,7 +240,7 @@ app = Flask(__name__)
 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
 api = fr.Api(app)
 # define endpoints
-api.add_resource(Packages, '/api/packages/uploads')
+api.add_resource(Packages, '/api/packages')
 api.add_resource(Instantiations, '/api/instantiations')
 
 
@@ -130,7 +255,36 @@ def start_rest_api(host, port):
 
 def ensure_dir(name):
     if not os.path.exists(name):
-       os.makedirs(name)
+        os.makedirs(name)
+
+
+def load_yaml(path):
+    with open(path, "r") as f:
+        try:
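+            # note: yaml.load() can construct arbitrary objects; yaml.safe_load() is safer for untrusted packages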
+            r = yaml.load(f)
+        except yaml.YAMLError as exc:
+            LOG.exception("YAML parse error")
+            r = dict()
+    return r
+
+
+def make_relative_path(path):
+    if path.startswith("/"):
+        return path.replace("/", "", 1)
+    return path
+
+
+def helper_map_docker_name(name):
+    """
+    Quick hack to fix missing dependency in example package.
+    """
+    # TODO remove this when package description is fixed
+    mapping = {
+        "/docker_files/iperf/Dockerfile": "iperf_docker",
+        "/docker_files/firewall/Dockerfile": "fw_docker",
+        "/docker_files/tcpdump/Dockerfile": "tcpdump_docker"
+    }
+    return mapping.get(name)
 
 
 if __name__ == '__main__':
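
To make the on-boarding steps concrete, here is a hypothetical MANIFEST.MF, shown as the dict `load_yaml()` would return. Only the keys and content-type strings come from the loader code above; the paths and names are invented for illustration:

```python
# Hypothetical manifest; keys mirror what _load_nsd(), _load_vnfd() and
# _load_docker_files() look up, values are illustrative only.
from emuvim.api.sonata.dummygatekeeper import (
    make_relative_path, helper_map_docker_name)

manifest = {
    "package_name": "sonata-demo",
    "entry_service_template": "/service_descriptors/nsd.yml",
    "package_content": [
        {"name": "/function_descriptors/firewall-vnfd.yml",
         "content-type": "application/sonata.function_descriptor"},
        {"name": "/docker_files/firewall/Dockerfile",
         "content-type": "application/sonata.docker_files"},
    ],
}

# _load_nsd() strips the leading slash before joining with the package root:
assert make_relative_path("/service_descriptors/nsd.yml") == "service_descriptors/nsd.yml"
# _load_docker_files() maps each Dockerfile path to a hardcoded image tag:
assert helper_map_docker_name("/docker_files/firewall/Dockerfile") == "fw_docker"
```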
index da546ab..81d16ab 100755 (executable)
--- a/src/emuvim/dcemulator/net.py
+++ b/src/emuvim/dcemulator/net.py
@@ -5,13 +5,13 @@ Distributed Cloud Emulator (dcemulator)
 import logging
 
 import site
+import time
 from subprocess import Popen
 import os
 
 from mininet.net import Dockernet
-from mininet.node import Controller, OVSSwitch, OVSKernelSwitch, Switch, Docker, Host, RemoteController
+from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
 from mininet.cli import CLI
-from mininet.log import setLogLevel, info, debug
 from mininet.link import TCLink
 import networkx as nx
 from emuvim.dcemulator.monitoring import DCNetworkMonitor
@@ -27,7 +27,7 @@ class DCNetwork(Dockernet):
     This class is used by topology definition scripts.
     """
 
-    def __init__(self, dc_emulation_max_cpu=1.0, **kwargs):
+    def __init__(self, controller=RemoteController, dc_emulation_max_cpu=1.0, **kwargs):
         """
         Create an extended version of a Dockernet network
         :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
@@ -38,20 +38,20 @@ class DCNetwork(Dockernet):
 
         # call original Docker.__init__ and setup default controller
         Dockernet.__init__(
-            self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
+            self, switch=OVSKernelSwitch, **kwargs)
 
-        # ass a remote controller to be able to use Ryu
-        self.addController('c0', controller=RemoteController)
+        # start Ryu controller
+        self.startRyu()
+
+        # add a remote controller to be able to use Ryu
+        self.addController('c0', controller=controller)
 
         # graph of the complete DC network
-        self.DCNetwork_graph=nx.DiGraph()
+        self.DCNetwork_graph = nx.DiGraph()
 
         # monitoring agent
         self.monitor_agent = DCNetworkMonitor(self)
 
-        # start Ryu controller
-        self.startRyu()
-
         # initialize resource model registrar
         self.rm_registrar = ResourceModelRegistrar(dc_emulation_max_cpu)
 
@@ -154,9 +154,8 @@ class DCNetwork(Dockernet):
 
     def stop(self):
         # stop Ryu controller
-        self.ryu_process.terminate()
-        #self.ryu_process.kill()
         Dockernet.stop(self)
+        self.stopRyu()
 
     def CLI(self):
         CLI(self)
@@ -215,5 +214,12 @@ class DCNetwork(Dockernet):
         ryu_option = '--ofp-tcp-listen-port'
         ryu_of_port = '6653'
         ryu_cmd =  'ryu-manager'
-        FNULL = open(os.devnull, 'w')
-        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
\ No newline at end of file
+        ryu_log = open("/tmp/ryu.log", 'w')
+        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=ryu_log, stderr=ryu_log)
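+        # give Ryu a moment to start up before switches try to connect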
+        time.sleep(1)
+
+    def stopRyu(self):
+        if self.ryu_process:
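+            # SIGTERM first, then SIGKILL to make sure the controller process is gone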
+            self.ryu_process.terminate()
+            self.ryu_process.kill()
+
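
With the controller now injectable, topology scripts can pass their own controller class. A minimal sketch (assumes the usual emulator environment: Dockernet installed, run as root):

```python
# Minimal use of the new 'controller' parameter; RemoteController is the
# default, so passing it explicitly is equivalent to plain DCNetwork().
from mininet.node import RemoteController
from emuvim.dcemulator.net import DCNetwork

net = DCNetwork(controller=RemoteController, dc_emulation_max_cpu=1.0)
try:
    net.start()  # inherited from Dockernet/Mininet
    net.CLI()    # interactive Mininet prompt
finally:
    net.stop()   # also terminates the Ryu subprocess via stopRyu()
```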
index f652259..13ace1b 100644 (file)
--- a/src/emuvim/test/base.py
+++ b/src/emuvim/test/base.py
@@ -8,6 +8,7 @@ import subprocess
 import docker
 from emuvim.dcemulator.net import DCNetwork
 from mininet.clean import cleanup
+from mininet.node import Controller
 
 class SimpleTestTopology(unittest.TestCase):
     """
@@ -32,7 +33,7 @@ class SimpleTestTopology(unittest.TestCase):
         Creates a Mininet instance and automatically adds some
         nodes to it.
         """
-        self.net = net = DCNetwork()
+        self.net = DCNetwork()
 
         # add some switches
         for i in range(0, nswitches):
index 905b1c6..fb8714a 100755 (executable)
--- a/src/emuvim/test/test_emulator.py
+++ b/src/emuvim/test/test_emulator.py
@@ -6,6 +6,7 @@ Python API.
 Does not test API endpoints. This is done in separate test suites.
 """
 
+import time
 import unittest
 from emuvim.dcemulator.node import EmulatorCompute
 from emuvim.test.base import SimpleTestTopology
@@ -39,6 +40,7 @@ class testEmulatorTopology( SimpleTestTopology ):
         # stop Mininet network
         self.stopNet()
 
+    #@unittest.skip("disabled to test if CI fails because this is the first test.")
     def testMultipleDatacenterDirect(self):
         """
         Create two data centers and interconnect them.
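
The suite drives real Mininet and Docker resources. A sketch of running it programmatically (assumes root privileges, as Mininet requires, and an installed emuvim package):

```python
# Run the emulator tests with the standard unittest runner.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName("emuvim.test.test_emulator")
unittest.TextTestRunner(verbosity=2).run(suite)
```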