Quickfix: Chaining in dummy GK does not work with example package
[osm/vim-emu.git] / src / emuvim / api / sonata / dummygatekeeper.py
old mode 100644 (file)
new mode 100755 (executable)
index 29ebc0b..8423a31
@@ -15,13 +15,23 @@ from docker import Client as DockerClient
 from flask import Flask, request
 import flask_restful as fr
 
+logging.basicConfig()
 LOG = logging.getLogger("sonata-dummy-gatekeeper")
 LOG.setLevel(logging.DEBUG)
 logging.getLogger("werkzeug").setLevel(logging.WARNING)
 
+GK_STORAGE = "/tmp/son-dummy-gk/"
+UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
+CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
 
-UPLOAD_FOLDER = "/tmp/son-dummy-gk/uploads/"
-CATALOG_FOLDER = "/tmp/son-dummy-gk/catalog/"
+# if True, build Docker images from the Dockerfiles in the package instead of pulling pre-built images
+BUILD_DOCKERFILE = False
+
+# flag to indicate that we run without the emulator (only the bare API for integration testing)
+GK_STANDALONE_MODE = False
+
+# should a new version of an image be pulled even if it is already available locally
+FORCE_PULL = True
 
 
 class Gatekeeper(object):
@@ -44,7 +54,7 @@ class Gatekeeper(object):
 
     def get_next_vnf_name(self):
         self.vnf_counter += 1
-        return "sonvnf%d" % self.vnf_counter
+        return "vnf%d" % self.vnf_counter
 
 
 class Service(object):
@@ -66,6 +76,7 @@ class Service(object):
         self.nsd = None
         self.vnfds = dict()
         self.local_docker_files = dict()
+        self.remote_docker_image_urls = dict()
         self.instances = dict()
 
     def onboard(self):
@@ -79,11 +90,13 @@ class Service(object):
         self._load_package_descriptor()
         self._load_nsd()
         self._load_vnfd()
-        self._load_docker_files()
         # 3. prepare container images (e.g. download or build Dockerfile)
-        self._build_images_from_dockerfiles()
-        self._download_predefined_dockerimages()
-
+        if BUILD_DOCKERFILE:
+            self._load_docker_files()
+            self._build_images_from_dockerfiles()
+        else:
+            self._load_docker_urls()
+            self._pull_predefined_dockerimages()
         LOG.info("On-boarded service: %r" % self.manifest.get("package_name"))
 
     def start_service(self):
@@ -95,40 +108,74 @@ class Service(object):
         :return:
         """
         LOG.info("Starting service %r" % self.uuid)
+
         # 1. each service instance gets a new uuid to identify it
         instance_uuid = str(uuid.uuid4())
         # build a instances dict (a bit like a NSR :))
         self.instances[instance_uuid] = dict()
         self.instances[instance_uuid]["vnf_instances"] = list()
+
         # 2. compute placement of this service instance (adds DC names to VNFDs)
-        self._calculate_placement(FirstDcPlacement)
+        if not GK_STANDALONE_MODE:
+            self._calculate_placement(FirstDcPlacement)
         # iterate over all vnfds that we have to start
         for vnfd in self.vnfds.itervalues():
-            # iterate over all deployment units within each VNFDs
-            for u in vnfd.get("virtual_deployment_units"):
-                # 3. get the name of the docker image to start and the assigned DC
-                docker_name = u.get("vm_image")
-                target_dc = vnfd.get("dc")
-                # 4. perform some checks to ensure we can start the container
-                assert(docker_name is not None)
-                assert(target_dc is not None)
-                if not self._check_docker_image_exists(docker_name):
-                    raise Exception("Docker image %r not found. Abort." % docker_name)
-                # 5. do the dc.startCompute(name="foobar") call to run the container
-                # TODO consider flavors, and other annotations
-                vnfi = target_dc.startCompute(GK.get_next_vnf_name(), image=docker_name, flavor_name="small")
-                # 6. store references to the compute objects in self.instances
-                self.instances[instance_uuid]["vnf_instances"].append(vnfi)
+            vnfi = None
+            if not GK_STANDALONE_MODE:
+                vnfi = self._start_vnfd(vnfd)
+            self.instances[instance_uuid]["vnf_instances"].append(vnfi)
+
+        # 3. Configure the chaining of the network functions (currently only E-Line links supported)
+        vlinks = self.nsd["virtual_links"]
+        fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
+        eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
+
+        for link in eline_fwd_links:
+            src_node, src_port = link["connection_points_reference"][0].split(":")
+            dst_node, dst_port = link["connection_points_reference"][1].split(":")
+
+            if src_node in self.vnfds:
+                network = self.vnfds[src_node].get("dc").net  # there should be a cleaner way to find the DCNetwork
+                network.setChain(src_node, dst_node, vnf_src_interface=src_port, vnf_dst_interface=dst_port)
+
         LOG.info("Service started. Instance id: %r" % instance_uuid)
         return instance_uuid
 
+    def _start_vnfd(self, vnfd):
+        """
+        Start a single VNFD of this service
+        :param vnfd: vnfd descriptor dict
+        :return: reference to the started compute instance (vnfi)
+        """
+        # iterate over all deployment units within the VNFD
+        for u in vnfd.get("virtual_deployment_units"):
+            # 1. get the name of the docker image to start and the assigned DC
+            vnf_name = vnfd.get("name")
+            if vnf_name not in self.remote_docker_image_urls:
+                raise Exception("No image name for %r found. Abort." % vnf_name)
+            docker_name = self.remote_docker_image_urls.get(vnf_name)
+            target_dc = vnfd.get("dc")
+            # 2. perform some checks to ensure we can start the container
+            assert(docker_name is not None)
+            assert(target_dc is not None)
+            if not self._check_docker_image_exists(docker_name):
+                raise Exception("Docker image %r not found. Abort." % docker_name)
+            # 3. do the dc.startCompute(name="foobar") call to run the container
+            # TODO consider flavors, and other annotations
+            intfs = vnfd.get("connection_points")
+            vnfi = target_dc.startCompute(GK.get_next_vnf_name(), network=intfs, image=docker_name, flavor_name="small")
+            # 4. return a reference to the compute object; the caller stores it in self.instances
+            return vnfi
+
     def _unpack_service_package(self):
         """
         unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
         """
+        LOG.info("Unzipping: %r" % self.package_file_path)
         with zipfile.ZipFile(self.package_file_path, "r") as z:
             z.extractall(self.package_content_path)
 
+
     def _load_package_descriptor(self):
         """
         Load the main package descriptor YAML and keep it as dict.
@@ -148,7 +195,7 @@ class Service(object):
                 self.package_content_path,
                 make_relative_path(self.manifest.get("entry_service_template")))
             self.nsd = load_yaml(nsd_path)
-            LOG.debug("Loaded NSD: %r" % self.nsd.get("ns_name"))
+            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
 
     def _load_vnfd(self):
         """
@@ -162,28 +209,44 @@ class Service(object):
                         self.package_content_path,
                         make_relative_path(pc.get("name")))
                     vnfd = load_yaml(vnfd_path)
-                    self.vnfds[vnfd.get("vnf_name")] = vnfd
-                    LOG.debug("Loaded VNFD: %r" % vnfd.get("vnf_name"))
+                    self.vnfds[vnfd.get("name")] = vnfd
+                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
 
     def _load_docker_files(self):
         """
-        Get all paths to Dockerfiles from MANIFEST.MF and store them in dict.
+        Get all paths to Dockerfiles from the VNFDs and store them in a dict.
         :return:
         """
-        if "package_content" in self.manifest:
-            for df in self.manifest.get("package_content"):
-                if df.get("content-type") == "application/sonata.docker_files":
+        for k, v in self.vnfds.iteritems():
+            for vu in v.get("virtual_deployment_units"):
+                if vu.get("vm_image_format") == "docker":
+                    vm_image = vu.get("vm_image")
                     docker_path = os.path.join(
                         self.package_content_path,
-                        make_relative_path(df.get("name")))
-                    # FIXME: Mapping to docker image names is hardcoded because of the missing mapping in the example package
-                    self.local_docker_files[helper_map_docker_name(df.get("name"))] = docker_path
-                    LOG.debug("Found Dockerfile: %r" % docker_path)
+                        make_relative_path(vm_image))
+                    self.local_docker_files[k] = docker_path
+                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))
+
+    def _load_docker_urls(self):
+        """
+        Get the URLs of pre-built Docker images referenced in the VNFDs and store them in a dict.
+        :return:
+        """
+        for k, v in self.vnfds.iteritems():
+            for vu in v.get("virtual_deployment_units"):
+                if vu.get("vm_image_format") == "docker":
+                    url = vu.get("vm_image")
+                    if url is not None:
+                        url = url.replace("http://", "")
+                        self.remote_docker_image_urls[k] = url
+                        LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))
 
     def _build_images_from_dockerfiles(self):
         """
         Build Docker images for each local Dockerfile found in the package: self.local_docker_files
         """
+        if GK_STANDALONE_MODE:
+            return  # do not build anything in standalone mode
         dc = DockerClient()
         LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
         for k, v in self.local_docker_files.iteritems():
@@ -191,12 +254,19 @@ class Service(object):
                 LOG.debug("DOCKER BUILD: %s" % line)
             LOG.info("Docker image created: %s" % k)
 
-    def _download_predefined_dockerimages(self):
+    def _pull_predefined_dockerimages(self):
         """
         If the package contains URLs to pre-build Docker images, we download them with this method.
         """
-        # TODO implement this if we want to be able to download docker images instead of building them
-        pass
+        dc = DockerClient()
+        for url in self.remote_docker_image_urls.itervalues():
+            if not FORCE_PULL:  # only pull if not present (speedup for development)
+                if len(dc.images(name=url)) > 0:
+                    LOG.debug("Image %r present. Skipping pull." % url)
+                    continue
+            LOG.info("Pulling image: %r" % url)
+            dc.pull(url,
+                    insecure_registry=True)
 
     def _check_docker_image_exists(self, image_name):
         """
@@ -253,7 +323,14 @@ class Packages(fr.Resource):
         """
         try:
             # get file contents
-            son_file = request.files['file']
+            LOG.debug("Request files: %r" % request.files)
+            # let's search for the package in the request
+            if "package" in request.files:
+                son_file = request.files["package"]
+            # elif "file" in request.files:
+            #     son_file = request.files["file"]
+            else:
+                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
             # generate a uuid to reference this package
             service_uuid = str(uuid.uuid4())
             file_hash = hashlib.sha1(str(son_file)).hexdigest()
@@ -270,7 +347,7 @@ class Packages(fr.Resource):
             return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
         except Exception as ex:
             LOG.exception("Service package upload failed:")
-            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}
+            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
 
     def get(self):
         """
@@ -319,8 +396,8 @@ app = Flask(__name__)
 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
 api = fr.Api(app)
 # define endpoints
-api.add_resource(Packages, '/api/packages')
-api.add_resource(Instantiations, '/api/instantiations')
+api.add_resource(Packages, '/packages')
+api.add_resource(Instantiations, '/instantiations')
 
 
 def start_rest_api(host, port, datacenters=dict()):
@@ -349,28 +426,18 @@ def load_yaml(path):
 
 
 def make_relative_path(path):
+    if path.startswith("file://"):
+        path = path.replace("file://", "", 1)
     if path.startswith("/"):
-        return path.replace("/", "", 1)
+        path = path.replace("/", "", 1)
     return path
 
 
-def helper_map_docker_name(name):
-    """
-    Quick hack to fix missing dependency in example package.
-    """
-    # FIXME remove this when package description is fixed
-    mapping = {
-        "/docker_files/iperf/Dockerfile": "iperf_docker",
-        "/docker_files/firewall/Dockerfile": "fw_docker",
-        "/docker_files/tcpdump/Dockerfile": "tcpdump_docker"
-    }
-    return mapping.get(name)
-
-
 if __name__ == '__main__':
     """
     Lets allow to run the API in standalone mode.
     """
+    GK_STANDALONE_MODE = True
     logging.getLogger("werkzeug").setLevel(logging.INFO)
     start_rest_api("0.0.0.0", 8000)