X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=src%2Femuvim%2Fapi%2Fsonata%2Fdummygatekeeper.py;h=c69fd9a2333ebd204e05316e2f57153446a73cd9;hb=b8d9ecb3e6fc0c477794259b96f76a2ed7049cd5;hp=4241e799e100832543babe5cd7a52c1eb3db7014;hpb=fd692374280e8e0a936fa5064582f79a47b0560a;p=osm%2Fvim-emu.git
diff --git a/src/emuvim/api/sonata/dummygatekeeper.py b/src/emuvim/api/sonata/dummygatekeeper.py
index 4241e79..c69fd9a 100644
--- a/src/emuvim/api/sonata/dummygatekeeper.py
+++ b/src/emuvim/api/sonata/dummygatekeeper.py
@@ -15,19 +15,25 @@ from docker import Client as DockerClient
 from flask import Flask, request
 import flask_restful as fr
 
+logging.basicConfig()
 LOG = logging.getLogger("sonata-dummy-gatekeeper")
 LOG.setLevel(logging.DEBUG)
 logging.getLogger("werkzeug").setLevel(logging.WARNING)
 
+GK_STORAGE = "/tmp/son-dummy-gk/"
+UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
+CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")
-UPLOAD_FOLDER = "/tmp/son-dummy-gk/uploads/"
-CATALOG_FOLDER = "/tmp/son-dummy-gk/catalog/"
+
+# flag to indicate that we run without the emulator (only the bare API for integration testing)
+GK_STANDALONE_MODE = False
 
 
 class Gatekeeper(object):
 
     def __init__(self):
         self.services = dict()
+        self.dcs = dict()
+        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
         LOG.info("Create SONATA dummy gatekeeper.")
 
     def register_service_package(self, service_uuid, service):
@@ -40,6 +46,10 @@ class Gatekeeper(object):
         # lets perform all steps needed to onboard the service
         service.onboard()
 
+    def get_next_vnf_name(self):
+        self.vnf_counter += 1
+        return "vnf%d" % self.vnf_counter
+
 
 class Service(object):
     """
@@ -62,13 +72,6 @@ class Service(object):
         self.local_docker_files = dict()
         self.instances = dict()
 
-    def start_service(self, service_uuid):
-        # TODO implement method
-        # 1. parse descriptors
-        # 2. do the corresponding dc.startCompute(name="foobar") calls
-        # 3. store references to the compute objects in self.instantiations
-        pass
-
     def onboard(self):
         """
         Do all steps to prepare this service to be instantiated
@@ -87,6 +90,54 @@ class Service(object):
 
         LOG.info("On-boarded service: %r" % self.manifest.get("package_name"))
 
+    def start_service(self):
+        """
+        This method creates and starts a new service instance.
+        It computes the placement, iterates over all VNFDs, and starts
+        each VNFD as a Docker container in the data center selected
+        by the placement algorithm.
+        :return:
+        """
+        LOG.info("Starting service %r" % self.uuid)
+        # 1. each service instance gets a new uuid to identify it
+        instance_uuid = str(uuid.uuid4())
+        # build an instances dict (a bit like an NSR :))
+        self.instances[instance_uuid] = dict()
+        self.instances[instance_uuid]["vnf_instances"] = list()
+        # 2. compute placement of this service instance (adds DC names to VNFDs)
+        if not GK_STANDALONE_MODE:
+            self._calculate_placement(FirstDcPlacement)
+        # iterate over all vnfds that we have to start
+        for vnfd in self.vnfds.itervalues():
+            vnfi = None
+            if not GK_STANDALONE_MODE:
+                vnfi = self._start_vnfd(vnfd)
+            self.instances[instance_uuid]["vnf_instances"].append(vnfi)
+        LOG.info("Service started. Instance id: %r" % instance_uuid)
+        return instance_uuid
+
+    def _start_vnfd(self, vnfd):
+        """
+        Start a single VNFD of this service
+        :param vnfd: vnfd descriptor dict
+        :return:
+        """
+        # iterate over all deployment units within this VNFD
+        for u in vnfd.get("virtual_deployment_units"):
+            # 1. get the name of the docker image to start and the assigned DC
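+            #    (the VNFD's "vnf_name" is used as the Docker image name here, the
+            #    "dc" field was attached by _calculate_placement() in start_service(),
+            #    and the method returns after the first deployment unit is started)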
+            docker_name = vnfd.get("vnf_name")
+            target_dc = vnfd.get("dc")
+            # 2. perform some checks to ensure we can start the container
+            assert(docker_name is not None)
+            assert(target_dc is not None)
+            if not self._check_docker_image_exists(docker_name):
+                raise Exception("Docker image %r not found. Abort." % docker_name)
+            # 3. do the dc.startCompute(name="foobar") call to run the container
+            # TODO consider flavors, and other annotations
+            vnfi = target_dc.startCompute(GK.get_next_vnf_name(), image=docker_name, flavor_name="small")
+            # 4. store references to the compute objects in self.instances
+            return vnfi
+
     def _unpack_service_package(self):
         """
         unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
@@ -132,23 +183,25 @@ class Service(object):
 
     def _load_docker_files(self):
         """
-        Get all paths to Dockerfiles from MANIFEST.MF and store them in dict.
+        Get all paths to Dockerfiles from VNFDs and store them in a dict.
         :return:
         """
-        if "package_content" in self.manifest:
-            for df in self.manifest.get("package_content"):
-                if df.get("content-type") == "application/sonata.docker_files":
+        for k, v in self.vnfds.iteritems():
+            for vu in v.get("virtual_deployment_units"):
+                if vu.get("vm_image_format") == "docker":
+                    vm_image = vu.get("vm_image")
                     docker_path = os.path.join(
                         self.package_content_path,
-                        make_relative_path(df.get("name")))
-                    # FIXME: Mapping to docker image names is hardcoded because of the missing mapping in the example package
-                    self.local_docker_files[helper_map_docker_name(df.get("name"))] = docker_path
+                        make_relative_path(vm_image))
+                    self.local_docker_files[k] = docker_path
                     LOG.debug("Found Dockerfile: %r" % docker_path)
 
     def _build_images_from_dockerfiles(self):
         """
         Build Docker images for each local Dockerfile found in the package: self.local_docker_files
         """
+        if GK_STANDALONE_MODE:
+            return  # do not build anything in standalone mode
         dc = DockerClient()
         LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
         for k, v in self.local_docker_files.iteritems():
@@ -160,9 +213,47 @@ class Service(object):
         """
         If the package contains URLs to pre-build Docker images, we download them with this method.
         """
-        # TODO implement
+        # TODO implement this if we want to be able to download docker images instead of building them
         pass
 
+    def _check_docker_image_exists(self, image_name):
+        """
+        Query the docker service and check if the given image exists
+        :param image_name: name of the docker image
+        :return:
+        """
+        return len(DockerClient().images(image_name)) > 0
+
+    def _calculate_placement(self, algorithm):
+        """
+        Do placement by adding a field "dc" to
+        each VNFD that points to one of our
+        data center objects known to the gatekeeper.
+        """
+        assert(len(self.vnfds) > 0)
+        assert(len(GK.dcs) > 0)
+        # instantiate algorithm and place
+        p = algorithm()
+        p.place(self.nsd, self.vnfds, GK.dcs)
+        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
+        # let's print the placement result
+        for name, vnfd in self.vnfds.iteritems():
+            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
+
+
+"""
+Some (simple) placement algorithms
+"""
+
+
+class FirstDcPlacement(object):
+    """
+    Placement: Always use one and the same data center from the GK.dcs dict.
+    """
+    def place(self, nsd, vnfds, dcs):
+        for name, vnfd in vnfds.iteritems():
+            vnfd["dc"] = list(dcs.itervalues())[0]
+
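+# Sketch (not part of this patch): other placement strategies only need to
+# implement the same place(nsd, vnfds, dcs) interface, e.g. a round-robin
+# variant that spreads the VNFDs over all data centers known to the gatekeeper:
+#
+# class RoundRobinDcPlacement(object):
+#     def place(self, nsd, vnfds, dcs):
+#         dc_list = list(dcs.itervalues())
+#         for i, (name, vnfd) in enumerate(vnfds.iteritems()):
+#             vnfd["dc"] = dc_list[i % len(dc_list)]
+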
 
 """
 Resource definitions and API endpoints
 """
@@ -180,6 +271,7 @@ class Packages(fr.Resource):
         """
         try:
             # get file contents
+            print(request.files)
             son_file = request.files['file']
             # generate a uuid to reference this package
             service_uuid = str(uuid.uuid4())
@@ -215,22 +307,28 @@ class Instantiations(fr.Resource):
         Will return a new UUID to identify the running service instance.
         :return: UUID
         """
-        # TODO implement method (start real service)
+        # try to extract the service uuid from the request
         json_data = request.get_json(force=True)
         service_uuid = json_data.get("service_uuid")
-        if service_uuid is not None:
-            service_instance_uuid = str(uuid.uuid4())
-            LOG.info("Starting service %r" % service_uuid)
+
+        # let's be a bit fuzzy here to make testing easier
+        if service_uuid is None and len(GK.services) > 0:
+            # if we don't get a service uuid, we simply start the first service in the list
+            service_uuid = list(GK.services.iterkeys())[0]
+
+        if service_uuid in GK.services:
+            # ok, we have a service uuid, let's start the service
+            service_instance_uuid = GK.services.get(service_uuid).start_service()
             return {"service_instance_uuid": service_instance_uuid}
-        return None
+        return "Service not found", 404
 
     def get(self):
         """
         Returns a list of UUIDs containing all running services.
         :return: dict / list
         """
-        # TODO implement method
-        return {"service_instance_uuid_list": list()}
+        return {"service_instance_list": [
+            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
 
 
 # create a single, global GK object
@@ -244,7 +342,8 @@ api.add_resource(Packages, '/api/packages')
 api.add_resource(Instantiations, '/api/instantiations')
 
 
-def start_rest_api(host, port):
+def start_rest_api(host, port, datacenters=dict()):
+    GK.dcs = datacenters
     # start the Flask server (not the best performance but ok for our use case)
     app.run(host=host,
             port=port,
@@ -269,28 +368,18 @@ def load_yaml(path):
 
 
 def make_relative_path(path):
+    if path.startswith("file://"):
+        path = path.replace("file://", "", 1)
     if path.startswith("/"):
-        return path.replace("/", "", 1)
+        path = path.replace("/", "", 1)
     return path
 
 
-def helper_map_docker_name(name):
-    """
-    Quick hack to fix missing dependency in example package.
-    """
-    # TODO remove this when package description is fixed
-    mapping = {
-        "/docker_files/iperf/Dockerfile": "iperf_docker",
-        "/docker_files/firewall/Dockerfile": "fw_docker",
-        "/docker_files/tcpdump/Dockerfile": "tcpdump_docker"
-    }
-    return mapping.get(name)
-
-
 if __name__ == '__main__':
     """
     Lets allow to run the API in standalone mode.
     """
+    GK_STANDALONE_MODE = True
     logging.getLogger("werkzeug").setLevel(logging.INFO)
     start_rest_api("0.0.0.0", 8000)
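
Usage sketch (not part of the patch): with the API running on port 8000 (see the
__main__ block above), a *.son package can be on-boarded and instantiated roughly
as follows. The snippet assumes the package upload response contains the generated
"service_uuid" field and that "sonata-demo.son" stands in for a real package file;
in standalone mode (GK_STANDALONE_MODE = True) no containers are actually started,
so the calls only exercise the API itself.

    import requests

    BASE = "http://127.0.0.1:8000/api"

    # on-board a *.son service package (Packages resource)
    with open("sonata-demo.son", "rb") as f:
        r = requests.post(BASE + "/packages", files={"file": f})
    service_uuid = r.json().get("service_uuid")

    # instantiate the on-boarded service (Instantiations resource)
    r = requests.post(BASE + "/instantiations", json={"service_uuid": service_uuid})
    print(r.json().get("service_instance_uuid"))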