X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=src%2Femuvim%2Fapi%2Fsonata%2Fdummygatekeeper.py;h=70fce59696add08f3e55a4d32b5b3cc0d0febf66;hb=59b28fc5279aa56b06bdae9a02a395c208909327;hp=939070ce9a0ade5429fca08b8fa19dfd013d84c4;hpb=5373ea6bc34b414e74168e48e5fa823d3893ce78;p=osm%2Fvim-emu.git

diff --git a/src/emuvim/api/sonata/dummygatekeeper.py b/src/emuvim/api/sonata/dummygatekeeper.py
index 939070c..70fce59 100755
--- a/src/emuvim/api/sonata/dummygatekeeper.py
+++ b/src/emuvim/api/sonata/dummygatekeeper.py
@@ -38,10 +38,12 @@ import uuid
 import hashlib
 import zipfile
 import yaml
+import threading
 from docker import Client as DockerClient
 from flask import Flask, request
 import flask_restful as fr
 from collections import defaultdict
+import pkg_resources
 
 logging.basicConfig()
 LOG = logging.getLogger("sonata-dummy-gatekeeper")
@@ -61,6 +63,13 @@ GK_STANDALONE_MODE = False
 # should a new version of an image be pulled even if its available
 FORCE_PULL = False
 
+# Automatically deploy SAPs (endpoints) of the service as new containers
+# Attention: This is not a configuration switch but a global variable! Don't change its default value.
+DEPLOY_SAP = False
+
+# flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
+BIDIRECTIONAL_CHAIN = False
+
 
 class Gatekeeper(object):
 
     def __init__(self):
@@ -106,11 +115,11 @@ class Service(object):
         self.remote_docker_image_urls = dict()
         self.instances = dict()
         self.vnf_name2docker_name = dict()
+        self.sap_identifiers = set()
         # lets generate a set of subnet configurations used for e-line chaining setup
         self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
         self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)
 
-
     def onboard(self):
         """
         Do all steps to prepare this service to be instantiated
@@ -122,6 +131,8 @@ class Service(object):
         self._load_package_descriptor()
         self._load_nsd()
         self._load_vnfd()
+        if DEPLOY_SAP:
+            self._load_saps()
         # 3. prepare container images (e.g. download or build Dockerfile)
         if BUILD_DOCKERFILE:
             self._load_docker_files()
@@ -147,9 +158,16 @@ class Service(object):
         self.instances[instance_uuid] = dict()
         self.instances[instance_uuid]["vnf_instances"] = list()
 
-        # 2. compute placement of this service instance (adds DC names to VNFDs)
+        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
+        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
+                                      reduce(lambda x, y: dict(x, **y),
+                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
+                                                 self.nsd["network_functions"])))
+
+        # 3. compute placement of this service instance (adds DC names to VNFDs)
         if not GK_STANDALONE_MODE:
-            self._calculate_placement(FirstDcPlacement)
+            #self._calculate_placement(FirstDcPlacement)
+            self._calculate_placement(RoundRobinDcPlacement)
         # iterate over all vnfds that we have to start
         for vnfd in self.vnfds.itervalues():
             vnfi = None
             if not GK_STANDALONE_MODE:
                 vnfi = self._start_vnfd(vnfd)
             self.instances[instance_uuid]["vnf_instances"].append(vnfi)
 
-        # 3. Configure the chaining of the network functions (currently only E-Line links supported)
-        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
-                                      reduce(lambda x, y: dict(x, **y),
-                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
-                                                 self.nsd["network_functions"])))
-
         vlinks = self.nsd["virtual_links"]
         fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
         eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
         elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]
 
-        # 3a. deploy E-Line links
+        # 4a. deploy E-Line links
         # cookie is used as identifier for the flowrules installed by the dummygatekeeper
         # eg. different services get a unique cookie for their flowrules
         cookie = 1
@@ -176,6 +188,19 @@
             src_id, src_if_name = link["connection_points_reference"][0].split(":")
             dst_id, dst_if_name = link["connection_points_reference"][1].split(":")
 
+            # check if there is a SAP in the link
+            if src_id in self.sap_identifiers:
+                src_docker_name = "{0}_{1}".format(src_id, src_if_name)
+                src_id = src_docker_name
+            else:
+                src_docker_name = src_id
+
+            if dst_id in self.sap_identifiers:
+                dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
+                dst_id = dst_docker_name
+            else:
+                dst_docker_name = dst_id
+
             src_name = vnf_id2vnf_name[src_id]
             dst_name = vnf_id2vnf_name[dst_id]
 
@@ -185,13 +210,11 @@
             if (src_name in self.vnfds) and (dst_name in self.vnfds):
                 network = self.vnfds[src_name].get("dc").net # there should be a cleaner way to find the DCNetwork
-                src_docker_name = self.vnf_name2docker_name[src_name]
-                dst_docker_name = self.vnf_name2docker_name[dst_name]
                 LOG.debug(src_docker_name)
                 ret = network.setChain(
                     src_docker_name, dst_docker_name,
                     vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
-                    bidirectional=True, cmd="add-flow", cookie=cookie, priority=10)
+                    bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
 
                 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
                 src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
@@ -201,7 +224,7 @@
                 if dst_vnfi is not None:
                     self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))
 
-        # 3b. deploy E-LAN links
+        # 4b. deploy E-LAN links
         base = 10
         for link in elan_fwd_links:
             # generate lan ip address
@@ -209,6 +232,9 @@
             for intf in link["connection_points_reference"]:
                 ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                 vnf_id, intf_name = intf.split(":")
+                if vnf_id in self.sap_identifiers:
+                    src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
+                    vnf_id = src_docker_name
                 vnf_name = vnf_id2vnf_name[vnf_id]
                 LOG.debug(
                     "Setting up E-LAN link. %s(%s:%s) -> %s" % (
@@ -216,8 +242,8 @@
 
                 if vnf_name in self.vnfds:
                     # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
-                    # E-LAN relies on the learning switch capability of the infrastructure switch in dockernet,
-                    # so no explicit chaining is necessary
+                    # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
+                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                     vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                     if vnfi is not None:
                         self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
@@ -226,14 +252,36 @@
             # increase the base ip address for the next E-LAN
             base += 1
 
-
-
-        # 4. run the emulator specific entrypoint scripts in the VNFIs of this service instance
+        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
         self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])
 
         LOG.info("Service started. Instance id: %r" % instance_uuid)
         return instance_uuid
 
+    def stop_service(self, instance_uuid):
+        """
+        This method stops a running service instance.
+        It iterates over all VNF instances, stopping them each
+        and removing them from their data center.
+
+        :param instance_uuid: the uuid of the service instance to be stopped
+        """
+        LOG.info("Stopping service %r" % self.uuid)
+        # get relevant information
+        # instance_uuid = str(self.uuid.uuid4())
+        vnf_instances = self.instances[instance_uuid]["vnf_instances"]
+
+        for v in vnf_instances:
+            self._stop_vnfi(v)
+
+        if not GK_STANDALONE_MODE:
+            # remove placement?
+            # self._remove_placement(RoundRobinPlacement)
+            None
+
+        # last step: remove the instance from the list of all instances
+        del self.instances[instance_uuid]
+
     def _start_vnfd(self, vnfd):
         """
         Start a single VNFD of this service
@@ -257,6 +305,7 @@
         # TODO consider flavors, and other annotations
         intfs = vnfd.get("connection_points")
 
+        # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
         # use the vnf_id in the nsd as docker name
         # so deployed containers can be easily mapped back to the nsd
         vnf_name2id = defaultdict(lambda: "NotExistingNode",
@@ -271,6 +320,19 @@
         vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small")
         return vnfi
 
+    def _stop_vnfi(self, vnfi):
+        """
+        Stop a VNF instance.
+
+        :param vnfi: vnf instance to be stopped
+        """
+        # Find the correct datacenter
+        status = vnfi.getStatus()
+        dc = vnfi.datacenter
+        # stop the vnfi
+        LOG.info("Stopping the vnf instance contained in %r in DC %r" % (status["name"], dc))
+        dc.stopCompute(status["name"])
+
     def _get_vnf_instance(self, instance_uuid, name):
         """
         Returns the Docker object for the given VNF name (or Docker name).
@@ -312,8 +374,11 @@
             for env_var in env:
                 if "SON_EMU_CMD=" in env_var:
                     cmd = str(env_var.split("=")[1])
-                    LOG.info("Executing entrypoint script in %r: %r" % (vnfi.name, cmd))
-                    vnfi.cmdPrint(cmd)
+                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
+                    # execute command in new thread to ensure that GK is not blocked by VNF
+                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
+                    t.daemon = True
+                    t.start()
 
     def _unpack_service_package(self):
         """
@@ -360,6 +425,28 @@
             self.vnfds[vnfd.get("name")] = vnfd
             LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))
 
+    def _load_saps(self):
+        # Each Service Access Point (connection_point) in the nsd is getting its own container
+        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
+        for sap in SAPs:
+            # endpoints needed in this service
+            sap_vnf_id, sap_vnf_interface = sap.split(':')
+            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
+            self.sap_identifiers.add(sap_vnf_id)
+
+            sap_docker_name = "%s_%s" % (sap_vnf_id, sap_vnf_interface)
+
+            # add SAP to self.vnfds
+            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
+            sap_vnfd = load_yaml(sapfile)
+            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
+            sap_vnfd["name"] = sap_docker_name
+            self.vnfds[sap_docker_name] = sap_vnfd
+            # add SAP vnf to list in the NSD so it is deployed later on
+            # each SAP gets a unique VNFD and vnf_id in the NSD
+            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
+            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))
+
     def _load_docker_files(self):
         """
         Get all paths to Dockerfiles from VNFDs and store them in dict.
@@ -455,6 +542,20 @@ class FirstDcPlacement(object):
             vnfd["dc"] = list(dcs.itervalues())[0]
 
 
+class RoundRobinDcPlacement(object):
+    """
+    Placement: Distribute VNFs across all available DCs in a round robin fashion.
+    """
+    def place(self, nsd, vnfds, dcs):
+        c = 0
+        dcs_list = list(dcs.itervalues())
+        for name, vnfd in vnfds.iteritems():
+            vnfd["dc"] = dcs_list[c % len(dcs_list)]
+            c += 1 # inc. c to use next DC
+
+
+
+
 """
 Resource definitions and API endpoints
 """
@@ -492,7 +593,7 @@ class Packages(fr.Resource):
             s = Service(service_uuid, file_hash, upload_path)
             GK.register_service_package(service_uuid, s)
             # generate the JSON result
-            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
+            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
         except Exception as ex:
             LOG.exception("Service package upload failed:")
             return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
@@ -526,7 +627,7 @@ class Instantiations(fr.Resource):
         if service_uuid in GK.services:
             # ok, we have a service uuid, lets start the service
             service_instance_uuid = GK.services.get(service_uuid).start_service()
-            return {"service_instance_uuid": service_instance_uuid}
+            return {"service_instance_uuid": service_instance_uuid}, 201
         return "Service not found", 404
 
     def get(self):
@@ -538,9 +639,47 @@
         return {"service_instantiations_list": [
             list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
 
+    def delete(self):
+        """
+        Stops a running service specified by its service and instance UUID.
+        """
+        # try to extract the service and instance UUID from the request
+        json_data = request.get_json(force=True)
+        service_uuid = json_data.get("service_uuid")
+        instance_uuid = json_data.get("service_instance_uuid")
+
+        # try to be fuzzy
+        if service_uuid is None and len(GK.services) > 0:
+            #if we don't get a service uuid, we simply stop the last service in the list
+            service_uuid = list(GK.services.iterkeys())[0]
+        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
+            instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]
+
+        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
+            # valid service and instance UUID, stop service
+            GK.services.get(service_uuid).stop_service(instance_uuid)
+            del GK.services.get(service_uuid).instances[instance_uuid]
+            return
+        return "Service not found", 404
+
+
+class Exit(fr.Resource):
+
+    def put(self):
+        """
+        Stop the running Containernet instance regardless of data transmitted
+        """
+        GK.net.stop()
+
+
+def initialize_GK():
+    global GK
+    GK = Gatekeeper()
+
+
 # create a single, global GK object
-GK = Gatekeeper()
+GK = None
+initialize_GK()
 # setup Flask
 app = Flask(__name__)
 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024 # 512 MB max upload
@@ -548,6 +687,12 @@ api = fr.Api(app)
 # define endpoints
 api.add_resource(Packages, '/packages')
 api.add_resource(Instantiations, '/instantiations')
+api.add_resource(Exit, '/emulator/exit')
+
+
+#def initialize_GK():
+#    global GK
+#    GK = Gatekeeper()
 
 
 def start_rest_api(host, port, datacenters=dict()):
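For orientation only (not part of the patch above): a minimal client-side sketch of how the REST additions in this diff — the 201 responses, the new DELETE handler on /instantiations and the new /emulator/exit resource — could be exercised with the Python requests library. The gatekeeper address and the placeholder UUIDs below are assumptions; only the routes and the JSON keys service_uuid / service_instance_uuid come from the code in the diff.

# Hypothetical client-side usage sketch, not part of dummygatekeeper.py.
# Assumption: the dummy gatekeeper was started via start_rest_api() and is
# reachable on 127.0.0.1:5000; adjust GK_URL to your setup.
import requests

GK_URL = "http://127.0.0.1:5000"  # assumed address of the dummy gatekeeper

# List known service instances (GET /instantiations, unchanged by this patch).
print(requests.get(GK_URL + "/instantiations").json())

# Stop one instance through the new DELETE handler. Both keys may be omitted;
# the handler then falls back to the first service/instance it finds ("fuzzy" mode).
resp = requests.delete(GK_URL + "/instantiations",
                       json={"service_uuid": "<service-uuid>",             # placeholder
                             "service_instance_uuid": "<instance-uuid>"})  # placeholder
print(resp.status_code)

# Shut down the whole Containernet emulation through the new Exit resource.
requests.put(GK_URL + "/emulator/exit")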