X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;ds=sidebyside;f=src%2Femuvim%2Fapi%2Fsonata%2Fdummygatekeeper.py;h=ca00176df40f2f726ec019cf18f0d2265b455995;hb=7e084ea1579fe004470577274b20df66f5122ca0;hp=294ffed1b6780527c11c6f5ecddb3df8fd6de224;hpb=76eb865d051eace78cd55ff56e5ef4caf6f8d93e;p=osm%2Fvim-emu.git

diff --git a/src/emuvim/api/sonata/dummygatekeeper.py b/src/emuvim/api/sonata/dummygatekeeper.py
index 294ffed..ca00176 100755
--- a/src/emuvim/api/sonata/dummygatekeeper.py
+++ b/src/emuvim/api/sonata/dummygatekeeper.py
@@ -120,7 +120,6 @@ class Service(object):
         self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
         self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)
 
-
     def onboard(self):
         """
         Do all steps to prepare this service to be instantiated
@@ -259,6 +258,30 @@ class Service(object):
         LOG.info("Service started. Instance id: %r" % instance_uuid)
         return instance_uuid
 
+    def stop_service(self, instance_uuid):
+        """
+        This method stops a running service instance.
+        It iterates over all VNF instances, stopping each of them
+        and removing it from its data center.
+
+        :param instance_uuid: the uuid of the service instance to be stopped
+        """
+        LOG.info("Stopping service %r" % self.uuid)
+        # get relevant information
+        # instance_uuid = str(self.uuid.uuid4())
+        vnf_instances = self.instances[instance_uuid]["vnf_instances"]
+
+        for v in vnf_instances:
+            self._stop_vnfi(v)
+
+        if not GK_STANDALONE_MODE:
+            # remove placement?
+            # self._remove_placement(RoundRobinPlacement)
+            None
+
+        # last step: remove the instance from the list of all instances
+        del self.instances[instance_uuid]
+
     def _start_vnfd(self, vnfd):
         """
         Start a single VNFD of this service
@@ -278,7 +301,32 @@ class Service(object):
             assert(target_dc is not None)
             if not self._check_docker_image_exists(docker_name):
                 raise Exception("Docker image %r not found. Abort." % docker_name)
-            # 3. do the dc.startCompute(name="foobar") call to run the container
+
+            # 3. get the resource limits
+            res_req = u.get("resource_requirements")
+            cpu_list = res_req.get("cpu").get("cores")
+            if not cpu_list or len(cpu_list)==0:
+                cpu_list="1"
+            cpu_bw = res_req.get("cpu").get("cpu_bw")
+            if not cpu_bw:
+                cpu_bw=1
+            mem_num = str(res_req.get("memory").get("size"))
+            if len(mem_num)==0:
+                mem_num="2"
+            mem_unit = str(res_req.get("memory").get("size_unit"))
+            if len(mem_unit)==0:
+                mem_unit="GB"
+            mem_limit = float(mem_num)
+            if mem_unit=="GB":
+                mem_limit=mem_limit*1024*1024*1024
+            elif mem_unit=="MB":
+                mem_limit=mem_limit*1024*1024
+            elif mem_unit=="KB":
+                mem_limit=mem_limit*1024
+            mem_lim = int(mem_limit)
+            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
+
+            # 4. do the dc.startCompute(name="foobar") call to run the container
             # TODO consider flavors, and other annotations
             intfs = vnfd.get("connection_points")
 
@@ -294,9 +342,23 @@ class Service(object):
             LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
             LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
-            vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small")
+            vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small", \
+                cpu_quota=cpu_quota, cpu_period=cpu_period, cpuset=cpu_list, mem_limit=mem_lim)
             return vnfi
 
+    def _stop_vnfi(self, vnfi):
+        """
+        Stop a VNF instance.
+
+        :param vnfi: vnf instance to be stopped
+        """
+        # Find the correct datacenter
+        status = vnfi.getStatus()
+        dc = vnfi.datacenter
+        # stop the vnfi
+        LOG.info("Stopping the VNF instance %r in DC %r" % (status["name"], dc))
+        dc.stopCompute(status["name"])
+
     def _get_vnf_instance(self, instance_uuid, name):
         """
         Returns the Docker object for the given VNF name (or Docker name).
@@ -336,8 +398,9 @@ class Service(object):
             config = vnfi.dcinfo.get("Config", dict())
             env = config.get("Env", list())
             for env_var in env:
-                if "SON_EMU_CMD=" in env_var:
-                    cmd = str(env_var.split("=")[1])
+                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
+                LOG.debug("%r = %r" % (var, cmd))
+                if var=="SON_EMU_CMD":
                     LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                     # execute command in new thread to ensure that GK is not blocked by VNF
                     t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
@@ -491,6 +554,29 @@ class Service(object):
         for name, vnfd in self.vnfds.iteritems():
             LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
 
+    def _calculate_cpu_cfs_values(self, cpu_time_percentage):
+        """
+        Calculate cpu period and quota for CFS
+        :param cpu_time_percentage: percentage of overall CPU to be used
+        :return: cpu_period, cpu_quota
+        """
+        if cpu_time_percentage is None:
+            return -1, -1
+        if cpu_time_percentage < 0:
+            return -1, -1
+        # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
+        # Attention: the minimum cpu_quota is 1ms (1000 microseconds)
+        cpu_period = 1000000  # let's consider a fixed period of 1000000 microseconds for now
+        LOG.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period, cpu_time_percentage))
+        cpu_quota = cpu_period * cpu_time_percentage  # calculate the fraction of cpu time for this container
+        # ATTENTION: keep cpu_quota >= 1000 to avoid an "invalid argument" system error (CFS enforces the 1000-microsecond minimum)
+        if cpu_quota < 1000:
+            LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
+            cpu_quota = 1000
+            LOG.warning("Increased CPU quota to avoid system error.")
+        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period, cpu_quota))
+        return int(cpu_period), int(cpu_quota)
+
 """
 Some (simple) placement algorithms
@@ -512,7 +598,7 @@ class RoundRobinDcPlacement(object):
     """
     def place(self, nsd, vnfds, dcs):
         c = 0
-        dcs_list = list(dcs.itervalues())
+        dcs_list = list(dcs.itervalues())
         for name, vnfd in vnfds.iteritems():
             vnfd["dc"] = dcs_list[c % len(dcs_list)]
             c += 1  # inc. c to use next DC
@@ -557,7 +643,7 @@ class Packages(fr.Resource):
             s = Service(service_uuid, file_hash, upload_path)
             GK.register_service_package(service_uuid, s)
             # generate the JSON result
-            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
+            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
         except Exception as ex:
             LOG.exception("Service package upload failed:")
             return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500
@@ -591,7 +677,7 @@ class Instantiations(fr.Resource):
         if service_uuid in GK.services:
             # ok, we have a service uuid, lets start the service
             service_instance_uuid = GK.services.get(service_uuid).start_service()
-            return {"service_instance_uuid": service_instance_uuid}
+            return {"service_instance_uuid": service_instance_uuid}, 201
         return "Service not found", 404
 
     def get(self):
@@ -603,9 +689,47 @@ class Instantiations(fr.Resource):
         return {"service_instantiations_list": [
             list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
 
+    def delete(self):
+        """
+        Stops a running service specified by its service and instance UUID.
+        """
+        # try to extract the service and instance UUID from the request
+        json_data = request.get_json(force=True)
+        service_uuid = json_data.get("service_uuid")
+        instance_uuid = json_data.get("service_instance_uuid")
+
+        # try to be fuzzy
+        if service_uuid is None and len(GK.services) > 0:
+            # if we don't get a service UUID, simply stop the first service in the list
+            service_uuid = list(GK.services.iterkeys())[0]
+        if instance_uuid is None and len(GK.services[service_uuid].instances) > 0:
+            instance_uuid = list(GK.services[service_uuid].instances.iterkeys())[0]
+
+        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
+            # valid service and instance UUID, stop service
+            GK.services.get(service_uuid).stop_service(instance_uuid)
+            # stop_service() already removes the instance from the instance list
+            return
+        return "Service not found", 404
+
+class Exit(fr.Resource):
+
+    def put(self):
+        """
+        Stop the running Containernet instance, regardless of any data sent with the request.
+        """
+        GK.net.stop()
+
+
+def initialize_GK():
+    global GK
+    GK = Gatekeeper()
+
+
 # create a single, global GK object
-GK = Gatekeeper()
+GK = None
+initialize_GK()
 # setup Flask
 app = Flask(__name__)
 app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
@@ -613,6 +737,12 @@ api = fr.Api(app)
 # define endpoints
 api.add_resource(Packages, '/packages')
 api.add_resource(Instantiations, '/instantiations')
+api.add_resource(Exit, '/emulator/exit')
+
+
+#def initialize_GK():
+#    global GK
+#    GK = Gatekeeper()
 
 
 def start_rest_api(host, port, datacenters=dict()):
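
Usage sketch (not part of the patch): the snippet below shows how the REST endpoints touched by this diff could be exercised from a Python client once the emulator and the dummy gatekeeper are running. The gatekeeper address (localhost:5000) and the service UUID placeholder are assumptions for illustration, not values defined in this diff. As a worked example of the new resource limits, a cpu_bw of 0.25 makes _calculate_cpu_cfs_values() return cpu_period=1000000 and cpu_quota=250000 (0.25 * 1000000, above the 1000-microsecond minimum).

import requests

GK_URL = "http://localhost:5000"  # assumed address; use the host/port passed to start_rest_api()

# instantiate an on-boarded service (POST /instantiations now returns HTTP 201)
r = requests.post(GK_URL + "/instantiations", json={"service_uuid": "<service_uuid>"})
instance_uuid = r.json().get("service_instance_uuid")

# stop that instance again via the new DELETE handler; both UUIDs may be omitted,
# in which case the gatekeeper falls back to the first known service/instance
requests.delete(GK_URL + "/instantiations",
                json={"service_uuid": "<service_uuid>",
                      "service_instance_uuid": instance_uuid})

# shut down the whole Containernet instance (new PUT /emulator/exit endpoint)
requests.put(GK_URL + "/emulator/exit")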