X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=src%2Femuvim%2Fdcemulator%2Fnode.py;h=09dca9cb6c23672da273541222d478dac9f26f03;hb=5b37641a26aef3691b6f5ad2e3d1b768841eee8f;hp=3f76df9c4779efd954e81603bd91a3468af7369d;hpb=d2bbf4505674a8e0ea4f36f9576d53eba2b7af51;p=osm%2Fvim-emu.git

diff --git a/src/emuvim/dcemulator/node.py b/src/emuvim/dcemulator/node.py
index 3f76df9..09dca9c 100755
--- a/src/emuvim/dcemulator/node.py
+++ b/src/emuvim/dcemulator/node.py
@@ -4,6 +4,7 @@ Distributed Cloud Emulator (dcemulator)
 """
 from mininet.node import Docker
 from mininet.link import Link
+from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable
 import logging
 import time
 import json
@@ -48,7 +49,9 @@ class EmulatorCompute(Docker):
         status = {}
         status["name"] = self.name
         status["network"] = self.getNetworkStatus()
+        status["docker_network"] = self.dcinfo['NetworkSettings']['IPAddress']
         status["image"] = self.dimage
+        status["flavor_name"] = self.flavor_name
         status["cpu_quota"] = self.cpu_quota
         status["cpu_period"] = self.cpu_period
         status["cpu_shares"] = self.cpu_shares
@@ -131,7 +134,7 @@ class Datacenter(object):
             raise Exception("Container with name %s already exists." % name)
         # set default parameter
         if image is None:
-            image = "ubuntu"
+            image = "ubuntu:trusty"
         if network is None:
             network = {}  # {"ip": "10.0.0.254/8"}
         if isinstance(network, dict):
@@ -140,69 +143,36 @@ class Datacenter(object):
             if len(network) < 1:
                 network.append({})
 
-        # allocate in resource resource model and compute resource limits for new container
-        cpu_limit = mem_limit = disk_limit = -1
-        cpu_period = cpu_quota = None
-        if self._resource_model is not None:
-            # call allocate in resource model to calculate resource limit for this container
-            (cpu_limit, mem_limit, disk_limit) = alloc = self._resource_model.allocate(name, flavor_name)
-            LOG.debug("Allocation result: %r" % str(alloc))
-            # check if we have a cpu_limit given by the used resource model
-            if cpu_limit > 0:
-                # calculate cpu period and quota for CFS
-                # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
-                # TODO consider multi core machines etc! non trivial!
-                # Attention minimum cpu_quota is 1ms (micro)
-                cpu_period = 100000  # lets consider a fixed period of 100000 microseconds for now
-                cpu_quota = cpu_period * cpu_limit  # calculate the fraction of cpu time for this container
-                LOG.debug(
-                    "CPU limit: cpu_quota = cpu_period * cpu_limit = %f * %f = %f" % (cpu_period, cpu_limit, cpu_quota))
-                # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
-                if cpu_quota < 1000:
-                    cpu_quota = 1000
-                    LOG.warning("Increased CPU quota for %r to avoid system error." % name)
-            # check if we have a mem_limit given by the used resource model
-            if mem_limit > 0:
-                LOG.debug(
-                    "MEM limit: mem_limit = %f MB" % mem_limit)
-                # ATTENTION minimum mem_limit per container is 4MB
-                if mem_limit < 4:
-                    mem_limit = 4
-                    LOG.warning("Increased MEM limit for %r because it was less than 4.0 MB." % name)
         # create the container
         d = self.net.addDocker(
             "%s" % (name),
             dimage=image,
             dcmd=command,
             datacenter=self,
-            flavor_name=flavor_name,
-            cpu_period=int(cpu_period) if cpu_limit > 0 else None,  # set cpu limits if needed
-            cpu_quota=int(cpu_quota) if cpu_limit > 0 else None,
-            mem_limit="%dm" % int(mem_limit) if mem_limit > 0 else None,  # set mem limits if needed
-            memswap_limit="%dm" % int(mem_limit) if mem_limit > 0 else None  # lets set swap to mem limit for now
+            flavor_name=flavor_name
         )
+
+        # apply resource limits to container if a resource model is defined
+        if self._resource_model is not None:
+            try:
+                self._resource_model.allocate(d)
+                self._resource_model.write_allocation_log(d, self.resource_log_path)
+            except NotEnoughResourcesAvailable as ex:
+                LOG.warning("Allocation of container %r was blocked by resource model." % name)
+                LOG.info(ex.message)
+                # ensure that we remove the container
+                self.net.removeDocker(name)
+                return None
+
         # connect all given networks
+        # if no --net option is given, network = [{}], so 1 empty dict in the list
+        # this results in 1 default interface with a default ip address
         for nw in network:
             # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
-            self.net.addLink(d, self.switch, params1=nw, cls=Link)
+            logging.info('nw: {0}'.format(nw))
+            self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
         # do bookkeeping
         self.containers[name] = d
-
-        # write resource log if a path is given
-        if self.resource_log_path is not None:
-            l = dict()
-            l["t"] = time.time()
-            l["name"] = name
-            l["compute"] = d.getStatus()
-            l["flavor_name"] = flavor_name
-            l["action"] = "allocate"
-            l["cpu_limit"] = cpu_limit
-            l["mem_limit"] = mem_limit
-            l["disk_limit"] = disk_limit
-            l["rm_state"] = None if self._resource_model is None else self._resource_model.get_state_dict()
-            # append to logfile
-            with open(self.resource_log_path, "a") as f:
-                f.write("%s\n" % json.dumps(l))
         return d  # we might use UUIDs for naming later on
 
     def stopCompute(self, name):
@@ -213,28 +183,20 @@ class Datacenter(object):
         if name not in self.containers:
             raise Exception("Container with name %s not found." % name)
         LOG.debug("Stopping compute instance %r in data center %r" % (name, str(self)))
+
+        # call resource model and free resources
+        if self._resource_model is not None:
+            self._resource_model.free(self.containers[name])
+            self._resource_model.write_free_log(self.containers[name], self.resource_log_path)
+
+        # remove links
         self.net.removeLink(
             link=None, node1=self.containers[name], node2=self.switch)
+
+        # remove container
         self.net.removeDocker("%s" % (name))
         del self.containers[name]
 
-        # call resource model and free resources
-        if self._resource_model is not None:
-            self._resource_model.free(name)
-        # write resource log if a path is given
-        if self.resource_log_path is not None:
-            l = dict()
-            l["t"] = time.time()
-            l["name"] = name
-            l["flavor_name"] = None
-            l["action"] = "free"
-            l["cpu_limit"] = -1
-            l["mem_limit"] = -1
-            l["disk_limit"] = -1
-            l["rm_state"] = None if self._resource_model is None else self._resource_model.get_state_dict()
-            # append to logfile
-            with open(self.resource_log_path, "a") as f:
-                f.write("%s\n" % json.dumps(l))
         return True
 
     def listCompute(self):
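
The change above replaces the inline CFS quota computation and ad-hoc JSON logging with a pluggable resource model: startCompute() now hands the container object to self._resource_model.allocate(), expects a NotEnoughResourcesAvailable exception when the request cannot be satisfied, and delegates logging to write_allocation_log() / write_free_log(), while stopCompute() calls free() and write_free_log(). The sketch below illustrates what an object implementing that interface could look like. It is not the emulator's real model from emuvim.dcemulator.resourcemodel; the class name, the per-flavor CPU shares, and the log format are illustrative assumptions only.

import json
import time


class NotEnoughResourcesAvailable(Exception):
    # stand-in for emuvim.dcemulator.resourcemodel.NotEnoughResourcesAvailable,
    # defined here only to keep the sketch self-contained
    pass


class SimpleCpuResourceModel(object):
    """Hypothetical resource model exposing the four methods that
    Datacenter.startCompute()/stopCompute() call in the diff above:
    allocate(), write_allocation_log(), free(), write_free_log()."""

    def __init__(self, cpu_capacity=4.0, cpu_per_flavor=None):
        self.cpu_capacity = cpu_capacity   # total CPU budget this model may hand out
        self.cpu_used = 0.0                # CPU currently allocated
        # illustrative flavor -> CPU demand mapping (assumption, not from the diff)
        self.cpu_per_flavor = cpu_per_flavor or {"tiny": 0.5, "small": 1.0, "medium": 2.0}

    def _demand(self, container):
        # the diff shows containers carrying name and flavor_name attributes
        return self.cpu_per_flavor.get(container.flavor_name, 0.5)

    def allocate(self, container):
        # reserve CPU for the container or signal rejection to startCompute()
        demand = self._demand(container)
        if self.cpu_used + demand > self.cpu_capacity:
            raise NotEnoughResourcesAvailable(
                "Not enough CPU for %r: requested %.2f, free %.2f"
                % (container.name, demand, self.cpu_capacity - self.cpu_used))
        self.cpu_used += demand

    def free(self, container):
        # release the CPU reserved for the container
        self.cpu_used = max(0.0, self.cpu_used - self._demand(container))

    def _write_log(self, container, path, action):
        # append one JSON line per event, mirroring the style of the
        # inline logging code removed by this diff
        if path is None:
            return
        entry = {"t": time.time(),
                 "name": container.name,
                 "flavor_name": container.flavor_name,
                 "action": action,
                 "cpu_used": self.cpu_used,
                 "cpu_capacity": self.cpu_capacity}
        with open(path, "a") as f:
            f.write("%s\n" % json.dumps(entry))

    def write_allocation_log(self, container, path):
        self._write_log(container, path, "allocate")

    def write_free_log(self, container, path):
        self._write_log(container, path, "free")

Assuming the data center exposes a registration hook for the model (not shown in this diff), such an object would be attached before containers are started; once its capacity is exhausted, startCompute() logs a warning, removes the partially created container, and returns None, exactly as in the except branch above. Note that ex.message in the diff is Python 2 usage; under Python 3 the equivalent would be str(ex).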