"""
from mininet.node import Docker
from mininet.link import Link
+from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable
import logging
+import time
+import json
LOG = logging.getLogger("dcemulator")
LOG.setLevel(logging.DEBUG)
this compute instance is connected to.
"""
# format: list of dicts with keys (intf_name, ip, mac, up, status)
- return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status())
+ return [{'intf_name':str(i), 'ip':i.IP(), 'mac':i.MAC(), 'up':i.isUp(), 'status':i.status()}
for i in self.intfList()]
def getStatus(self):
status = {}
status["name"] = self.name
status["network"] = self.getNetworkStatus()
+ status["docker_network"] = self.dcinfo['NetworkSettings']['IPAddress']
status["image"] = self.dimage
+ status["flavor_name"] = self.flavor_name
status["cpu_quota"] = self.cpu_quota
status["cpu_period"] = self.cpu_period
status["cpu_shares"] = self.cpu_shares
DC_COUNTER = 1
- def __init__(self, label, metadata={}):
+ def __init__(self, label, metadata={}, resource_log_path=None):
self.net = None # DCNetwork to which we belong
# each node (DC) has a short internal name used by Mininet
# this is caused by Mininet's naming limitations for switches etc.
self.label = label
# dict to store arbitrary metadata (e.g. latitude and longitude)
self.metadata = metadata
+ # path to which resource information should be logged (e.g. for experiments). None = no logging
+ self.resource_log_path = resource_log_path
# first prototype assumes one "bigswitch" per DC
self.switch = None
# keep track of running containers
raise Exception("Container with name %s already exists." % name)
# set default parameter
if image is None:
- image = "ubuntu"
+ image = "ubuntu:trusty"
if network is None:
network = {} # {"ip": "10.0.0.254/8"}
if isinstance(network, dict):
if len(network) < 1:
network.append({})
- # allocate in resource resource model and compute resource limits for new container
- cpu_limit = mem_limit = disk_limit = -1
- cpu_period = cpu_quota = None
- if self._resource_model is not None:
- # call allocate in resource model to calculate resource limit for this container
- (cpu_limit, mem_limit, disk_limit) = alloc = self._resource_model.allocate(name, flavor_name)
- LOG.debug("Allocation result: %r" % str(alloc))
- # check if we have a cpu_limit given by the used resource model
- if cpu_limit > 0:
- # calculate cpu period and quota for CFS
- # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
- # TODO consider multi core machines etc! non trivial!
- # Attention minimum cpu_quota is 1ms (micro)
- cpu_period = 100000 # lets consider a fixed period of 100000 microseconds for now
- cpu_quota = cpu_period * cpu_limit # calculate the fraction of cpu time for this container
- LOG.debug(
- "CPU limit: cpu_quota = cpu_period * cpu_limit = %f * %f = %f" % (cpu_period, cpu_limit, cpu_quota))
- # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
- if cpu_quota < 1000:
- cpu_quota = 1000
- LOG.warning("Increased CPU quota for %r to avoid system error." % name)
- # check if we have a mem_limit given by the used resource model
- if mem_limit > 0:
- LOG.debug(
- "MEM limit: mem_limit = %f MB" % mem_limit)
- # ATTENTION minimum mem_limit per container is 4MB
- if mem_limit < 4:
- mem_limit = 4
- LOG.warning("Increased MEM limit for %r because it was less than 4.0 MB." % name)
# create the container
d = self.net.addDocker(
"%s" % (name),
dimage=image,
dcmd=command,
datacenter=self,
- flavor_name=flavor_name,
- cpu_period=int(cpu_period) if cpu_limit > 0 else None, # set cpu limits if needed
- cpu_quota=int(cpu_quota) if cpu_limit > 0 else None,
- mem_limit="%dm" % int(mem_limit) if mem_limit > 0 else None, # set mem limits if needed
- memswap_limit="%dm" % int(mem_limit) if mem_limit > 0 else None # lets set swap to mem limit for now
+ flavor_name=flavor_name
)
+
+ # apply resource limits to container if a resource model is defined
+ if self._resource_model is not None:
+ try:
+ self._resource_model.allocate(d)
+ self._resource_model.write_allocation_log(d, self.resource_log_path)
+ except NotEnoughResourcesAvailable as ex:
+ LOG.warning("Allocation of container %r was blocked by resource model." % name)
+ LOG.info(ex.message)
+ # ensure that we remove the container
+ self.net.removeDocker(name)
+ return None
+
# connect all given networks
+ # if no --net option is given, network = [{}], so 1 empty dict in the list
+ # this results in 1 default interface with a default ip address
for nw in network:
# TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
- self.net.addLink(d, self.switch, params1=nw, cls=Link)
+ self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
# do bookkeeping
self.containers[name] = d
return d # we might use UUIDs for naming later on
assert name is not None
if name not in self.containers:
raise Exception("Container with name %s not found." % name)
+ LOG.debug("Stopping compute instance %r in data center %r" % (name, str(self)))
+
+ # call resource model and free resources
+ if self._resource_model is not None:
+ self._resource_model.free(self.containers[name])
+ self._resource_model.write_free_log(self.containers[name], self.resource_log_path)
+
+ # remove links
self.net.removeLink(
link=None, node1=self.containers[name], node2=self.switch)
+
+ # remove container
self.net.removeDocker("%s" % (name))
del self.containers[name]
- # call resource model and free resources
- if self._resource_model is not None:
- self._resource_model.free(name)
+
return True
def listCompute(self):