-"""
-Distributed Cloud Emulator (dcemulator)
-(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
-"""
+# Copyright (c) 2015 SONATA-NFV and Paderborn University
+# ALL RIGHTS RESERVED.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Neither the name of the SONATA-NFV, Paderborn University
+# nor the names of its contributors may be used to endorse or promote
+# products derived from this software without specific prior written
+# permission.
+#
+# This work has been performed in the framework of the SONATA project,
+# funded by the European Commission under Grant number 671517 through
+# the Horizon 2020 and 5G-PPP programmes. The authors would like to
+# acknowledge the contributions of their colleagues of the SONATA
+# partner consortium (www.sonata-nfv.eu).
from mininet.node import Docker
from mininet.link import Link
+from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable
import logging
-LOG = logging.getLogger("dcemulator")
+
+LOG = logging.getLogger("dcemulator.node")
LOG.setLevel(logging.DEBUG)
DCDPID_BASE = 1000 # start of switch dpid's used for data center switches
+EXTSAPDPID_BASE = 2000 # start of switch dpid's used for external SAP switches
class EmulatorCompute(Docker):
"""
Emulator specific compute node class.
- Inherits from Dockernet's Docker host class.
+ Inherits from Containernet's Docker host class.
Represents a single container connected to a (logical)
data center.
We can add emulator specific helper functions to it.
self, name, dimage, **kwargs):
self.datacenter = kwargs.get("datacenter") # pointer to current DC
self.flavor_name = kwargs.get("flavor_name")
- LOG.debug("Starting compute instance %r in data center %r" % (name, str(self.datacenter)))
+ LOG.debug("Starting compute instance %r in data center %r" %
+ (name, str(self.datacenter)))
# call original Docker.__init__
Docker.__init__(self, name, dimage, **kwargs)
Helper method to receive information about the virtual networks
this compute instance is connected to.
"""
- # format list of tuples (name, Ip, MAC, isUp, status)
- return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status())
- for i in self.intfList()]
+ # get all links and find dc switch interface
+ networkStatusList = []
+ for i in self.intfList():
+ vnf_name = self.name
+ vnf_interface = str(i)
+ dc_port_name = self.datacenter.net.find_connected_dc_interface(
+ vnf_name, vnf_interface)
+ # format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
+ intf_dict = {'intf_name': str(i), 'ip': "{0}/{1}".format(i.IP(), i.prefixLen), 'netmask': i.prefixLen,
+ 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+ networkStatusList.append(intf_dict)
+
+ return networkStatusList
def getStatus(self):
"""
status = {}
status["name"] = self.name
status["network"] = self.getNetworkStatus()
+ status["docker_network"] = self.dcinfo['NetworkSettings']['IPAddress']
status["image"] = self.dimage
- status["cpu_quota"] = self.cpu_quota
- status["cpu_period"] = self.cpu_period
- status["cpu_shares"] = self.cpu_shares
- status["cpuset"] = self.cpuset
- status["mem_limit"] = self.mem_limit
- status["memswap_limit"] = self.memswap_limit
+ status["flavor_name"] = self.flavor_name
+ status["cpu_quota"] = self.resources.get('cpu_quota')
+ status["cpu_period"] = self.resources.get('cpu_period')
+ status["cpu_shares"] = self.resources.get('cpu_shares')
+ status["cpuset"] = self.resources.get('cpuset_cpus')
+ status["mem_limit"] = self.resources.get('mem_limit')
+ status["memswap_limit"] = self.resources.get('memswap_limit')
status["state"] = self.dcli.inspect_container(self.dc)["State"]
status["id"] = self.dcli.inspect_container(self.dc)["Id"]
+ status["short_id"] = self.dcli.inspect_container(self.dc)["Id"][:12]
+ status["hostname"] = self.dcli.inspect_container(self.dc)[
+ "Config"]['Hostname']
status["datacenter"] = (None if self.datacenter is None
else self.datacenter.label)
+
return status
+class EmulatorExtSAP(object):
+ """
+ Emulator specific class that defines an external service access point (SAP) for the service.
+    Wraps a Containernet OVS switch (created via net.addExtSAP).
+ Represents a single OVS switch connected to a (logical)
+ data center.
+ We can add emulator specific helper functions to it.
+ """
+
+ def __init__(self, sap_name, sap_net, datacenter, **kwargs):
+
+ self.datacenter = datacenter # pointer to current DC
+ self.net = self.datacenter.net
+ self.name = sap_name
+
+ LOG.debug("Starting ext SAP instance %r in data center %r" %
+ (sap_name, str(self.datacenter)))
+
+ # create SAP as separate OVS switch with an assigned ip address
+ self.ip = str(sap_net[1]) + '/' + str(sap_net.prefixlen)
+ self.subnet = sap_net
+ # allow connection to the external internet through the host
+ params = dict(NAT=True)
+ self.switch = self.net.addExtSAP(sap_name, self.ip, dpid=hex(
+ self._get_next_extSAP_dpid())[2:], **params)
+ self.switch.start()
+
+ def _get_next_extSAP_dpid(self):
+ global EXTSAPDPID_BASE
+ EXTSAPDPID_BASE += 1
+ return EXTSAPDPID_BASE
+
+ def getNetworkStatus(self):
+ """
+        Helper method to receive information about the virtual networks
+        this external SAP is connected to.
+ """
+ # get all links and find dc switch interface
+ networkStatusList = []
+ for i in self.switch.intfList():
+ vnf_name = self.name
+ vnf_interface = str(i)
+ if vnf_interface == 'lo':
+ continue
+ dc_port_name = self.datacenter.net.find_connected_dc_interface(
+ vnf_name, vnf_interface)
+ # format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
+ intf_dict = {'intf_name': str(i), 'ip': self.ip, 'netmask': i.prefixLen, 'mac': i.MAC(
+ ), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
+ networkStatusList.append(intf_dict)
+
+ return networkStatusList
+
+ def getStatus(self):
+ return {
+ "name": self.switch.name,
+ "datacenter": self.datacenter.name,
+ "network": self.getNetworkStatus()
+ }
+
+
class Datacenter(object):
"""
Represents a logical data center to which compute resources
DC_COUNTER = 1
- def __init__(self, label, metadata={}):
+ def __init__(self, label, metadata={}, resource_log_path=None):
self.net = None # DCNetwork to which we belong
# each node (DC) has a short internal name used by Mininet
# this is caused by Mininets naming limitations for swtiches etc.
self.name = "dc%d" % Datacenter.DC_COUNTER
Datacenter.DC_COUNTER += 1
# use this for user defined names that can be longer than self.name
- self.label = label
+ self.label = label
# dict to store arbitrary metadata (e.g. latitude and longitude)
self.metadata = metadata
+ # path to which resource information should be logged (e.g. for
+ # experiments). None = no logging
+ self.resource_log_path = resource_log_path
# first prototype assumes one "bigswitch" per DC
self.switch = None
# keep track of running containers
self.containers = {}
+ # keep track of attached external access points
+ self.extSAPs = {}
# pointer to assigned resource model
self._resource_model = None
def start(self):
pass
- def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny"):
+ def startCompute(self, name, image=None, command=None, network=None,
+ flavor_name="tiny", properties=dict(), **params):
"""
Create a new container as compute resource and connect it to this
data center.
:param command: command (string)
:param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
:param flavor_name: name of the flavor for this compute container
+ :param properties: dictionary of properties (key-value) that will be passed as environment variables
:return:
"""
assert name is not None
raise Exception("Container with name %s already exists." % name)
# set default parameter
if image is None:
- image = "ubuntu"
+ image = "ubuntu:trusty"
if network is None:
network = {} # {"ip": "10.0.0.254/8"}
if isinstance(network, dict):
- network = [network] # if we have only one network, put it in a list
+ # if we have only one network, put it in a list
+ network = [network]
if isinstance(network, list):
if len(network) < 1:
network.append({})
- # allocate in resource resource model and compute resource limits for new container
- cpu_limit = mem_limit = disk_limit = -1
- cpu_period = cpu_quota = None
- if self._resource_model is not None:
- # call allocate in resource model to calculate resource limit for this container
- (cpu_limit, mem_limit, disk_limit) = alloc = self._resource_model.allocate(name, flavor_name)
- LOG.debug("Allocation result: %r" % str(alloc))
- # check if we have a cpu_limit given by the used resource model
- if cpu_limit > 0:
- # calculate cpu period and quota for CFS
- # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
- # TODO consider multi core machines etc! non trivial!
- # Attention minimum cpu_quota is 1ms (micro)
- cpu_period = 100000 # lets consider a fixed period of 100000 microseconds for now
- cpu_quota = cpu_period * cpu_limit # calculate the fraction of cpu time for this container
- LOG.debug(
- "CPU limit: cpu_quota = cpu_period * cpu_limit = %f * %f = %f" % (cpu_period, cpu_limit, cpu_quota))
- # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
- if cpu_quota < 1000:
- cpu_quota = 1000
- LOG.warning("Increased CPU quota for %r to avoid system error." % name)
- # check if we have a mem_limit given by the used resource model
- if mem_limit > 0:
- LOG.debug(
- "MEM limit: mem_limit = %f MB" % mem_limit)
- # ATTENTION minimum mem_limit per container is 4MB
- if mem_limit < 4:
- mem_limit = 4
- LOG.warning("Increased MEM limit for %r because it was less than 4.0 MB." % name)
+        # apply hard-set resource limits (cpu_percent) if given in params
+ cpu_percentage = params.get('cpu_percent')
+ if cpu_percentage:
+ params['cpu_period'] = self.net.cpu_period
+ params['cpu_quota'] = self.net.cpu_period * float(cpu_percentage)
+
+ env = properties
+ properties['VNF_NAME'] = name
# create the container
d = self.net.addDocker(
- "%s" % (name),
+ str(name),
dimage=image,
dcmd=command,
datacenter=self,
flavor_name=flavor_name,
- cpu_period=int(cpu_period) if cpu_limit > 0 else None, # set cpu limits if needed
- cpu_quota=int(cpu_quota) if cpu_limit > 0 else None,
- mem_limit="%dm" % int(mem_limit) if mem_limit > 0 else None, # set mem limits if needed
- memswap_limit="%dm" % int(mem_limit) if mem_limit > 0 else None # lets set swap to mem limit for now
+ environment=env,
+ **params
)
+
+ # apply resource limits to container if a resource model is defined
+ if self._resource_model is not None:
+ try:
+ self._resource_model.allocate(d)
+ self._resource_model.write_allocation_log(
+ d, self.resource_log_path)
+ except NotEnoughResourcesAvailable as ex:
+ LOG.warning(
+ "Allocation of container %r was blocked by resource model." % name)
+ LOG.info(ex.message)
+ # ensure that we remove the container
+ self.net.removeDocker(name)
+ return None
+
# connect all given networks
+ # if no --net option is given, network = [{}], so 1 empty dict in the list
+ # this results in 1 default interface with a default ip address
for nw in network:
- # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
- self.net.addLink(d, self.switch, params1=nw, cls=Link)
+ # clean up network configuration (e.g. RTNETLINK does not allow ':'
+ # in intf names
+ if nw.get("id") is not None:
+ nw["id"] = self._clean_ifname(nw["id"])
+ # TODO we cannot use TCLink here (see:
+ # https://github.com/mpeuster/containernet/issues/3)
+ self.net.addLink(d, self.switch, params1=nw,
+ cls=Link, intfName1=nw.get('id'))
# do bookkeeping
self.containers[name] = d
return d # we might use UUIDs for naming later on
assert name is not None
if name not in self.containers:
raise Exception("Container with name %s not found." % name)
+ LOG.debug("Stopping compute instance %r in data center %r" %
+ (name, str(self)))
+
+ # stop the monitored metrics
+ if self.net.monitor_agent is not None:
+ self.net.monitor_agent.stop_metric(name)
+
+ # call resource model and free resources
+ if self._resource_model is not None:
+ self._resource_model.free(self.containers[name])
+ self._resource_model.write_free_log(
+ self.containers[name], self.resource_log_path)
+
+ # remove links
self.net.removeLink(
link=None, node1=self.containers[name], node2=self.switch)
+
+ # remove container
self.net.removeDocker("%s" % (name))
del self.containers[name]
- # call resource model and free resources
- if self._resource_model is not None:
- self._resource_model.free(name)
+
return True
+ def attachExternalSAP(self, sap_name, sap_net, **params):
+ extSAP = EmulatorExtSAP(sap_name, sap_net, self, **params)
+ # link SAP to the DC switch
+ self.net.addLink(extSAP.switch, self.switch, cls=Link)
+ self.extSAPs[sap_name] = extSAP
+
+ def removeExternalSAP(self, sap_name):
+ sap_switch = self.extSAPs[sap_name].switch
+ # sap_switch = self.net.getNodeByName(sap_name)
+ # remove link of SAP to the DC switch
+ self.net.removeLink(link=None, node1=sap_switch, node2=self.switch)
+ self.net.removeExtSAP(sap_name)
+ del self.extSAPs[sap_name]
+
def listCompute(self):
"""
Return a list of all running containers assigned to this
"""
return list(self.containers.itervalues())
+ def listExtSAPs(self):
+ """
+ Return a list of all external SAPs assigned to this
+ data center.
+ """
+ return list(self.extSAPs.itervalues())
+
def getStatus(self):
"""
Return a dict with status information about this DC.
"""
+ container_list = [name for name in self.containers]
+ ext_saplist = [sap_name for sap_name in self.extSAPs]
return {
"label": self.label,
"internalname": self.name,
"switch": self.switch.name,
"n_running_containers": len(self.containers),
- "metadata": self.metadata
+ "metadata": self.metadata,
+ "vnf_list": container_list,
+ "ext SAP list": ext_saplist
}
def assignResourceModel(self, rm):
:return:
"""
if self._resource_model is not None:
- raise Exception("There is already an resource model assigned to this DC.")
+ raise Exception(
+ "There is already an resource model assigned to this DC.")
self._resource_model = rm
self.net.rm_registrar.register(self, rm)
LOG.info("Assigned RM: %r to DC: %r" % (rm, self))
+ @staticmethod
+ def _clean_ifname(name):
+ """
+ Cleans up given string to be a
+ RTNETLINK compatible interface name.
+ :param name: string
+ :return: string
+ """
+ if name is None:
+ return "if0"
+ name = name.replace(":", "-")
+ name = name.replace(" ", "-")
+ name = name.replace(".", "-")
+ name = name.replace("_", "-")
+ return name