X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=src%2Femuvim%2Fdcemulator%2Fnode.py;h=5cfc9efff545bbfec151ba54084d59c2a675be71;hb=ee5179f161d10b35a8a73f495ca3692fda9b2e71;hp=7746e9e05fcd2f400ad9a80d2cd0f4132e5e47b4;hpb=71b3a2f2ec214416524e4c3687b3c1136fb5281c;p=osm%2Fvim-emu.git diff --git a/src/emuvim/dcemulator/node.py b/src/emuvim/dcemulator/node.py index 7746e9e..5cfc9ef 100755 --- a/src/emuvim/dcemulator/node.py +++ b/src/emuvim/dcemulator/node.py @@ -1,22 +1,47 @@ """ -Distributed Cloud Emulator (dcemulator) -(c) 2015 by Manuel Peuster +Copyright (c) 2015 SONATA-NFV and Paderborn University +ALL RIGHTS RESERVED. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION] +nor the names of its contributors may be used to endorse or promote +products derived from this software without specific prior written +permission. + +This work has been performed in the framework of the SONATA project, +funded by the European Commission under Grant number 671517 through +the Horizon 2020 and 5G-PPP programmes. The authors would like to +acknowledge the contributions of their colleagues of the SONATA +partner consortium (www.sonata-nfv.eu). """ from mininet.node import Docker from mininet.link import Link +from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable import logging +import time +import json -LOG = logging.getLogger("dcemulator") +LOG = logging.getLogger("dcemulator.node") LOG.setLevel(logging.DEBUG) DCDPID_BASE = 1000 # start of switch dpid's used for data center switches - class EmulatorCompute(Docker): """ Emulator specific compute node class. - Inherits from Dockernet's Docker host class. + Inherits from Containernet's Docker host class. Represents a single container connected to a (logical) data center. We can add emulator specific helper functions to it. @@ -35,9 +60,17 @@ class EmulatorCompute(Docker): Helper method to receive information about the virtual networks this compute instance is connected to. 
""" - # format list of tuples (name, Ip, MAC, isUp, status) - return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status()) - for i in self.intfList()] + # get all links and find dc switch interface + networkStatusList = [] + for i in self.intfList(): + vnf_name = self.name + vnf_interface = str(i) + dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface) + # format list of tuples (name, Ip, MAC, isUp, status, dc_portname) + intf_dict = {'intf_name': str(i), 'ip': i.IP(), 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name} + networkStatusList.append(intf_dict) + + return networkStatusList def getStatus(self): """ @@ -46,17 +79,22 @@ class EmulatorCompute(Docker): status = {} status["name"] = self.name status["network"] = self.getNetworkStatus() + status["docker_network"] = self.dcinfo['NetworkSettings']['IPAddress'] status["image"] = self.dimage - status["cpu_quota"] = self.cpu_quota - status["cpu_period"] = self.cpu_period - status["cpu_shares"] = self.cpu_shares - status["cpuset"] = self.cpuset - status["mem_limit"] = self.mem_limit - status["memswap_limit"] = self.memswap_limit + status["flavor_name"] = self.flavor_name + status["cpu_quota"] = self.resources.get('cpu_quota') + status["cpu_period"] = self.resources.get('cpu_period') + status["cpu_shares"] = self.resources.get('cpu_shares') + status["cpuset"] = self.resources.get('cpuset_cpus') + status["mem_limit"] = self.resources.get('mem_limit') + status["memswap_limit"] = self.resources.get('memswap_limit') status["state"] = self.dcli.inspect_container(self.dc)["State"] status["id"] = self.dcli.inspect_container(self.dc)["Id"] + status["short_id"] = self.dcli.inspect_container(self.dc)["Id"][:12] + status["hostname"] = self.dcli.inspect_container(self.dc)["Config"]['Hostname'] status["datacenter"] = (None if self.datacenter is None else self.datacenter.label) + return status @@ -70,16 +108,18 @@ class Datacenter(object): DC_COUNTER = 1 - def __init__(self, label, metadata={}): + def __init__(self, label, metadata={}, resource_log_path=None): self.net = None # DCNetwork to which we belong # each node (DC) has a short internal name used by Mininet # this is caused by Mininets naming limitations for swtiches etc. self.name = "dc%d" % Datacenter.DC_COUNTER Datacenter.DC_COUNTER += 1 # use this for user defined names that can be longer than self.name - self.label = label + self.label = label # dict to store arbitrary metadata (e.g. latitude and longitude) self.metadata = metadata + # path to which resource information should be logged (e.g. for experiments). None = no logging + self.resource_log_path = resource_log_path # first prototype assumes one "bigswitch" per DC self.switch = None # keep track of running containers @@ -110,7 +150,7 @@ class Datacenter(object): def start(self): pass - def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny"): + def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny", **params): """ Create a new container as compute resource and connect it to this data center. @@ -127,7 +167,7 @@ class Datacenter(object): raise Exception("Container with name %s already exists." 
                             % name)
         # set default parameter
         if image is None:
-            image = "ubuntu"
+            image = "ubuntu:trusty"
         if network is None:
             network = {}  # {"ip": "10.0.0.254/8"}
         if isinstance(network, dict):
@@ -136,28 +176,12 @@ class Datacenter(object):
             if len(network) < 1:
                 network.append({})
 
-        # allocate in resource resource model and compute resource limits for new container
-        cpu_limit = mem_limit = disk_limit = -1
-        cpu_period = cpu_quota = None
-        if self._resource_model is not None:
-            # call allocate in resource model to calculate resource limit for this container
-            (cpu_limit, mem_limit, disk_limit) = alloc = self._resource_model.allocate(name, flavor_name)
-            LOG.debug("Allocation result: %r" % str(alloc))
-            # check if we have a cpu_limit given by the used resource model
-            if cpu_limit > 0:
-                # calculate cpu period and quota for CFS
-                # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
-                # TODO consider multi core machines etc! non trivial!
-                # Attention minimum cpu_quota is 1ms (micro)
-                cpu_period = 100000  # lets consider a fixed period of 100000 microseconds for now
-                cpu_quota = cpu_period * cpu_limit  # calculate the fraction of cpu time for this container
-                LOG.debug(
-                    "CPU limit: cpu_quota = cpu_period * cpu_limit = %f * %f = %f" % (cpu_period, cpu_limit, cpu_quota))
-                # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
-                if cpu_quota < 1000:
-                    cpu_quota = 1000
-                    LOG.warning("Increased CPU quota for %d to avoid system error." % name)
-            # TODO add memory and disc limitations
+        # apply hard-set resource limits (translate cpu_percent into CFS cpu_period/cpu_quota)
+        cpu_percentage = params.get('cpu_percent')
+        if cpu_percentage:
+            params['cpu_period'] = self.net.cpu_period
+            params['cpu_quota'] = self.net.cpu_period * float(cpu_percentage)
+
         # create the container
         d = self.net.addDocker(
             "%s" % (name),
@@ -165,13 +189,33 @@ class Datacenter(object):
             dcmd=command,
             datacenter=self,
             flavor_name=flavor_name,
-            cpu_period=int(cpu_period) if cpu_limit > 0 else None,  # set cpu limits if needed
-            cpu_quota=int(cpu_quota) if cpu_limit > 0 else None,
+            environment = {'VNF_NAME':name},
+            **params
         )
+
+
+
+        # apply resource limits to container if a resource model is defined
+        if self._resource_model is not None:
+            try:
+                self._resource_model.allocate(d)
+                self._resource_model.write_allocation_log(d, self.resource_log_path)
+            except NotEnoughResourcesAvailable as ex:
+                LOG.warning("Allocation of container %r was blocked by resource model." % name)
+                LOG.info(ex.message)
+                # ensure that we remove the container
+                self.net.removeDocker(name)
+                return None
+
         # connect all given networks
+        # if no --net option is given, network = [{}], so 1 empty dict in the list
+        # this results in 1 default interface with a default ip address
         for nw in network:
-            # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
-            self.net.addLink(d, self.switch, params1=nw, cls=Link)
+            # clean up network configuration (e.g. RTNETLINK does not allow ':' in intf names)
+            if nw.get("id") is not None:
+                nw["id"] = self._clean_ifname(nw["id"])
+            # TODO we cannot use TCLink here (see: https://github.com/mpeuster/containernet/issues/3)
+            self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
         # do bookkeeping
         self.containers[name] = d
         return d  # we might use UUIDs for naming later on
@@ -183,15 +227,45 @@ class Datacenter(object):
         assert name is not None
         if name not in self.containers:
             raise Exception("Container with name %s not found."
                             % name)
+        LOG.debug("Stopping compute instance %r in data center %r" % (name, str(self)))
+
+        # stop monitoring this container's metrics
+        if self.net.monitor_agent is not None:
+            self.net.monitor_agent.stop_metric(name)
+
+        # call resource model and free resources
+        if self._resource_model is not None:
+            self._resource_model.free(self.containers[name])
+            self._resource_model.write_free_log(self.containers[name], self.resource_log_path)
+
+        # remove links
         self.net.removeLink(
             link=None, node1=self.containers[name], node2=self.switch)
+
+        # remove container
         self.net.removeDocker("%s" % (name))
         del self.containers[name]
-        # call resource model and free resources
-        if self._resource_model is not None:
-            self._resource_model.free(name)
+
         return True
 
+    def attachExternalSAP(self, sap_name, sap_ip):
+        # create SAP as OVS internal interface
+        sap_intf = self.switch.attachInternalIntf(sap_name, sap_ip)
+
+        # add this as a link to the DCNetwork graph, so it is available for routing
+        attr_dict2 = {'src_port_id': sap_name, 'src_port_nr': None,
+                      'src_port_name': sap_name,
+                      'dst_port_id': self.switch.ports[sap_intf], 'dst_port_nr': self.switch.ports[sap_intf],
+                      'dst_port_name': sap_intf.name}
+        self.net.DCNetwork_graph.add_edge(sap_name, self.switch.name, attr_dict=attr_dict2)
+
+        attr_dict2 = {'dst_port_id': sap_name, 'dst_port_nr': None,
+                      'dst_port_name': sap_name,
+                      'src_port_id': self.switch.ports[sap_intf], 'src_port_nr': self.switch.ports[sap_intf],
+                      'src_port_name': sap_intf.name}
+        self.net.DCNetwork_graph.add_edge(self.switch.name, sap_name, attr_dict=attr_dict2)
+
+
     def listCompute(self):
         """
         Return a list of all running containers assigned to this
@@ -223,3 +297,19 @@ class Datacenter(object):
         self.net.rm_registrar.register(self, rm)
         LOG.info("Assigned RM: %r to DC: %r" % (rm, self))
 
+    @staticmethod
+    def _clean_ifname(name):
+        """
+        Cleans up the given string to be an
+        RTNETLINK-compatible interface name.
+        :param name: string
+        :return: string
+        """
+        if name is None:
+            return "if0"
+        name = name.replace(":", "-")
+        name = name.replace(" ", "-")
+        name = name.replace(".", "-")
+        name = name.replace("_", "-")
+        return name
+
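
Note on the CPU limit change in startCompute(): the removed code computed CFS bandwidth limits per container from the resource model, while the new code maps a user-supplied cpu_percent onto the network-wide cpu_period and a matching cpu_quota. A minimal sketch of that mapping, assuming the 100000 microsecond period and the 1000 microsecond minimum quota mentioned in the removed code (the helper name is illustrative and not part of the patch):

def cfs_limits(cpu_percent, cpu_period=100000):
    """Map a CPU share in [0.0, 1.0] to CFS (cpu_period, cpu_quota) in microseconds."""
    cpu_quota = int(cpu_period * float(cpu_percent))
    # the kernel rejects quotas below 1000 us (see sched-bwc.txt), so clamp upwards
    if cpu_quota < 1000:
        cpu_quota = 1000
    return cpu_period, cpu_quota

# e.g. a 20% CPU limit: cfs_limits(0.2) -> (100000, 20000)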