from mininet.link import TCLink
import networkx as nx
from emuvim.dcemulator.monitoring import DCNetworkMonitor
-
from emuvim.dcemulator.node import Datacenter, EmulatorCompute
+from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
class DCNetwork(Dockernet):
This class is used by topology definition scripts.
"""
- def __init__(self, **kwargs):
+ def __init__(self, dc_emulation_max_cpu=1.0, **kwargs):
+ """
+ Create an extended version of a Dockernet network
+ :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
+ :param kwargs: path through for Mininet parameters
+ :return:
+ """
self.dcs = {}
- # create a Mininet/Dockernet network
+
# call original Docker.__init__ and setup default controller
- #Dockernet.__init__(
- # self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
Dockernet.__init__(
self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
+
+ # add a remote controller to be able to use Ryu
self.addController('c0', controller=RemoteController)
# graph of the complete DC network
# start Ryu controller
self.startRyu()
+ # initialize resource model registrar
+ self.rm_registrar = ResourceModelRegistrar(dc_emulation_max_cpu)
def addDatacenter(self, label, metadata={}):
"""
def __init__(
self, name, dimage, **kwargs):
logging.debug("Create EmulatorCompute instance: %s" % name)
- self.datacenter = None # pointer to current DC
+ self.datacenter = kwargs.get("datacenter") # pointer to current DC
+ self.flavor_name = kwargs.get("flavor_name")
# call original Docker.__init__
Docker.__init__(self, name, dimage, **kwargs)
self.label = label
# dict to store arbitrary metadata (e.g. latitude and longitude)
self.metadata = metadata
- self.switch = None # first prototype assumes one "bigswitch" per DC
- self.containers = {} # keep track of running containers
+ # first prototype assumes one "bigswitch" per DC
+ self.switch = None
+ # keep track of running containers
+ self.containers = {}
+ # pointer to assigned resource model
+ self._resource_model = None
    def __repr__(self):
        # use the data center's label as its textual representation
        return self.label
    def start(self):
        # nothing to start here; compute containers are created
        # individually via startCompute()
        pass
- def startCompute(self, name, image=None, command=None, network=None):
+ def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny"):
"""
Create a new container as compute resource and connect it to this
data center.
:param image: image name (string)
:param command: command (string)
:param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
+ :param flavor_name: name of the flavor for this compute container
:return:
"""
assert name is not None
if len(network) < 1:
network.append({})
+ # allocate in resource model and compute resource limits for new container
+ if self._resource_model is not None:
+ # TODO pass resource limits to new container (cf. Dockernet API)
+ cpu_limit, mem_limit, disk_limit = self._resource_model.allocate(name, flavor_name)
# create the container
- d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command)
+ d = self.net.addDocker(
+ "%s" % (name),
+ dimage=image,
+ dcmd=command,
+ datacenter=self,
+ flavor_name=flavor_name)
# connect all given networks
for nw in network:
# TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
self.net.addLink(d, self.switch, params1=nw, cls=Link)
# do bookkeeping
self.containers[name] = d
- d.datacenter = self
return d # we might use UUIDs for naming later on
def stopCompute(self, name):
link=None, node1=self.containers[name], node2=self.switch)
self.net.removeDocker("%s" % (name))
del self.containers[name]
+ # call resource model and free resources
+ if self._resource_model is not None:
+ self._resource_model.free(name)
return True
def listCompute(self):
"n_running_containers": len(self.containers),
"metadata": self.metadata
}
+
+ def assignResourceModel(self, rm):
+ if self._resource_model is not None:
+ raise Exception("There is already an resource model assigned to this DC.")
+ self._resource_model = rm
+ self.net.rm_registrar.register(self, rm)
+ logging.info("Assigned RM: %r to DC: %r" % (rm, self))
+
--- /dev/null
+"""
+Base classes needed for resource models support.
+"""
+
+import logging
+LOG = logging.getLogger("resourcemodel")
+LOG.setLevel(logging.DEBUG)
+
+
class ResourceModelRegistrar(object):
    """
    Network-wide registry that keeps track of the resource model assigned
    to each data center and of the total CPU time available for emulation.
    """

    def __init__(self, dc_emulation_max_cpu):
        """
        :param dc_emulation_max_cpu: max. share of host CPU time that all
               containers in data centers together may use
        """
        self.e_cpu = dc_emulation_max_cpu
        # maps data center -> assigned resource model
        self._resource_models = dict()
        # lazy %-args: formatting only happens if the record is emitted
        LOG.info("Resource model registrar created with dc_emulation_max_cpu=%r",
                 dc_emulation_max_cpu)

    def register(self, dc, rm):
        """
        Register a resource model for a data center.
        :param dc: data center the model belongs to
        :param rm: resource model instance
        :raises Exception: if the DC already has a resource model assigned
        """
        if dc in self._resource_models:
            raise Exception("There is already a resource model assigned to this DC.")
        self._resource_models[dc] = rm
        LOG.info("Registrar: Added resource model: %r", rm)
+
+
class ResourceFlavor(object):
    """
    Describes a resource flavor (OpenStack-like "tiny", "small", ...)
    by a name and a dict of metric values.
    """

    def __init__(self, name, metrics):
        """
        :param name: flavor name (string)
        :param metrics: dict of metric name -> value,
               e.g. {"compute": 1, "memory": 32, "disk": 1}
        """
        self.name = name
        self.metrics = metrics
        # lazy %-args: formatting only happens if the record is emitted
        LOG.debug("Create flavor %r with metrics: %r", name, metrics)

    def __repr__(self):
        # helpful when flavors show up in log output
        return "%s(%r, %r)" % (self.__class__.__name__, self.name, self.metrics)
+
+
class BaseResourceModel(object):
    """
    Base class for resource models. Subclasses override allocate()/free()
    to implement real resource accounting; this default implementation
    only logs and enforces no limits.
    """

    def __init__(self):
        # flavor name -> ResourceFlavor object
        self._flavors = dict()
        self._initDefaultFlavors()
        # lazy %-args: formatting only happens if the record is emitted
        LOG.info("Resource model %r initialized", self)

    def __repr__(self):
        return self.__class__.__name__

    def _initDefaultFlavors(self):
        # initialize some default flavors (inspired by OpenStack);
        # data-driven loop instead of five copy-pasted addFlavour() calls
        for name, metrics in (
                ("tiny", {"compute": 1, "memory": 32, "disk": 1}),
                ("small", {"compute": 4, "memory": 128, "disk": 20}),
                ("medium", {"compute": 8, "memory": 256, "disk": 40}),
                ("large", {"compute": 16, "memory": 512, "disk": 80}),
                ("xlarge", {"compute": 32, "memory": 1024, "disk": 160})):
            self.addFlavour(ResourceFlavor(name, metrics))

    def addFlavour(self, fl):
        """
        Register an additional flavor with this resource model.
        :param fl: ResourceFlavor instance
        :raises Exception: if a flavor with the same name already exists
        """
        if fl.name in self._flavors:
            raise Exception("Flavor with name %r already exists!" % fl.name)
        self._flavors[fl.name] = fl

    def allocate(self, name, flavor_name):
        """
        Allocate resources for a new compute instance.
        :param name: name of the compute instance
        :param flavor_name: name of the flavor to allocate
        :return: (cpu_limit, mem_limit, disk_limit); 0.0 = no limit here
        """
        LOG.info("RM-ALLOCATE: %r with flavor: %r", name, flavor_name)
        return 0.0, 0.0, 0.0

    def free(self, name):
        """
        Free the resources held by the given compute instance.
        :param name: name of the compute instance
        """
        LOG.info("RM-FREE: %r", name)
+
--- /dev/null
+"""
+Playground for resource models created by University of Paderborn.
+"""
+import logging
+from emuvim.dcemulator.resourcemodel import BaseResourceModel
+
+LOG = logging.getLogger("upbrm")
+LOG.setLevel(logging.DEBUG)
+
+
class UpbSimpleCloudDcApproxRM(BaseResourceModel):
    """
    Playground resource model for a simple cloud data center
    (University of Paderborn approximation model).
    """

    def __init__(self):
        # name the class explicitly: super(self.__class__, self) recurses
        # infinitely as soon as this class is subclassed, because the
        # subclass's self.__class__ resolves back to the subclass itself
        super(UpbSimpleCloudDcApproxRM, self).__init__()
+
--- /dev/null
+"""
+A simple topology to test resource model support.
+"""
+
+import logging
+import time
+from mininet.log import setLogLevel
+from emuvim.dcemulator.net import DCNetwork
+from emuvim.api.zerorpc.compute import ZeroRpcApiEndpoint
+from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
+from emuvim.dcemulator.resourcemodel.upbrm import UpbSimpleCloudDcApproxRM
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+ # create topology
+ # use a maximum of 50% cpu time for containers added to data centers
+ net = DCNetwork(dc_emulation_max_cpu=0.5)
+ # add some data centers and create a topology
+ dc1 = net.addDatacenter("dc1")
+ dc2 = net.addDatacenter("dc2")
+ s1 = net.addSwitch("s1")
+ net.addLink(dc1, s1, delay="10ms")
+ net.addLink(dc2, s1, delay="20ms")
+
+ # create and assign resource models for each DC
+ rm1 = UpbSimpleCloudDcApproxRM()
+ rm2 = UpbSimpleCloudDcApproxRM()
+ dc1.assignResourceModel(rm1)
+ dc2.assignResourceModel(rm2)
+
+ # add the command line interface endpoint to each DC
+ zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
+ zapi1.connectDatacenter(dc1)
+ zapi1.connectDatacenter(dc2)
+ # run API endpoint server (in another thread, don't block)
+ zapi1.start()
+
+ # add the SONATA dummy gatekeeper to each DC
+ sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 8000)
+ sdkg1.connectDatacenter(dc1)
+ sdkg1.connectDatacenter(dc2)
+ # run the dummy gatekeeper (in another thread, don't block)
+ sdkg1.start()
+
+ # start the emulation platform
+ net.start()
+ print "Wait a moment and allocate some compute start some compute resources..."
+ time.sleep(2)
+ dc1.startCompute("vnf1")
+ dc1.startCompute("vnf2", flavor_name="tiny")
+ dc1.startCompute("vnf3", flavor_name="small")
+ dc2.startCompute("vnf4", flavor_name="medium")
+ print "... done."
+ time.sleep(2)
+ net.CLI()
+ net.stop()
+
+
def main():
    # configure Mininet's own log level before building the topology
    setLogLevel('info')
    create_topology1()


if __name__ == '__main__':
    main()