From 42f08becf9e88b1765793977d581f08c3ebf641f Mon Sep 17 00:00:00 2001
From: peusterm
Date: Thu, 10 Mar 2016 21:56:34 +0100
Subject: [PATCH] Initial resource limitation model API and classes.

---
 src/emuvim/dcemulator/net.py                  | 18 +++--
 src/emuvim/dcemulator/node.py                 | 37 ++++++++--
 .../dcemulator/resourcemodel/__init__.py      | 67 +++++++++++++++++++
 src/emuvim/dcemulator/resourcemodel/upbrm.py  | 15 +++++
 .../examples/resource_model_demo_topology.py  | 67 +++++++++++++++++++
 5 files changed, 193 insertions(+), 11 deletions(-)
 create mode 100644 src/emuvim/dcemulator/resourcemodel/__init__.py
 create mode 100644 src/emuvim/dcemulator/resourcemodel/upbrm.py
 create mode 100644 src/emuvim/examples/resource_model_demo_topology.py

diff --git a/src/emuvim/dcemulator/net.py b/src/emuvim/dcemulator/net.py
index 00bb65c..da546ab 100755
--- a/src/emuvim/dcemulator/net.py
+++ b/src/emuvim/dcemulator/net.py
@@ -15,8 +15,8 @@ from mininet.log import setLogLevel, info, debug
 from mininet.link import TCLink
 import networkx as nx
 from emuvim.dcemulator.monitoring import DCNetworkMonitor
-
 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
+from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
 
 
 class DCNetwork(Dockernet):
@@ -27,14 +27,20 @@ class DCNetwork(Dockernet):
     This class is used by topology definition scripts.
     """
 
-    def __init__(self, **kwargs):
+    def __init__(self, dc_emulation_max_cpu=1.0, **kwargs):
+        """
+        Create an extended version of a Dockernet network
+        :param dc_emulation_max_cpu: maximum fraction of CPU time to be used by all containers in the emulated data centers
+        :param kwargs: pass-through for Mininet parameters
+        :return:
+        """
         self.dcs = {}
 
-        # create a Mininet/Dockernet network
+        # call original Dockernet.__init__ and set up the default controller
-        #Dockernet.__init__(
-        #    self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
         Dockernet.__init__(
             self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
+
+        # add a remote controller to be able to use Ryu
         self.addController('c0', controller=RemoteController)
 
         # graph of the complete DC network
@@ -46,6 +52,8 @@ class DCNetwork(Dockernet):
 
         # start Ryu controller
         self.startRyu()
+        # initialize resource model registrar
+        self.rm_registrar = ResourceModelRegistrar(dc_emulation_max_cpu)
 
     def addDatacenter(self, label, metadata={}):
         """
diff --git a/src/emuvim/dcemulator/node.py b/src/emuvim/dcemulator/node.py
index 6030153..5b35b9b 100755
--- a/src/emuvim/dcemulator/node.py
+++ b/src/emuvim/dcemulator/node.py
@@ -22,7 +22,8 @@ class EmulatorCompute(Docker):
     def __init__(
             self, name, dimage, **kwargs):
         logging.debug("Create EmulatorCompute instance: %s" % name)
-        self.datacenter = None  # pointer to current DC
+        self.datacenter = kwargs.get("datacenter")  # pointer to current DC
+        self.flavor_name = kwargs.get("flavor_name")
         # call original Docker.__init__
         Docker.__init__(self, name, dimage, **kwargs)
 
@@ -77,8 +78,12 @@ class Datacenter(object):
         self.label = label
         # dict to store arbitrary metadata (e.g. latitude and longitude)
         self.metadata = metadata
-        self.switch = None  # first prototype assumes one "bigswitch" per DC
-        self.containers = {}  # keep track of running containers
+        # first prototype assumes one "bigswitch" per DC
+        self.switch = None
+        # keep track of running containers
+        self.containers = {}
+        # pointer to assigned resource model
+        self._resource_model = None
 
     def __repr__(self):
         return self.label
@@ -103,7 +108,7 @@ class Datacenter(object):
     def start(self):
         pass
 
-    def startCompute(self, name, image=None, command=None, network=None):
+    def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny"):
         """
         Create a new container as compute resource and connect it to this
         data center.
@@ -111,6 +116,7 @@ class Datacenter(object):
         :param image: image name (string)
         :param command: command (string)
         :param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
+        :param flavor_name: name of the flavor for this compute container
        :return:
         """
         assert name is not None
@@ -128,15 +134,23 @@ class Datacenter(object):
             if len(network) < 1:
                 network.append({})
 
+        # allocate in the resource model and compute resource limits for the new container
+        if self._resource_model is not None:
+            # TODO pass resource limits to new container (cf. Dockernet API)
+            cpu_limit, mem_limit, disk_limit = self._resource_model.allocate(name, flavor_name)
         # create the container
-        d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command)
+        d = self.net.addDocker(
+            "%s" % (name),
+            dimage=image,
+            dcmd=command,
+            datacenter=self,
+            flavor_name=flavor_name)
         # connect all given networks
         for nw in network:
             # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
             self.net.addLink(d, self.switch, params1=nw, cls=Link)
         # do bookkeeping
         self.containers[name] = d
-        d.datacenter = self
         return d  # we might use UUIDs for naming later on
 
     def stopCompute(self, name):
@@ -150,6 +164,9 @@ class Datacenter(object):
             link=None, node1=self.containers[name], node2=self.switch)
         self.net.removeDocker("%s" % (name))
         del self.containers[name]
+        # call resource model and free resources
+        if self._resource_model is not None:
+            self._resource_model.free(name)
         return True
 
     def listCompute(self):
@@ -170,3 +187,11 @@ class Datacenter(object):
             "n_running_containers": len(self.containers),
             "metadata": self.metadata
         }
+
+    def assignResourceModel(self, rm):
+        if self._resource_model is not None:
+            raise Exception("There is already a resource model assigned to this DC.")
+        self._resource_model = rm
+        self.net.rm_registrar.register(self, rm)
+        logging.info("Assigned RM: %r to DC: %r" % (rm, self))
+
diff --git a/src/emuvim/dcemulator/resourcemodel/__init__.py b/src/emuvim/dcemulator/resourcemodel/__init__.py
new file mode 100644
index 0000000..6ddd459
--- /dev/null
+++ b/src/emuvim/dcemulator/resourcemodel/__init__.py
@@ -0,0 +1,67 @@
+"""
+Base classes needed for resource model support.
+"""
+
+import logging
+LOG = logging.getLogger("resourcemodel")
+LOG.setLevel(logging.DEBUG)
+
+
+class ResourceModelRegistrar(object):
+
+    def __init__(self, dc_emulation_max_cpu):
+        self.e_cpu = dc_emulation_max_cpu
+        # pointer to all resource models assigned to DCs
+        self._resource_models = dict()
+        LOG.info("Resource model registrar created with dc_emulation_max_cpu=%r" % dc_emulation_max_cpu)
+
+    def register(self, dc, rm):
+        if dc in self._resource_models:
+            raise Exception("There is already a resource model assigned to this DC.")
+        self._resource_models[dc] = rm
+        LOG.info("Registrar: Added resource model: %r" % rm)
+
+
+class ResourceFlavor(object):
+
+    def __init__(self, name, metrics):
+        self.name = name
+        self.metrics = metrics
+        LOG.debug("Create flavor %r with metrics: %r" % (name, metrics))
+
+
+class BaseResourceModel(object):
+
+    def __init__(self):
+        self._flavors = dict()
+        self._initDefaultFlavors()
+        LOG.info("Resource model %r initialized" % self)
+
+    def __repr__(self):
+        return self.__class__.__name__
+
+    def _initDefaultFlavors(self):
+        # initialize some default flavors (inspired by OpenStack)
+        self.addFlavour(ResourceFlavor(
+            "tiny", {"compute": 1, "memory": 32, "disk": 1}))
+        self.addFlavour(ResourceFlavor(
+            "small", {"compute": 4, "memory": 128, "disk": 20}))
+        self.addFlavour(ResourceFlavor(
+            "medium", {"compute": 8, "memory": 256, "disk": 40}))
+        self.addFlavour(ResourceFlavor(
+            "large", {"compute": 16, "memory": 512, "disk": 80}))
+        self.addFlavour(ResourceFlavor(
+            "xlarge", {"compute": 32, "memory": 1024, "disk": 160}))
+
+    def addFlavour(self, fl):
+        if fl.name in self._flavors:
+            raise Exception("Flavor with name %r already exists!" % fl.name)
+        self._flavors[fl.name] = fl
+
+    def allocate(self, name, flavor_name):
+        LOG.info("RM-ALLOCATE: %r with flavor: %r" % (name, flavor_name))
+        return 0.0, 0.0, 0.0
+
+    def free(self, name):
+        LOG.info("RM-FREE: %r" % name)
+
diff --git a/src/emuvim/dcemulator/resourcemodel/upbrm.py b/src/emuvim/dcemulator/resourcemodel/upbrm.py
new file mode 100644
index 0000000..c6666ae
--- /dev/null
+++ b/src/emuvim/dcemulator/resourcemodel/upbrm.py
@@ -0,0 +1,15 @@
+"""
+Playground for resource models created by University of Paderborn.
+"""
+import logging
+from emuvim.dcemulator.resourcemodel import BaseResourceModel
+
+LOG = logging.getLogger("upbrm")
+LOG.setLevel(logging.DEBUG)
+
+
+class UpbSimpleCloudDcApproxRM(BaseResourceModel):
+
+    def __init__(self):
+        super(UpbSimpleCloudDcApproxRM, self).__init__()
+
diff --git a/src/emuvim/examples/resource_model_demo_topology.py b/src/emuvim/examples/resource_model_demo_topology.py
new file mode 100644
index 0000000..329ce7d
--- /dev/null
+++ b/src/emuvim/examples/resource_model_demo_topology.py
@@ -0,0 +1,67 @@
+"""
+A simple topology to test resource model support.
+"""
+
+import logging
+import time
+from mininet.log import setLogLevel
+from emuvim.dcemulator.net import DCNetwork
+from emuvim.api.zerorpc.compute import ZeroRpcApiEndpoint
+from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
+from emuvim.dcemulator.resourcemodel.upbrm import UpbSimpleCloudDcApproxRM
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+    # create topology
+    # use a maximum of 50% cpu time for containers added to data centers
+    net = DCNetwork(dc_emulation_max_cpu=0.5)
+    # add some data centers and create a topology
+    dc1 = net.addDatacenter("dc1")
+    dc2 = net.addDatacenter("dc2")
+    s1 = net.addSwitch("s1")
+    net.addLink(dc1, s1, delay="10ms")
+    net.addLink(dc2, s1, delay="20ms")
+
+    # create and assign resource models for each DC
+    rm1 = UpbSimpleCloudDcApproxRM()
+    rm2 = UpbSimpleCloudDcApproxRM()
+    dc1.assignResourceModel(rm1)
+    dc2.assignResourceModel(rm2)
+
+    # add the command line interface endpoint to each DC
+    zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
+    zapi1.connectDatacenter(dc1)
+    zapi1.connectDatacenter(dc2)
+    # run API endpoint server (in another thread, don't block)
+    zapi1.start()
+
+    # add the SONATA dummy gatekeeper to each DC
+    sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 8000)
+    sdkg1.connectDatacenter(dc1)
+    sdkg1.connectDatacenter(dc2)
+    # run the dummy gatekeeper (in another thread, don't block)
+    sdkg1.start()
+
+    # start the emulation platform
+    net.start()
+    print "Wait a moment and start some compute resources..."
+    time.sleep(2)
+    dc1.startCompute("vnf1")
+    dc1.startCompute("vnf2", flavor_name="tiny")
+    dc1.startCompute("vnf3", flavor_name="small")
+    dc2.startCompute("vnf4", flavor_name="medium")
+    print "... done."
+    time.sleep(2)
+    net.CLI()
+    net.stop()
+
+
+def main():
+    setLogLevel('info')  # set Mininet loglevel
+    create_topology1()
+
+
+if __name__ == '__main__':
+    main()
-- 
2.25.1
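
Addendum (not part of the patch): BaseResourceModel.allocate() above is still a stub that only logs the request and returns 0.0, 0.0, 0.0, and UpbSimpleCloudDcApproxRM does not override it yet. The following sketch illustrates one way a concrete model could turn the flavor metrics into per-container limits by splitting a CPU budget proportionally across the "compute" units of the allocated flavors. Only BaseResourceModel, the flavor metrics, and the registrar's e_cpu value are taken from this patch; the class name ProportionalShareRM, its e_cpu constructor argument, and the proportional-split policy itself are illustrative assumptions.

# Minimal sketch of a concrete resource model, assuming the classes from
# this patch. Not part of the patch; the policy shown here is only one
# possible interpretation of how allocate()/free() could be filled in.
from emuvim.dcemulator.resourcemodel import BaseResourceModel


class ProportionalShareRM(BaseResourceModel):
    """Hypothetical model: CPU share proportional to the flavor's compute units."""

    def __init__(self, e_cpu=1.0):
        super(ProportionalShareRM, self).__init__()
        # CPU budget for this DC; in a full integration this would likely be
        # taken from ResourceModelRegistrar.e_cpu (an assumption, not wired up here)
        self.e_cpu = e_cpu
        self._allocated_compute = dict()  # container name -> compute units held

    def allocate(self, name, flavor_name):
        fl = self._flavors[flavor_name]
        self._allocated_compute[name] = fl.metrics["compute"]
        total_units = float(sum(self._allocated_compute.values()))
        # this container's proportional share of the CPU budget
        cpu_limit = self.e_cpu * fl.metrics["compute"] / total_units
        # memory and disk are passed through unchanged from the flavor definition
        return cpu_limit, fl.metrics["memory"], fl.metrics["disk"]

    def free(self, name):
        self._allocated_compute.pop(name, None)

With such a model, e.g. dc1.assignResourceModel(ProportionalShareRM(e_cpu=0.5)) in the demo topology would make startCompute() receive concrete cpu_limit/mem_limit/disk_limit values from allocate(). Note that a purely proportional policy also shrinks the shares of already-running containers whenever a new one is allocated, so a real implementation would additionally have to update their limits, which ties into the TODO in startCompute() about passing limits to the Dockernet container.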