Merge pull request #48 from mpeuster/master
Added basic resource model API.
diff --git a/src/emuvim/dcemulator/net.py b/src/emuvim/dcemulator/net.py
index 00bb65c..da546ab 100755
--- a/src/emuvim/dcemulator/net.py
+++ b/src/emuvim/dcemulator/net.py
@@ -15,8 +15,8 @@
from mininet.link import TCLink
import networkx as nx
from emuvim.dcemulator.monitoring import DCNetworkMonitor
-
from emuvim.dcemulator.node import Datacenter, EmulatorCompute
+from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
class DCNetwork(Dockernet):
@@ -27,14 +27,20 @@
This class is used by topology definition scripts.
"""
- def __init__(self, **kwargs):
+ def __init__(self, dc_emulation_max_cpu=1.0, **kwargs):
+ """
+ Create an extended version of a Dockernet network
+ :param dc_emulation_max_cpu: max. fraction of CPU time that may be used by all containers in the data centers
+ :param kwargs: passed through to the Mininet/Dockernet constructor
+ :return:
+ """
self.dcs = {}
- # create a Mininet/Dockernet network
+
# call original Dockernet.__init__ and set up the default controller
- #Dockernet.__init__(
- # self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
Dockernet.__init__(
self, controller=RemoteController, switch=OVSKernelSwitch, **kwargs)
+
+ # add a remote controller to be able to use Ryu
self.addController('c0', controller=RemoteController)
# graph of the complete DC network
@@ -46,6 +52,8 @@
# start Ryu controller
self.startRyu()
+ # initialize resource model registrar
+ self.rm_registrar = ResourceModelRegistrar(dc_emulation_max_cpu)
def addDatacenter(self, label, metadata={}):
"""
diff --git a/src/emuvim/dcemulator/node.py b/src/emuvim/dcemulator/node.py
index 6030153..d9971c8 100755
--- a/src/emuvim/dcemulator/node.py
+++ b/src/emuvim/dcemulator/node.py
@@ -22,7 +22,8 @@
def __init__(
self, name, dimage, **kwargs):
logging.debug("Create EmulatorCompute instance: %s" % name)
- self.datacenter = None # pointer to current DC
+ self.datacenter = kwargs.get("datacenter") # pointer to current DC
+ self.flavor_name = kwargs.get("flavor_name")
# call original Docker.__init__
Docker.__init__(self, name, dimage, **kwargs)
@@ -77,8 +78,12 @@
self.label = label
# dict to store arbitrary metadata (e.g. latitude and longitude)
self.metadata = metadata
- self.switch = None # first prototype assumes one "bigswitch" per DC
- self.containers = {} # keep track of running containers
+ # first prototype assumes one "bigswitch" per DC
+ self.switch = None
+ # keep track of running containers
+ self.containers = {}
+ # pointer to assigned resource model
+ self._resource_model = None
def __repr__(self):
return self.label
@@ -103,7 +108,7 @@
def start(self):
pass
- def startCompute(self, name, image=None, command=None, network=None):
+ def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny"):
"""
Create a new container as compute resource and connect it to this
data center.
@@ -111,6 +116,7 @@
:param image: image name (string)
:param command: command (string)
:param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
+ :param flavor_name: name of the flavor for this compute container
:return:
"""
assert name is not None
@@ -128,15 +134,24 @@
if len(network) < 1:
network.append({})
+ # allocate in the resource model and compute resource limits for the new container
+ if self._resource_model is not None:
+ # TODO pass resource limits to new container (cf. Dockernet API) Issue #47
+ (cpu_limit, mem_limit, disk_limit) = alloc = self._resource_model.allocate(name, flavor_name)
+ logging.info("Allocation result: %r" % str(alloc))
# create the container
- d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command)
+ d = self.net.addDocker(
+ "%s" % (name),
+ dimage=image,
+ dcmd=command,
+ datacenter=self,
+ flavor_name=flavor_name)
# connect all given networks
for nw in network:
# TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
self.net.addLink(d, self.switch, params1=nw, cls=Link)
# do bookkeeping
self.containers[name] = d
- d.datacenter = self
return d # we might use UUIDs for naming later on
def stopCompute(self, name):
@@ -150,6 +165,9 @@
link=None, node1=self.containers[name], node2=self.switch)
self.net.removeDocker("%s" % (name))
del self.containers[name]
+ # call resource model and free resources
+ if self._resource_model is not None:
+ self._resource_model.free(name)
return True
def listCompute(self):
@@ -170,3 +188,11 @@
"n_running_containers": len(self.containers),
"metadata": self.metadata
}
+
+ def assignResourceModel(self, rm):
+ if self._resource_model is not None:
+ raise Exception("There is already an resource model assigned to this DC.")
+ self._resource_model = rm
+ self.net.rm_registrar.register(self, rm)
+ logging.info("Assigned RM: %r to DC: %r" % (rm, self))
+
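End-to-end usage of the new hooks (an illustrative sketch; BaseResourceModel is the dummy base added by this PR, a real model would subclass it):

    from emuvim.dcemulator.net import DCNetwork
    from emuvim.dcemulator.resourcemodel import BaseResourceModel

    net = DCNetwork(dc_emulation_max_cpu=0.5)
    dc = net.addDatacenter("dc1")
    dc.assignResourceModel(BaseResourceModel())  # at most one RM per DC
    net.start()
    dc.startCompute("vnf1", flavor_name="small")  # triggers allocate()
    dc.stopCompute("vnf1")                        # triggers free()
    net.stop()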
diff --git a/src/emuvim/dcemulator/resourcemodel/__init__.py b/src/emuvim/dcemulator/resourcemodel/__init__.py
new file mode 100644
index 0000000..52a35e5
--- /dev/null
+++ b/src/emuvim/dcemulator/resourcemodel/__init__.py
@@ -0,0 +1,113 @@
+"""
+Base classes needed for resource models support.
+"""
+
+import logging
+LOG = logging.getLogger("resourcemodel")
+LOG.setLevel(logging.DEBUG)
+
+
+class ResourceModelRegistrar(object):
+ """
+ Global registry to keep track of all existing resource models.
+ """
+
+ def __init__(self, dc_emulation_max_cpu):
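+ # max. fraction of CPU time that all emulated containers may use together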
+ self.e_cpu = dc_emulation_max_cpu
+ # pointer to all resource models assigned to DCs
+ self._resource_models = dict()
+ LOG.info("Resource model registrar created with dc_emulation_max_cpu=%r" % dc_emulation_max_cpu)
+
+ def register(self, dc, rm):
+ """
+ Register a new resource model.
+ :param dc: Data center to which it is assigned.
+ :param rm: The resource model object.
+ :return: None
+ """
+ if dc in self._resource_models:
+ raise Exception("There is already an resource model assigned to this DC.")
+ self._resource_models[dc] = rm
+ rm.registrar = self
+ LOG.info("Registrar: Added resource model: %r" % rm)
+
+ @property
+ def resource_models(self):
+ return list(self._resource_models.itervalues())
+
+
+class ResourceFlavor(object):
+ """
+ Simple class that represents resource flavors (cf. OpenStack).
+ Can contain arbitrary metrics.
+ """
+ def __init__(self, name, metrics):
+ self.name = name
+ self._metrics = metrics
+ LOG.debug("Create flavor %r with metrics: %r" % (name, metrics))
+
+ def get(self, metric_key):
+ return self._metrics.get(metric_key)
+
+
+class BaseResourceModel(object):
+ """
+ Base class for a resource limitation model.
+ Has to be extended by a real resource model implementation.
+ """
+
+ def __init__(self):
+ self._flavors = dict()
+ self._initDefaultFlavors()
+ self.registrar = None # pointer to registrar
+ self.allocated_compute_instances = dict()
+ LOG.info("Resource model %r initialized" % self)
+
+ def __repr__(self):
+ return self.__class__.__name__
+
+ def _initDefaultFlavors(self):
+ """
+ Initialize some default flavors (naming/sizes inspired by OpenStack).
+ """
+ self.addFlavour(ResourceFlavor(
+ "tiny", {"compute": 1, "memory": 32, "disk": 1}))
+ self.addFlavour(ResourceFlavor(
+ "small", {"compute": 4, "memory": 128, "disk": 20}))
+ self.addFlavour(ResourceFlavor(
+ "medium", {"compute": 8, "memory": 256, "disk": 40}))
+ self.addFlavour(ResourceFlavor(
+ "large", {"compute": 16, "memory": 512, "disk": 80}))
+ self.addFlavour(ResourceFlavor(
+ "xlarge", {"compute": 32, "memory": 1024, "disk": 160}))
+
+ def addFlavour(self, fl):
+ """
+ Add a new flavor to the resource model.
+ :param fl: flavor object
+ :return: None
+ """
+ if fl.name in self._flavors:
+ raise Exception("Flavor with name %r already exists!" % fl.name)
+ self._flavors[fl.name] = fl
+
+ def allocate(self, name, flavor_name):
+ """
+ This method has to be overwritten by a real resource model.
+ :param name: Name of the started compute instance.
+ :param flavor_name: Name of the flavor to be allocated.
+ :return: 3-tuple: (CPU-fraction, Mem-limit, Disk-limit)
+ """
+ LOG.warning("Allocating in BaseResourceModel: %r with flavor: %r" % (name, flavor_name))
+ self.allocated_compute_instances[name] = flavor_name
+ return -1.0, -1.0, -1.0 # return invalid values to indicate that this RM is a dummy
+
+ def free(self, name):
+ """
+ This method has to be overwritten by a real resource model.
+ :param name: Name of the compute instance that is stopped.
+ :return: True/False
+ """
+ LOG.warning("Free in BaseResourceModel: %r" % name)
+ del self.allocated_compute_instances[name]
+ return True
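allocate() and free() above are deliberately dummies; concrete models override them and return real limits. A hypothetical subclass (the name and the CU-to-CPU mapping are illustrative assumptions, not part of this PR):

    from emuvim.dcemulator.resourcemodel import BaseResourceModel

    class StaticLimitRM(BaseResourceModel):
        """Hypothetical example: derive limits directly from flavor metrics."""

        def allocate(self, name, flavor_name):
            fl = self._flavors.get(flavor_name)
            self.allocated_compute_instances[name] = flavor_name
            # assumption: scale the registrar's CPU budget by compute units,
            # normalized to the largest default flavor (32 CUs)
            cpu_limit = self.registrar.e_cpu * (fl.get("compute") / 32.0)
            return cpu_limit, fl.get("memory"), fl.get("disk")

        def free(self, name):
            del self.allocated_compute_instances[name]
            return True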
diff --git a/src/emuvim/dcemulator/resourcemodel/upb/__init__.py b/src/emuvim/dcemulator/resourcemodel/upb/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/emuvim/dcemulator/resourcemodel/upb/__init__.py
diff --git a/src/emuvim/dcemulator/resourcemodel/upb/simple.py b/src/emuvim/dcemulator/resourcemodel/upb/simple.py
new file mode 100644
index 0000000..503e35c
--- /dev/null
+++ b/src/emuvim/dcemulator/resourcemodel/upb/simple.py
@@ -0,0 +1,22 @@
+"""
+ Playground for resource models created by the University of Paderborn.
+"""
+import logging
+from emuvim.dcemulator.resourcemodel import BaseResourceModel
+
+LOG = logging.getLogger("rm.upb.simple")
+LOG.setLevel(logging.DEBUG)
+
+
+class UpbSimpleCloudDcApproxRM(BaseResourceModel):
+ """
+ This will be an example resource model that limits the overall
+ resources that can be deployed per data center.
+ """
+ # TODO Implement resource model issue #12
+
+ def __init__(self, max_cu=32, max_mu=1024):
+ self._max_cu = max_cu
+ self._max_mu = max_mu
+ super(UpbSimpleCloudDcApproxRM, self).__init__()
+
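The UPB model itself is still a stub (see the issue #12 TODO above). One possible shape of the per-DC limit check it is meant to implement, sketched under the assumption that compute units are simply summed per data center:

    def allocate(self, name, flavor_name):
        fl = self._flavors.get(flavor_name)
        # sum compute units of all instances already running in this DC
        used_cu = sum(self._flavors[f].get("compute")
                      for f in self.allocated_compute_instances.itervalues())
        if used_cu + fl.get("compute") > self._max_cu:
            raise Exception("Not enough compute units left in data center.")
        self.allocated_compute_instances[name] = flavor_name
        # assumption: each compute unit gets an equal share of the CPU budget
        cpu_limit = self.registrar.e_cpu * (float(fl.get("compute")) / self._max_cu)
        return cpu_limit, fl.get("memory"), fl.get("disk")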
diff --git a/src/emuvim/examples/resource_model_demo_topology.py b/src/emuvim/examples/resource_model_demo_topology.py
new file mode 100644
index 0000000..e65a8ce
--- /dev/null
+++ b/src/emuvim/examples/resource_model_demo_topology.py
@@ -0,0 +1,67 @@
+"""
+A simple topology to test resource model support.
+"""
+
+import logging
+import time
+from mininet.log import setLogLevel
+from emuvim.dcemulator.net import DCNetwork
+from emuvim.api.zerorpc.compute import ZeroRpcApiEndpoint
+from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
+from emuvim.dcemulator.resourcemodel.upb.simple import UpbSimpleCloudDcApproxRM
+
+logging.basicConfig(level=logging.INFO)
+
+
+def create_topology1():
+ # create topology
+ # use a maximum of 50% CPU time for containers added to data centers
+ net = DCNetwork(dc_emulation_max_cpu=0.5)
+ # add some data centers and create a topology
+ dc1 = net.addDatacenter("dc1")
+ dc2 = net.addDatacenter("dc2")
+ s1 = net.addSwitch("s1")
+ net.addLink(dc1, s1, delay="10ms")
+ net.addLink(dc2, s1, delay="20ms")
+
+ # create and assign resource models for each DC
+ rm1 = UpbSimpleCloudDcApproxRM(max_cu=10, max_mu=1024)
+ rm2 = UpbSimpleCloudDcApproxRM(max_cu=20)
+ dc1.assignResourceModel(rm1)
+ dc2.assignResourceModel(rm2)
+
+ # add the command line interface endpoint to each DC
+ zapi1 = ZeroRpcApiEndpoint("0.0.0.0", 4242)
+ zapi1.connectDatacenter(dc1)
+ zapi1.connectDatacenter(dc2)
+ # run API endpoint server (in another thread, don't block)
+ zapi1.start()
+
+ # add the SONATA dummy gatekeeper to each DC
+ sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 8000)
+ sdkg1.connectDatacenter(dc1)
+ sdkg1.connectDatacenter(dc2)
+ # run the dummy gatekeeper (in another thread, don't block)
+ sdkg1.start()
+
+ # start the emulation platform
+ net.start()
+ print "Wait a moment and allocate some compute start some compute resources..."
+ time.sleep(2)
+ dc1.startCompute("vnf1")
+ dc1.startCompute("vnf2", flavor_name="tiny")
+ dc1.startCompute("vnf3", flavor_name="small")
+ dc2.startCompute("vnf4", flavor_name="medium")
+ print "... done."
+ time.sleep(2)
+ net.CLI()
+ net.stop()
+
+
+def main():
+ setLogLevel('info') # set Mininet loglevel
+ create_topology1()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/src/emuvim/test/__init__.py b/src/emuvim/test/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/emuvim/test/__init__.py
diff --git a/src/emuvim/test/base.py b/src/emuvim/test/base.py
new file mode 100644
index 0000000..f652259
--- /dev/null
+++ b/src/emuvim/test/base.py
@@ -0,0 +1,91 @@
+"""
+ Helper module that provides base classes for test implementations.
+"""
+
+import unittest
+import os
+import subprocess
+import docker
+from emuvim.dcemulator.net import DCNetwork
+from mininet.clean import cleanup
+
+class SimpleTestTopology(unittest.TestCase):
+ """
+ Helper class to do basic test setups.
+ s1 -- s2 -- s3 -- ... -- sN
+ """
+
+ def __init__(self, *args, **kwargs):
+ self.net = None
+ self.s = [] # list of switches
+ self.h = [] # list of hosts
+ self.d = [] # list of docker containers
+ self.dc = [] # list of data centers
+ self.docker_cli = None
+ super(SimpleTestTopology, self).__init__(*args, **kwargs)
+
+ def createNet(
+ self,
+ nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
+ autolinkswitches=False):
+ """
+ Creates a Mininet instance and automatically adds some
+ nodes to it.
+ """
+ self.net = net = DCNetwork()
+
+ # add some switches
+ for i in range(0, nswitches):
+ self.s.append(self.net.addSwitch('s%d' % i))
+ # if specified, chain all switches
+ if autolinkswitches:
+ for i in range(0, len(self.s) - 1):
+ self.net.addLink(self.s[i], self.s[i + 1])
+ # add some data centers
+ for i in range(0, ndatacenter):
+ self.dc.append(
+ self.net.addDatacenter(
+ 'datacenter%d' % i,
+ metadata={"unittest_dc": i}))
+ # add some hosts
+ for i in range(0, nhosts):
+ self.h.append(self.net.addHost('h%d' % i))
+ # add some dockers
+ for i in range(0, ndockers):
+ self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu"))
+
+ def startNet(self):
+ self.net.start()
+
+ def stopNet(self):
+ self.net.stop()
+
+ def getDockerCli(self):
+ """
+ Helper to interact with local docker instance.
+ """
+ if self.docker_cli is None:
+ self.docker_cli = docker.Client(
+ base_url='unix://var/run/docker.sock')
+ return self.docker_cli
+
+ def getDockernetContainers(self):
+ """
+ List the containers managed by dockernet
+ """
+ return self.getDockerCli().containers(filters={"label": "com.dockernet"})
+
+ @staticmethod
+ def setUp():
+ pass
+
+ @staticmethod
+ def tearDown():
+ cleanup()
+ # make sure that all pending docker containers are killed
+ with open(os.devnull, 'w') as devnull:
+ subprocess.call(
+ "sudo docker rm -f $(sudo docker ps --filter 'label=com.dockernet' -a -q)",
+ stdout=devnull,
+ stderr=devnull,
+ shell=True)
\ No newline at end of file
diff --git a/src/emuvim/test/test_emulator.py b/src/emuvim/test/test_emulator.py
index 06e2da3..905b1c6 100755
--- a/src/emuvim/test/test_emulator.py
+++ b/src/emuvim/test/test_emulator.py
@@ -7,104 +7,12 @@
"""
import unittest
-import os
-import time
-import subprocess
-import docker
-from emuvim.dcemulator.net import DCNetwork
from emuvim.dcemulator.node import EmulatorCompute
-from mininet.node import Host, Controller, OVSSwitch, Docker
-from mininet.link import TCLink
-from mininet.topo import SingleSwitchTopo, LinearTopo
-from mininet.log import setLogLevel
-from mininet.util import quietRun
-from mininet.clean import cleanup
-
-
-class simpleTestTopology( unittest.TestCase ):
- """
- Helper class to do basic test setups.
- s1 -- s2 -- s3 -- ... -- sN
- """
-
- def __init__(self, *args, **kwargs):
- self.net = None
- self.s = [] # list of switches
- self.h = [] # list of hosts
- self.d = [] # list of docker containers
- self.dc = [] # list of data centers
- self.docker_cli = None
- super(simpleTestTopology, self).__init__(*args, **kwargs)
-
- def createNet(
- self,
- nswitches=0, ndatacenter=0, nhosts=0, ndockers=0,
- autolinkswitches=False):
- """
- Creates a Mininet instance and automatically adds some
- nodes to it.
- """
- self.net = net = DCNetwork()
-
- # add some switches
- for i in range(0, nswitches):
- self.s.append(self.net.addSwitch('s%d' % i))
- # if specified, chain all switches
- if autolinkswitches:
- for i in range(0, len(self.s) - 1):
- self.net.addLink(self.s[i], self.s[i + 1])
- # add some data centers
- for i in range(0, ndatacenter):
- self.dc.append(
- self.net.addDatacenter(
- 'datacenter%d' % i,
- metadata={"unittest_dc": i}))
- # add some hosts
- for i in range(0, nhosts):
- self.h.append(self.net.addHost('h%d' % i))
- # add some dockers
- for i in range(0, ndockers):
- self.d.append(self.net.addDocker('d%d' % i, dimage="ubuntu"))
-
- def startNet(self):
- self.net.start()
-
- def stopNet(self):
- self.net.stop()
-
- def getDockerCli(self):
- """
- Helper to interact with local docker instance.
- """
- if self.docker_cli is None:
- self.docker_cli = docker.Client(
- base_url='unix://var/run/docker.sock')
- return self.docker_cli
-
- def getDockernetContainers(self):
- """
- List the containers managed by dockernet
- """
- return self.getDockerCli().containers(filters={"label": "com.dockernet"})
-
- @staticmethod
- def setUp():
- pass
-
- @staticmethod
- def tearDown():
- cleanup()
- # make sure that all pending docker containers are killed
- with open(os.devnull, 'w') as devnull:
- subprocess.call(
- "sudo docker rm -f $(sudo docker ps --filter 'label=com.dockernet' -a -q)",
- stdout=devnull,
- stderr=devnull,
- shell=True)
+from emuvim.test.base import SimpleTestTopology
#@unittest.skip("disabled topology tests for development")
-class testEmulatorTopology( simpleTestTopology ):
+class testEmulatorTopology( SimpleTestTopology ):
"""
Tests to check the topology API of the emulator.
"""
@@ -179,7 +87,7 @@
#@unittest.skip("disabled compute tests for development")
-class testEmulatorCompute( simpleTestTopology ):
+class testEmulatorCompute( SimpleTestTopology ):
"""
Tests to check the emulator's API to add and remove
compute resources at runtime.
diff --git a/src/emuvim/test/test_resourcemodel_api.py b/src/emuvim/test/test_resourcemodel_api.py
new file mode 100644
index 0000000..35b4dd5
--- /dev/null
+++ b/src/emuvim/test/test_resourcemodel_api.py
@@ -0,0 +1,51 @@
+import time
+from emuvim.test.base import SimpleTestTopology
+from emuvim.dcemulator.resourcemodel import BaseResourceModel, ResourceFlavor
+
+
+class testResourceModel(SimpleTestTopology):
+
+ def testBaseResourceModelApi(self):
+ r = BaseResourceModel()
+ # check if default flavors are there
+ assert(len(r._flavors) == 5)
+ # check addFlavor functionality
+ f = ResourceFlavor("test", {"testmetric": 42})
+ r.addFlavour(f)
+ assert("test" in r._flavors)
+ assert(r._flavors.get("test").get("testmetric") == 42)
+ # test if allocate and free runs through
+ assert(len(r.allocate("testc", "tiny")) == 3) # expected: 3-tuple
+ assert(r.free("testc"))
+
+ def testAddRmToDc(self):
+ # create network
+ self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
+ # setup links
+ self.net.addLink(self.dc[0], self.h[0])
+ self.net.addLink(self.h[1], self.dc[0])
+ # add resource model
+ r = BaseResourceModel()
+ self.dc[0].assignResourceModel(r)
+ # start Mininet network
+ self.startNet()
+ # check number of running nodes
+ assert(len(self.getDockernetContainers()) == 0)
+ assert(len(self.net.hosts) == 2)
+ assert(len(self.net.switches) == 1)
+ # check resource model and resource model registrar
+ assert(self.dc[0]._resource_model is not None)
+ assert(len(self.net.rm_registrar.resource_models) == 1)
+
+ # check if alloc was called during startCompute
+ assert(len(r.allocated_compute_instances) == 0)
+ self.dc[0].startCompute("tc1")
+ time.sleep(1)
+ assert(len(r.allocated_compute_instances) == 1)
+ # check if free was called during stopCompute
+ self.dc[0].stopCompute("tc1")
+ assert(len(r.allocated_compute_instances) == 0)
+ # check connectivity by using ping
+ assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+ # stop Mininet network
+ self.stopNet()