Better RM test case. Improved other tests.
diff --git a/src/emuvim/test/test_emulator.py b/src/emuvim/test/test_emulator.py
index fb8714a..2038116 100755
--- a/src/emuvim/test/test_emulator.py
+++ b/src/emuvim/test/test_emulator.py
@@ -32,11 +32,11 @@
# start Mininet network
self.startNet()
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 0)
- assert(len(self.net.hosts) == 2)
- assert(len(self.net.switches) == 1)
+ self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
# check connectivity by using ping
- assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+ self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
# stop Mininet network
self.stopNet()
@@ -54,11 +54,11 @@
# start Mininet network
self.startNet()
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 0)
- assert(len(self.net.hosts) == 2)
- assert(len(self.net.switches) == 2)
+ self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 2)
# check connectivity by using ping
- assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+ self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
# stop Mininet network
self.stopNet()
@@ -79,11 +79,11 @@
# start Mininet network
self.startNet()
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 0)
- assert(len(self.net.hosts) == 2)
- assert(len(self.net.switches) == 5)
+ self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 5)
# check connectivity by using ping
- assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+ self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
# stop Mininet network
self.stopNet()
@@ -110,15 +110,15 @@
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 1)
- assert(len(self.net.hosts) == 2)
- assert(len(self.net.switches) == 1)
+ self.assertTrue(len(self.getDockernetContainers()) == 1)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
# check compute list result
- assert(len(self.dc[0].listCompute()) == 1)
- assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
- assert(self.dc[0].listCompute()[0].name == "vnf1")
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
+ self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+ self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
# check connectivity by using ping
- assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
+ self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
# stop Mininet network
self.stopNet()
@@ -136,21 +136,21 @@
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 1)
- assert(len(self.net.hosts) == 2)
- assert(len(self.net.switches) == 1)
+ self.assertTrue(len(self.getDockernetContainers()) == 1)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
# check compute list result
- assert(len(self.dc[0].listCompute()) == 1)
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
# check connectivity by using ping
- assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
+ self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
# remove compute resources
self.dc[0].stopCompute("vnf1")
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 0)
- assert(len(self.net.hosts) == 1)
- assert(len(self.net.switches) == 1)
+ self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 1)
+ self.assertTrue(len(self.net.switches) == 1)
# check compute list result
- assert(len(self.dc[0].listCompute()) == 0)
+ self.assertTrue(len(self.dc[0].listCompute()) == 0)
# stop Mininet network
self.stopNet()
@@ -168,19 +168,19 @@
# add compute resources
vnf1 = self.dc[0].startCompute("vnf1")
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 1)
- assert(len(self.net.hosts) == 2)
- assert(len(self.net.switches) == 1)
+ self.assertTrue(len(self.getDockernetContainers()) == 1)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
# check compute list result
- assert(len(self.dc[0].listCompute()) == 1)
- assert(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
- assert(self.dc[0].listCompute()[0].name == "vnf1")
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
+ self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+ self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
# check connectivity by using ping
- assert(self.net.ping([self.h[0], vnf1]) <= 0.0)
+ self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
# check get status
s = self.dc[0].containers.get("vnf1").getStatus()
- assert(s["name"] == "vnf1")
- assert(s["state"]["Running"])
+ self.assertTrue(s["name"] == "vnf1")
+ self.assertTrue(s["state"]["Running"])
# stop Mininet network
self.stopNet()
@@ -202,14 +202,14 @@
vnf1 = self.dc[0].startCompute("vnf1")
vnf2 = self.dc[1].startCompute("vnf2")
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 2)
- assert(len(self.net.hosts) == 2)
- assert(len(self.net.switches) == 5)
+ self.assertTrue(len(self.getDockernetContainers()) == 2)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 5)
# check compute list result
- assert(len(self.dc[0].listCompute()) == 1)
- assert(len(self.dc[1].listCompute()) == 1)
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
+ self.assertTrue(len(self.dc[1].listCompute()) == 1)
# check connectivity by using ping
- assert(self.net.ping([vnf1, vnf2]) <= 0.0)
+ self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
# stop Mininet network
self.stopNet()
@@ -231,38 +231,38 @@
vnf1 = self.dc[0].startCompute("vnf1")
vnf2 = self.dc[1].startCompute("vnf2")
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 2)
- assert(len(self.net.hosts) == 2)
- assert(len(self.net.switches) == 5)
+ self.assertTrue(len(self.getDockernetContainers()) == 2)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 5)
# check compute list result
- assert(len(self.dc[0].listCompute()) == 1)
- assert(len(self.dc[1].listCompute()) == 1)
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
+ self.assertTrue(len(self.dc[1].listCompute()) == 1)
# check connectivity by using ping
- assert(self.net.ping([vnf1, vnf2]) <= 0.0)
+ self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
# remove compute resources
self.dc[0].stopCompute("vnf1")
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 1)
- assert(len(self.net.hosts) == 1)
- assert(len(self.net.switches) == 5)
+ self.assertTrue(len(self.getDockernetContainers()) == 1)
+ self.assertTrue(len(self.net.hosts) == 1)
+ self.assertTrue(len(self.net.switches) == 5)
# check compute list result
- assert(len(self.dc[0].listCompute()) == 0)
- assert(len(self.dc[1].listCompute()) == 1)
+ self.assertTrue(len(self.dc[0].listCompute()) == 0)
+ self.assertTrue(len(self.dc[1].listCompute()) == 1)
# add compute resources
vnf3 = self.dc[0].startCompute("vnf3")
vnf4 = self.dc[0].startCompute("vnf4")
# check compute list result
- assert(len(self.dc[0].listCompute()) == 2)
- assert(len(self.dc[1].listCompute()) == 1)
- assert(self.net.ping([vnf3, vnf2]) <= 0.0)
- assert(self.net.ping([vnf4, vnf2]) <= 0.0)
+ self.assertTrue(len(self.dc[0].listCompute()) == 2)
+ self.assertTrue(len(self.dc[1].listCompute()) == 1)
+ self.assertTrue(self.net.ping([vnf3, vnf2]) <= 0.0)
+ self.assertTrue(self.net.ping([vnf4, vnf2]) <= 0.0)
# remove compute resources
self.dc[0].stopCompute("vnf3")
self.dc[0].stopCompute("vnf4")
self.dc[1].stopCompute("vnf2")
# check compute list result
- assert(len(self.dc[0].listCompute()) == 0)
- assert(len(self.dc[1].listCompute()) == 0)
+ self.assertTrue(len(self.dc[0].listCompute()) == 0)
+ self.assertTrue(len(self.dc[1].listCompute()) == 0)
# stop Mininet network
self.stopNet()
diff --git a/src/emuvim/test/test_resourcemodel_api.py b/src/emuvim/test/test_resourcemodel_api.py
index 3412ec4..9b1a3ec 100644
--- a/src/emuvim/test/test_resourcemodel_api.py
+++ b/src/emuvim/test/test_resourcemodel_api.py
@@ -6,21 +6,32 @@
class testResourceModel(SimpleTestTopology):
+ """
+ Test the general resource model API and functionality.
+ """
def testBaseResourceModelApi(self):
+ """
+ Test bare API without a real resource model.
+ :return:
+ """
r = BaseResourceModel()
# check if default flavors are there
- assert(len(r._flavors) == 5)
+ self.assertTrue(len(r._flavors) == 5)
# check addFlavor functionality
f = ResourceFlavor("test", {"testmetric": 42})
r.addFlavour(f)
- assert("test" in r._flavors)
- assert(r._flavors.get("test").get("testmetric") == 42)
+ self.assertTrue("test" in r._flavors)
+ self.assertTrue(r._flavors.get("test").get("testmetric") == 42)
# test if allocate and free runs through
- assert(len(r.allocate("testc", "tiny")) == 3) # expected: 3tuple
- assert(r.free("testc"))
+ self.assertTrue(len(r.allocate("testc", "tiny")) == 3) # expected: 3tuple
+ self.assertTrue(r.free("testc"))
def testAddRmToDc(self):
+ """
+ Test if allocate/free is called when a RM is added to a DC.
+ :return:
+ """
# create network
self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
# setup links
@@ -32,23 +43,23 @@
# start Mininet network
self.startNet()
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 0)
- assert(len(self.net.hosts) == 2)
- assert(len(self.net.switches) == 1)
+ self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
# check resource model and resource model registrar
- assert(self.dc[0]._resource_model is not None)
- assert(len(self.net.rm_registrar.resource_models) == 1)
+ self.assertTrue(self.dc[0]._resource_model is not None)
+ self.assertTrue(len(self.net.rm_registrar.resource_models) == 1)
# check if alloc was called during startCompute
- assert(len(r.allocated_compute_instances) == 0)
+ self.assertTrue(len(r.allocated_compute_instances) == 0)
self.dc[0].startCompute("tc1")
time.sleep(1)
- assert(len(r.allocated_compute_instances) == 1)
+ self.assertTrue(len(r.allocated_compute_instances) == 1)
# check if free was called during stopCompute
self.dc[0].stopCompute("tc1")
- assert(len(r.allocated_compute_instances) == 0)
+ self.assertTrue(len(r.allocated_compute_instances) == 0)
# check connectivity by using ping
- assert(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+ self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
# stop Mininet network
self.stopNet()
@@ -57,7 +68,12 @@
"""
Test the UpbSimpleCloudDc resource model.
"""
+
def testAllocation(self):
+ """
+ Test the allocation procedures and correct calculations.
+ :return:
+ """
# config
E_CPU = 1.0
MAX_CU = 100
@@ -67,29 +83,29 @@
reg.register("test_dc", rm)
res = rm.allocate("c1", "tiny") # calculate allocation
- assert(res[0] == E_CPU / MAX_CU * 1) # validate compute result
- assert(res[1] < 0) # validate memory result
- assert(res[2] < 0) # validate disk result
+ self.assertTrue(res[0] == E_CPU / MAX_CU * 1) # validate compute result
+ self.assertTrue(res[1] < 0) # validate memory result
+ self.assertTrue(res[2] < 0) # validate disk result
res = rm.allocate("c2", "small") # calculate allocation
- assert(res[0] == E_CPU / MAX_CU * 4) # validate compute result
- assert(res[1] < 0) # validate memory result
- assert(res[2] < 0) # validate disk result
+ self.assertTrue(res[0] == E_CPU / MAX_CU * 4) # validate compute result
+ self.assertTrue(res[1] < 0) # validate memory result
+ self.assertTrue(res[2] < 0) # validate disk result
res = rm.allocate("c3", "medium") # calculate allocation
- assert(res[0] == E_CPU / MAX_CU * 8) # validate compute result
- assert(res[1] < 0) # validate memory result
- assert(res[2] < 0) # validate disk result
+ self.assertTrue(res[0] == E_CPU / MAX_CU * 8) # validate compute result
+ self.assertTrue(res[1] < 0) # validate memory result
+ self.assertTrue(res[2] < 0) # validate disk result
res = rm.allocate("c4", "large") # calculate allocation
- assert(res[0] == E_CPU / MAX_CU * 16) # validate compute result
- assert(res[1] < 0) # validate memory result
- assert(res[2] < 0) # validate disk result
+ self.assertTrue(res[0] == E_CPU / MAX_CU * 16) # validate compute result
+ self.assertTrue(res[1] < 0) # validate memory result
+ self.assertTrue(res[2] < 0) # validate disk result
res = rm.allocate("c5", "xlarge") # calculate allocation
- assert(res[0] == E_CPU / MAX_CU * 32) # validate compute result
- assert(res[1] < 0) # validate memory result
- assert(res[2] < 0) # validate disk result
+ self.assertTrue(res[0] == E_CPU / MAX_CU * 32) # validate compute result
+ self.assertTrue(res[1] < 0) # validate memory result
+ self.assertTrue(res[2] < 0) # validate disk result
# test over provisioning exeption
exception = False
@@ -99,11 +115,15 @@
rm.allocate("c8", "xlarge") # calculate allocation
rm.allocate("c9", "xlarge") # calculate allocation
except Exception as e:
- assert("Not enough compute" in e.message)
+ self.assertTrue("Not enough compute" in e.message)
exception = True
- assert(exception)
+ self.assertTrue(exception)
def testFree(self):
+ """
+ Test the free procedure.
+ :return:
+ """
# config
E_CPU = 1.0
MAX_CU = 100
@@ -112,9 +132,49 @@
rm = UpbSimpleCloudDcRM(max_cu=100, max_mu=100)
reg.register("test_dc", rm)
rm.allocate("c1", "tiny") # calculate allocation
- assert(rm.dc_alloc_cu == 1)
+ self.assertTrue(rm.dc_alloc_cu == 1)
rm.free("c1")
- assert(rm.dc_alloc_cu == 0)
+ self.assertTrue(rm.dc_alloc_cu == 0)
+
+ def testInRealTopo(self):
+ """
+ Start a real container and check if limitations are really passed down to Dockernet.
+ :return:
+ """
+ # create network
+ self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
+ # setup links
+ self.net.addLink(self.dc[0], self.h[0])
+ self.net.addLink(self.h[1], self.dc[0])
+ # add resource model
+ r = UpbSimpleCloudDcRM(max_cu=100, max_mu=100)
+ self.dc[0].assignResourceModel(r)
+ # start Mininet network
+ self.startNet()
+ # check number of running nodes
+ self.assertTrue(len(self.getDockernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
+ # check resource model and resource model registrar
+ self.assertTrue(self.dc[0]._resource_model is not None)
+ self.assertTrue(len(self.net.rm_registrar.resource_models) == 1)
+
+ # check if alloc was called during startCompute
+ self.assertTrue(len(r.allocated_compute_instances) == 0)
+ tc1 = self.dc[0].startCompute("tc1", flavor_name="tiny")
+ time.sleep(1)
+ self.assertTrue(len(r.allocated_compute_instances) == 1)
+
+ # check if there is a real limitation set for containers cgroup
+ self.assertEqual(tc1.cpu_period/tc1.cpu_quota, 100)
+
+ # check if free was called during stopCompute
+ self.dc[0].stopCompute("tc1")
+ self.assertTrue(len(r.allocated_compute_instances) == 0)
+ # check connectivity by using ping
+ self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+ # stop Mininet network
+ self.stopNet()
diff --git a/src/emuvim/test/test_sonata_dummy_gatekeeper.py b/src/emuvim/test/test_sonata_dummy_gatekeeper.py
index b7ba371..ce5bcbf 100644
--- a/src/emuvim/test/test_sonata_dummy_gatekeeper.py
+++ b/src/emuvim/test/test_sonata_dummy_gatekeeper.py
@@ -54,14 +54,14 @@
self.assertEqual(len(r4.json().get("service_instance_list")), 1)
# check number of running nodes
- assert(len(self.getDockernetContainers()) == 3)
- assert(len(self.net.hosts) == 5)
- assert(len(self.net.switches) == 2)
+ self.assertTrue(len(self.getDockernetContainers()) == 3)
+ self.assertTrue(len(self.net.hosts) == 5)
+ self.assertTrue(len(self.net.switches) == 2)
# check compute list result
- assert(len(self.dc[0].listCompute()) == 3)
+ self.assertTrue(len(self.dc[0].listCompute()) == 3)
# check connectivity by using ping
for vnf in self.dc[0].listCompute():
- assert(self.net.ping([self.h[0], vnf]) <= 0.0)
+ self.assertTrue(self.net.ping([self.h[0], vnf]) <= 0.0)
# stop Mininet network
self.stopNet()