From: peusterm
Date: Wed, 11 May 2016 13:50:11 +0000 (+0200)
Subject: preparation for integration tests
X-Git-Tag: v3.1~128^2~6
X-Git-Url: https://osm.etsi.org/gitweb/?a=commitdiff_plain;h=db31fdbb0e98d35c4852f6e0b514c4f56e7eba24;p=osm%2Fvim-emu.git

preparation for integration tests
---

diff --git a/src/emuvim/test/__main__.py b/src/emuvim/test/__main__.py
deleted file mode 100755
index f7fa66d..0000000
--- a/src/emuvim/test/__main__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import runner
-import os
-
-
-if __name__ == '__main__':
-    thisdir = os.path.dirname( os.path.realpath( __file__ ) )
-    runner.main(thisdir)
diff --git a/src/emuvim/test/integrationtests/__init__.py b/src/emuvim/test/integrationtests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/emuvim/test/integrationtests/test_sonata_dummy_gatekeeper.py b/src/emuvim/test/integrationtests/test_sonata_dummy_gatekeeper.py
new file mode 100644
index 0000000..33a1d57
--- /dev/null
+++ b/src/emuvim/test/integrationtests/test_sonata_dummy_gatekeeper.py
@@ -0,0 +1,74 @@
+import time
+import requests
+import subprocess
+import os
+import unittest
+from emuvim.test.base import SimpleTestTopology
+from emuvim.api.sonata import SonataDummyGatekeeperEndpoint
+
+
+
+class testSonataDummyGatekeeper(SimpleTestTopology):
+
+    @unittest.skip("disabled test since ubuntu:trusty not used in current example package")
+    def testAPI(self):
+        # create network
+        self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.dc[0], self.dc[1])
+        self.net.addLink(self.h[1], self.dc[1])
+        # connect dummy GK to data centers
+        sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000)
+        sdkg1.connectDatacenter(self.dc[0])
+        sdkg1.connectDatacenter(self.dc[1])
+        # run the dummy gatekeeper (in another thread, don't block)
+        sdkg1.start()
+        # start Mininet network
+        self.startNet()
+        time.sleep(1)
+
+        # download example from GitHub
+        print "downloading latest son-demo.son from GitHub"
+        subprocess.call(
+            ["wget",
+             "http://github.com/sonata-nfv/son-schema/blob/master/package-descriptor/examples/sonata-demo.son?raw=true",
+             "-O",
+             "son-demo.son"]
+        )
+
+        print "starting tests"
+        # board package
+        files = {"package": open("son-demo.son", "rb")}
+        r = requests.post("http://127.0.0.1:5000/packages", files=files)
+        self.assertEqual(r.status_code, 200)
+        self.assertTrue(r.json().get("service_uuid") is not None)
+        os.remove("son-demo.son")
+
+        # instantiate service
+        service_uuid = r.json().get("service_uuid")
+        r2 = requests.post("http://127.0.0.1:5000/instantiations", json={"service_uuid": service_uuid})
+        self.assertEqual(r2.status_code, 200)
+
+        # give the emulator some time to instantiate everything
+        time.sleep(2)
+
+        # check get request APIs
+        r3 = requests.get("http://127.0.0.1:5000/packages")
+        self.assertEqual(len(r3.json().get("service_uuid_list")), 1)
+        r4 = requests.get("http://127.0.0.1:5000/instantiations")
+        self.assertEqual(len(r4.json().get("service_instance_list")), 1)
+
+        # check number of running nodes
+        self.assertTrue(len(self.getContainernetContainers()) == 3)
+        self.assertTrue(len(self.net.hosts) == 5)
+        self.assertTrue(len(self.net.switches) == 2)
+        # check compute list result
+        self.assertTrue(len(self.dc[0].listCompute()) == 3)
+        # check connectivity by using ping
+        for vnf in self.dc[0].listCompute():
+            self.assertTrue(self.net.ping([self.h[0], vnf]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+
diff --git a/src/emuvim/test/runner.py
b/src/emuvim/test/runner.py deleted file mode 100755 index 469a99e..0000000 --- a/src/emuvim/test/runner.py +++ /dev/null @@ -1,52 +0,0 @@ -#!/usr/bin/env python - -""" -Run all tests - -v : verbose output - -e : emulator test only (no API tests) - -a : API tests only -""" - -from unittest import defaultTestLoader, TextTestRunner, TestSuite -import os -import sys -from mininet.util import ensureRoot -from mininet.clean import cleanup -from mininet.log import setLogLevel - - -def runTests( testDir, verbosity=1, emuonly=False, apionly=False ): - "discover and run all tests in testDir" - # ensure inport paths work - sys.path.append("%s/.." % testDir) - # ensure root and cleanup before starting tests - ensureRoot() - cleanup() - # discover all tests in testDir - testSuite = defaultTestLoader.discover( testDir ) - if emuonly: - testSuiteFiltered = [s for s in testSuite if "Emulator" in str(s)] - testSuite = TestSuite() - testSuite.addTests(testSuiteFiltered) - if apionly: - testSuiteFiltered = [s for s in testSuite if "Api" in str(s)] - testSuite = TestSuite() - testSuite.addTests(testSuiteFiltered) - - # run tests - TextTestRunner( verbosity=verbosity ).run( testSuite ) - - -def main(thisdir): - setLogLevel( 'warning' ) - # get the directory containing example tests - vlevel = 2 if '-v' in sys.argv else 1 - emuonly = ('-e' in sys.argv) - apionly = ('-a' in sys.argv) - runTests( - testDir=thisdir, verbosity=vlevel, emuonly=emuonly, apionly=apionly) - - -if __name__ == '__main__': - thisdir = os.path.dirname( os.path.realpath( __file__ ) ) - main(thisdir) diff --git a/src/emuvim/test/test_api_zerorpc.py b/src/emuvim/test/test_api_zerorpc.py deleted file mode 100755 index 2830872..0000000 --- a/src/emuvim/test/test_api_zerorpc.py +++ /dev/null @@ -1 +0,0 @@ -#TODO we'll need this at some time. But I'am lazy. A good REST API seems to be more important. diff --git a/src/emuvim/test/test_emulator.py b/src/emuvim/test/test_emulator.py deleted file mode 100755 index 243f050..0000000 --- a/src/emuvim/test/test_emulator.py +++ /dev/null @@ -1,270 +0,0 @@ -""" -Test suite to automatically test emulator functionalities. -Directly interacts with the emulator through the Mininet-like -Python API. - -Does not test API endpoints. This is done in separated test suites. -""" - -import time -import unittest -from emuvim.dcemulator.node import EmulatorCompute -from emuvim.test.base import SimpleTestTopology - - -#@unittest.skip("disabled topology tests for development") -class testEmulatorTopology( SimpleTestTopology ): - """ - Tests to check the topology API of the emulator. - """ - - def testSingleDatacenter(self): - """ - Create a single data center and add check if its switch is up - by using manually added hosts. Tests especially the - data center specific addLink method. - """ - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - self.net.addLink(self.h[1], self.dc[0]) - # start Mininet network - self.startNet() - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 0) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 1) - # check connectivity by using ping - self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0) - # stop Mininet network - self.stopNet() - - #@unittest.skip("disabled to test if CI fails because this is the first test.") - def testMultipleDatacenterDirect(self): - """ - Create a two data centers and interconnect them. 
- """ - # create network - self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - self.net.addLink(self.h[1], self.dc[1]) - self.net.addLink(self.dc[0], self.dc[1]) - # start Mininet network - self.startNet() - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 0) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 2) - # check connectivity by using ping - self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0) - # stop Mininet network - self.stopNet() - - def testMultipleDatacenterWithIntermediateSwitches(self): - """ - Create a two data centers and interconnect them with additional - switches between them. - """ - # create network - self.createNet( - nswitches=3, ndatacenter=2, nhosts=2, ndockers=0, - autolinkswitches=True) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - self.net.addLink(self.h[1], self.dc[1]) - self.net.addLink(self.dc[0], self.s[0]) - self.net.addLink(self.s[2], self.dc[1]) - # start Mininet network - self.startNet() - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 0) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 5) - # check connectivity by using ping - self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0) - # stop Mininet network - self.stopNet() - - -#@unittest.skip("disabled compute tests for development") -class testEmulatorCompute( SimpleTestTopology ): - """ - Tests to check the emulator's API to add and remove - compute resources at runtime. - """ - - def testAddSingleComputeSingleDC(self): - """ - Adds a single compute instance to - a single DC and checks its connectivity with a - manually added host. - """ - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 1) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 1) - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 1) - self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) - self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1") - # check connectivity by using ping - self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0) - # stop Mininet network - self.stopNet() - - def testRemoveSingleComputeSingleDC(self): - """ - Test stop method for compute instances. - Check that the instance is really removed. 
- """ - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 1) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 1) - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 1) - # check connectivity by using ping - self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0) - # remove compute resources - self.dc[0].stopCompute("vnf1") - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 0) - self.assertTrue(len(self.net.hosts) == 1) - self.assertTrue(len(self.net.switches) == 1) - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 0) - # stop Mininet network - self.stopNet() - - def testGetStatusSingleComputeSingleDC(self): - """ - Check if the getStatus functionality of EmulatorCompute - objects works well. - """ - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 1) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 1) - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 1) - self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) - self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1") - # check connectivity by using ping - self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0) - # check get status - s = self.dc[0].containers.get("vnf1").getStatus() - self.assertTrue(s["name"] == "vnf1") - self.assertTrue(s["state"]["Running"]) - # stop Mininet network - self.stopNet() - - def testConnectivityMultiDC(self): - """ - Test if compute instances started in different data centers - are able to talk to each other. - """ - # create network - self.createNet( - nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, - autolinkswitches=True) - # setup links - self.net.addLink(self.dc[0], self.s[0]) - self.net.addLink(self.dc[1], self.s[2]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - vnf2 = self.dc[1].startCompute("vnf2") - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 2) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 5) - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 1) - self.assertTrue(len(self.dc[1].listCompute()) == 1) - # check connectivity by using ping - self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0) - # stop Mininet network - self.stopNet() - - def testInterleavedAddRemoveMultiDC(self): - """ - Test multiple, interleaved add and remove operations and ensure - that always all expected compute instances are reachable. 
- """ - # create network - self.createNet( - nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, - autolinkswitches=True) - # setup links - self.net.addLink(self.dc[0], self.s[0]) - self.net.addLink(self.dc[1], self.s[2]) - # start Mininet network - self.startNet() - # add compute resources - vnf1 = self.dc[0].startCompute("vnf1") - vnf2 = self.dc[1].startCompute("vnf2") - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 2) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 5) - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 1) - self.assertTrue(len(self.dc[1].listCompute()) == 1) - # check connectivity by using ping - self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0) - # remove compute resources - self.dc[0].stopCompute("vnf1") - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 1) - self.assertTrue(len(self.net.hosts) == 1) - self.assertTrue(len(self.net.switches) == 5) - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 0) - self.assertTrue(len(self.dc[1].listCompute()) == 1) - # add compute resources - vnf3 = self.dc[0].startCompute("vnf3") - vnf4 = self.dc[0].startCompute("vnf4") - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 2) - self.assertTrue(len(self.dc[1].listCompute()) == 1) - self.assertTrue(self.net.ping([vnf3, vnf2]) <= 0.0) - self.assertTrue(self.net.ping([vnf4, vnf2]) <= 0.0) - # remove compute resources - self.dc[0].stopCompute("vnf3") - self.dc[0].stopCompute("vnf4") - self.dc[1].stopCompute("vnf2") - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 0) - self.assertTrue(len(self.dc[1].listCompute()) == 0) - # stop Mininet network - self.stopNet() - -if __name__ == '__main__': - unittest.main() diff --git a/src/emuvim/test/test_resourcemodel.py b/src/emuvim/test/test_resourcemodel.py deleted file mode 100644 index a1d273c..0000000 --- a/src/emuvim/test/test_resourcemodel.py +++ /dev/null @@ -1,339 +0,0 @@ -import time -import os -from emuvim.test.base import SimpleTestTopology -from emuvim.dcemulator.resourcemodel import BaseResourceModel, ResourceFlavor, NotEnoughResourcesAvailable, ResourceModelRegistrar -from emuvim.dcemulator.resourcemodel.upb.simple import UpbSimpleCloudDcRM, UpbOverprovisioningCloudDcRM, UpbDummyRM - - - -class testResourceModel(SimpleTestTopology): - """ - Test the general resource model API and functionality. - """ - - def testBaseResourceModelApi(self): - """ - Tast bare API without real resource madel. - :return: - """ - r = BaseResourceModel() - # check if default flavors are there - self.assertTrue(len(r._flavors) == 5) - # check addFlavor functionality - f = ResourceFlavor("test", {"testmetric": 42}) - r.addFlavour(f) - self.assertTrue("test" in r._flavors) - self.assertTrue(r._flavors.get("test").get("testmetric") == 42) - - def testAddRmToDc(self): - """ - Test is allocate/free is called when a RM is added to a DC. 
- :return: - """ - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - self.net.addLink(self.h[1], self.dc[0]) - # add resource model - r = BaseResourceModel() - self.dc[0].assignResourceModel(r) - # start Mininet network - self.startNet() - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 0) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 1) - # check resource model and resource model registrar - self.assertTrue(self.dc[0]._resource_model is not None) - self.assertTrue(len(self.net.rm_registrar.resource_models) == 1) - - # check if alloc was called during startCompute - self.assertTrue(len(r._allocated_compute_instances) == 0) - self.dc[0].startCompute("tc1") - time.sleep(1) - self.assertTrue(len(r._allocated_compute_instances) == 1) - # check if free was called during stopCompute - self.dc[0].stopCompute("tc1") - self.assertTrue(len(r._allocated_compute_instances) == 0) - # check connectivity by using ping - self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0) - # stop Mininet network - self.stopNet() - - -def createDummyContainerObject(name, flavor): - - class DummyContainer(object): - - def __init__(self): - self.cpu_period = -1 - self.cpu_quota = -1 - self.mem_limit = -1 - self.memswap_limit = -1 - - def updateCpuLimit(self, cpu_period, cpu_quota): - self.cpu_period = cpu_period - self.cpu_quota = cpu_quota - - def updateMemoryLimit(self, mem_limit): - self.mem_limit = mem_limit - - d = DummyContainer() - d.name = name - d.flavor_name = flavor - return d - - - - -class testUpbSimpleCloudDcRM(SimpleTestTopology): - """ - Test the UpbSimpleCloudDc resource model. - """ - - def testAllocationComputations(self): - """ - Test the allocation procedures and correct calculations. 
- :return: - """ - # config - E_CPU = 1.0 - MAX_CU = 100 - E_MEM = 512 - MAX_MU = 2048 - # create dummy resource model environment - reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM) - rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU) - reg.register("test_dc", rm) - - c1 = createDummyContainerObject("c1", flavor="tiny") - rm.allocate(c1) # calculate allocation - self.assertEqual(float(c1.cpu_quota) / c1.cpu_period, E_CPU / MAX_CU * 0.5) # validate compute result - self.assertEqual(float(c1.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 32) # validate memory result - - c2 = createDummyContainerObject("c2", flavor="small") - rm.allocate(c2) # calculate allocation - self.assertEqual(float(c2.cpu_quota) / c2.cpu_period, E_CPU / MAX_CU * 1) # validate compute result - self.assertEqual(float(c2.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128) # validate memory result - - c3 = createDummyContainerObject("c3", flavor="medium") - rm.allocate(c3) # calculate allocation - self.assertEqual(float(c3.cpu_quota) / c3.cpu_period, E_CPU / MAX_CU * 4) # validate compute result - self.assertEqual(float(c3.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 256) # validate memory result - - c4 = createDummyContainerObject("c4", flavor="large") - rm.allocate(c4) # calculate allocation - self.assertEqual(float(c4.cpu_quota) / c4.cpu_period, E_CPU / MAX_CU * 8) # validate compute result - self.assertEqual(float(c4.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 512) # validate memory result - - c5 = createDummyContainerObject("c5", flavor="xlarge") - rm.allocate(c5) # calculate allocation - self.assertEqual(float(c5.cpu_quota) / c5.cpu_period, E_CPU / MAX_CU * 16) # validate compute result - self.assertEqual(float(c5.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 1024) # validate memory result - - - def testAllocationCpuLimit(self): - """ - Test CPU allocation limit - :return: - """ - # config - E_CPU = 1.0 - MAX_CU = 40 - E_MEM = 512 - MAX_MU = 4096 - # create dummy resource model environment - reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM) - rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU) - reg.register("test_dc", rm) - - # test over provisioning exeption - exception = False - try: - c6 = createDummyContainerObject("c6", flavor="xlarge") - c7 = createDummyContainerObject("c7", flavor="xlarge") - c8 = createDummyContainerObject("c8", flavor="xlarge") - c9 = createDummyContainerObject("c9", flavor="xlarge") - rm.allocate(c6) # calculate allocation - rm.allocate(c7) # calculate allocation - rm.allocate(c8) # calculate allocation - rm.allocate(c9) # calculate allocation - except NotEnoughResourcesAvailable as e: - self.assertIn("Not enough compute", e.message) - exception = True - self.assertTrue(exception) - - def testAllocationMemLimit(self): - """ - Test MEM allocation limit - :return: - """ - # config - E_CPU = 1.0 - MAX_CU = 500 - E_MEM = 512 - MAX_MU = 2048 - # create dummy resource model environment - reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM) - rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU) - reg.register("test_dc", rm) - - # test over provisioning exeption - exception = False - try: - c6 = createDummyContainerObject("c6", flavor="xlarge") - c7 = createDummyContainerObject("c7", flavor="xlarge") - c8 = createDummyContainerObject("c8", flavor="xlarge") - rm.allocate(c6) # calculate allocation - rm.allocate(c7) # calculate allocation - rm.allocate(c8) # calculate allocation - except 
NotEnoughResourcesAvailable as e: - self.assertIn("Not enough memory", e.message) - exception = True - self.assertTrue(exception) - - def testFree(self): - """ - Test the free procedure. - :return: - """ - # config - E_CPU = 1.0 - MAX_CU = 100 - # create dummy resource model environment - reg = ResourceModelRegistrar(dc_emulation_max_cpu=1.0, dc_emulation_max_mem=512) - rm = UpbSimpleCloudDcRM(max_cu=100, max_mu=100) - reg.register("test_dc", rm) - c1 = createDummyContainerObject("c6", flavor="tiny") - rm.allocate(c1) # calculate allocation - self.assertTrue(rm.dc_alloc_cu == 0.5) - rm.free(c1) - self.assertTrue(rm.dc_alloc_cu == 0) - - def testInRealTopo(self): - """ - Start a real container and check if limitations are really passed down to Conteinernet. - :return: - """ - # ATTENTION: This test should only be executed if emu runs not inside a Docker container, - # because it manipulates cgroups. - if os.environ.get("SON_EMU_IN_DOCKER") is not None: - return - # create network - self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - self.net.addLink(self.h[1], self.dc[0]) - # add resource model - r = UpbSimpleCloudDcRM(max_cu=100, max_mu=100) - self.dc[0].assignResourceModel(r) - # start Mininet network - self.startNet() - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 0) - self.assertTrue(len(self.net.hosts) == 2) - self.assertTrue(len(self.net.switches) == 1) - # check resource model and resource model registrar - self.assertTrue(self.dc[0]._resource_model is not None) - self.assertTrue(len(self.net.rm_registrar.resource_models) == 1) - - # check if alloc was called during startCompute - self.assertTrue(len(r._allocated_compute_instances) == 0) - tc1 = self.dc[0].startCompute("tc1", flavor_name="tiny") - time.sleep(1) - self.assertTrue(len(r._allocated_compute_instances) == 1) - - # check if there is a real limitation set for containers cgroup - # deactivated for now, seems not to work in docker-in-docker setup used in CI - self.assertEqual(float(tc1.cpu_quota)/tc1.cpu_period, 0.005) - - # check if free was called during stopCompute - self.dc[0].stopCompute("tc1") - self.assertTrue(len(r._allocated_compute_instances) == 0) - # check connectivity by using ping - self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0) - # stop Mininet network - self.stopNet() - - -class testUpbOverprovisioningCloudDcRM(SimpleTestTopology): - """ - Test the UpbOverprovisioningCloudDc resource model. - """ - - def testAllocationComputations(self): - """ - Test the allocation procedures and correct calculations. 
- :return: - """ - # config - E_CPU = 1.0 - MAX_CU = 3 - E_MEM = 512 - MAX_MU = 2048 - # create dummy resource model environment - reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM) - rm = UpbOverprovisioningCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU) - reg.register("test_dc", rm) - - c1 = createDummyContainerObject("c1", flavor="small") - rm.allocate(c1) # calculate allocation - self.assertAlmostEqual(float(c1.cpu_quota) / c1.cpu_period, E_CPU / MAX_CU * 1.0, places=5) - self.assertAlmostEqual(float(c1.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128) - self.assertAlmostEqual(rm.cpu_op_factor, 1.0) - - c2 = createDummyContainerObject("c2", flavor="small") - rm.allocate(c2) # calculate allocation - self.assertAlmostEqual(float(c2.cpu_quota) / c2.cpu_period, E_CPU / MAX_CU * 1.0, places=5) - self.assertAlmostEqual(float(c2.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128) - self.assertAlmostEqual(rm.cpu_op_factor, 1.0) - - c3 = createDummyContainerObject("c3", flavor="small") - rm.allocate(c3) # calculate allocation - self.assertAlmostEqual(float(c3.cpu_quota) / c3.cpu_period, E_CPU / MAX_CU * 1.0, places=5) - self.assertAlmostEqual(float(c3.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128) - self.assertAlmostEqual(rm.cpu_op_factor, 1.0) - - # from this container onwards, we should go to over provisioning mode: - c4 = createDummyContainerObject("c4", flavor="small") - rm.allocate(c4) # calculate allocation - self.assertAlmostEqual(float(c4.cpu_quota) / c4.cpu_period, E_CPU / MAX_CU * (float(3) / 4), places=5) - self.assertAlmostEqual(float(c4.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128, places=5) - self.assertAlmostEqual(rm.cpu_op_factor, 0.75) - - c5 = createDummyContainerObject("c5", flavor="small") - rm.allocate(c5) # calculate allocation - self.assertAlmostEqual(float(c5.cpu_quota) / c5.cpu_period, E_CPU / MAX_CU * (float(3) / 5), places=5) - self.assertAlmostEqual(float(c5.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128) - self.assertAlmostEqual(rm.cpu_op_factor, 0.6) - - -class testUpbDummyRM(SimpleTestTopology): - """ - Test the UpbDummyRM resource model. - """ - - def testAllocationComputations(self): - """ - Test the allocation procedures and correct calculations. 
- :return: - """ - # config - E_CPU = 1.0 - MAX_CU = 3 - E_MEM = 512 - MAX_MU = 2048 - # create dummy resource model environment - reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM) - rm = UpbDummyRM(max_cu=MAX_CU, max_mu=MAX_MU) - reg.register("test_dc", rm) - - c1 = createDummyContainerObject("c1", flavor="small") - rm.allocate(c1) # calculate allocation - self.assertEqual(len(rm._allocated_compute_instances), 1) - - c2 = createDummyContainerObject("c2", flavor="small") - rm.allocate(c2) # calculate allocation - self.assertEqual(len(rm._allocated_compute_instances), 2) - diff --git a/src/emuvim/test/test_sonata_dummy_gatekeeper.py b/src/emuvim/test/test_sonata_dummy_gatekeeper.py deleted file mode 100644 index 33a1d57..0000000 --- a/src/emuvim/test/test_sonata_dummy_gatekeeper.py +++ /dev/null @@ -1,74 +0,0 @@ -import time -import requests -import subprocess -import os -import unittest -from emuvim.test.base import SimpleTestTopology -from emuvim.api.sonata import SonataDummyGatekeeperEndpoint - - - -class testSonataDummyGatekeeper(SimpleTestTopology): - - @unittest.skip("disabled test since ubuntu:trusty not used in current example package") - def testAPI(self): - # create network - self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0) - # setup links - self.net.addLink(self.dc[0], self.h[0]) - self.net.addLink(self.dc[0], self.dc[1]) - self.net.addLink(self.h[1], self.dc[1]) - # connect dummy GK to data centers - sdkg1 = SonataDummyGatekeeperEndpoint("0.0.0.0", 5000) - sdkg1.connectDatacenter(self.dc[0]) - sdkg1.connectDatacenter(self.dc[1]) - # run the dummy gatekeeper (in another thread, don't block) - sdkg1.start() - # start Mininet network - self.startNet() - time.sleep(1) - - # download example from GitHub - print "downloading latest son-demo.son from GitHub" - subprocess.call( - ["wget", - "http://github.com/sonata-nfv/son-schema/blob/master/package-descriptor/examples/sonata-demo.son?raw=true", - "-O", - "son-demo.son"] - ) - - print "starting tests" - # board package - files = {"package": open("son-demo.son", "rb")} - r = requests.post("http://127.0.0.1:5000/packages", files=files) - self.assertEqual(r.status_code, 200) - self.assertTrue(r.json().get("service_uuid") is not None) - os.remove("son-demo.son") - - # instantiate service - service_uuid = r.json().get("service_uuid") - r2 = requests.post("http://127.0.0.1:5000/instantiations", json={"service_uuid": service_uuid}) - self.assertEqual(r2.status_code, 200) - - # give the emulator some time to instantiate everything - time.sleep(2) - - # check get request APIs - r3 = requests.get("http://127.0.0.1:5000/packages") - self.assertEqual(len(r3.json().get("service_uuid_list")), 1) - r4 = requests.get("http://127.0.0.1:5000/instantiations") - self.assertEqual(len(r4.json().get("service_instance_list")), 1) - - # check number of running nodes - self.assertTrue(len(self.getContainernetContainers()) == 3) - self.assertTrue(len(self.net.hosts) == 5) - self.assertTrue(len(self.net.switches) == 2) - # check compute list result - self.assertTrue(len(self.dc[0].listCompute()) == 3) - # check connectivity by using ping - for vnf in self.dc[0].listCompute(): - self.assertTrue(self.net.ping([self.h[0], vnf]) <= 0.0) - # stop Mininet network - self.stopNet() - - diff --git a/src/emuvim/test/unittests/__init__.py b/src/emuvim/test/unittests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/emuvim/test/unittests/test_emulator.py b/src/emuvim/test/unittests/test_emulator.py new 
file mode 100755
index 0000000..243f050
--- /dev/null
+++ b/src/emuvim/test/unittests/test_emulator.py
@@ -0,0 +1,270 @@
+"""
+Test suite to automatically test emulator functionalities.
+Directly interacts with the emulator through the Mininet-like
+Python API.
+
+Does not test API endpoints. This is done in separate test suites.
+"""
+
+import time
+import unittest
+from emuvim.dcemulator.node import EmulatorCompute
+from emuvim.test.base import SimpleTestTopology
+
+
+#@unittest.skip("disabled topology tests for development")
+class testEmulatorTopology( SimpleTestTopology ):
+    """
+    Tests to check the topology API of the emulator.
+    """
+
+    def testSingleDatacenter(self):
+        """
+        Create a single data center and check if its switch is up
+        by using manually added hosts. Tests especially the
+        data center specific addLink method.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[0])
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        self.assertTrue(len(self.getContainernetContainers()) == 0)
+        self.assertTrue(len(self.net.hosts) == 2)
+        self.assertTrue(len(self.net.switches) == 1)
+        # check connectivity by using ping
+        self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    #@unittest.skip("disabled to test if CI fails because this is the first test.")
+    def testMultipleDatacenterDirect(self):
+        """
+        Create two data centers and interconnect them.
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[1])
+        self.net.addLink(self.dc[0], self.dc[1])
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        self.assertTrue(len(self.getContainernetContainers()) == 0)
+        self.assertTrue(len(self.net.hosts) == 2)
+        self.assertTrue(len(self.net.switches) == 2)
+        # check connectivity by using ping
+        self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+    def testMultipleDatacenterWithIntermediateSwitches(self):
+        """
+        Create two data centers and interconnect them with additional
+        switches between them.
+        """
+        # create network
+        self.createNet(
+            nswitches=3, ndatacenter=2, nhosts=2, ndockers=0,
+            autolinkswitches=True)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[1])
+        self.net.addLink(self.dc[0], self.s[0])
+        self.net.addLink(self.s[2], self.dc[1])
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        self.assertTrue(len(self.getContainernetContainers()) == 0)
+        self.assertTrue(len(self.net.hosts) == 2)
+        self.assertTrue(len(self.net.switches) == 5)
+        # check connectivity by using ping
+        self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+
+#@unittest.skip("disabled compute tests for development")
+class testEmulatorCompute( SimpleTestTopology ):
+    """
+    Tests to check the emulator's API to add and remove
+    compute resources at runtime.
+    """
+
+    def testAddSingleComputeSingleDC(self):
+        """
+        Adds a single compute instance to
+        a single DC and checks its connectivity with a
+        manually added host.
+ """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 1) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 1) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) + self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1") + # check connectivity by using ping + self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0) + # stop Mininet network + self.stopNet() + + def testRemoveSingleComputeSingleDC(self): + """ + Test stop method for compute instances. + Check that the instance is really removed. + """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 1) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 1) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + # check connectivity by using ping + self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0) + # remove compute resources + self.dc[0].stopCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 0) + self.assertTrue(len(self.net.hosts) == 1) + self.assertTrue(len(self.net.switches) == 1) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 0) + # stop Mininet network + self.stopNet() + + def testGetStatusSingleComputeSingleDC(self): + """ + Check if the getStatus functionality of EmulatorCompute + objects works well. + """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 1) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 1) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) + self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1") + # check connectivity by using ping + self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0) + # check get status + s = self.dc[0].containers.get("vnf1").getStatus() + self.assertTrue(s["name"] == "vnf1") + self.assertTrue(s["state"]["Running"]) + # stop Mininet network + self.stopNet() + + def testConnectivityMultiDC(self): + """ + Test if compute instances started in different data centers + are able to talk to each other. 
+ """ + # create network + self.createNet( + nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, + autolinkswitches=True) + # setup links + self.net.addLink(self.dc[0], self.s[0]) + self.net.addLink(self.dc[1], self.s[2]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + vnf2 = self.dc[1].startCompute("vnf2") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 2) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 5) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + self.assertTrue(len(self.dc[1].listCompute()) == 1) + # check connectivity by using ping + self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0) + # stop Mininet network + self.stopNet() + + def testInterleavedAddRemoveMultiDC(self): + """ + Test multiple, interleaved add and remove operations and ensure + that always all expected compute instances are reachable. + """ + # create network + self.createNet( + nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, + autolinkswitches=True) + # setup links + self.net.addLink(self.dc[0], self.s[0]) + self.net.addLink(self.dc[1], self.s[2]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + vnf2 = self.dc[1].startCompute("vnf2") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 2) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 5) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + self.assertTrue(len(self.dc[1].listCompute()) == 1) + # check connectivity by using ping + self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0) + # remove compute resources + self.dc[0].stopCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 1) + self.assertTrue(len(self.net.hosts) == 1) + self.assertTrue(len(self.net.switches) == 5) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 0) + self.assertTrue(len(self.dc[1].listCompute()) == 1) + # add compute resources + vnf3 = self.dc[0].startCompute("vnf3") + vnf4 = self.dc[0].startCompute("vnf4") + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 2) + self.assertTrue(len(self.dc[1].listCompute()) == 1) + self.assertTrue(self.net.ping([vnf3, vnf2]) <= 0.0) + self.assertTrue(self.net.ping([vnf4, vnf2]) <= 0.0) + # remove compute resources + self.dc[0].stopCompute("vnf3") + self.dc[0].stopCompute("vnf4") + self.dc[1].stopCompute("vnf2") + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 0) + self.assertTrue(len(self.dc[1].listCompute()) == 0) + # stop Mininet network + self.stopNet() + +if __name__ == '__main__': + unittest.main() diff --git a/src/emuvim/test/unittests/test_resourcemodel.py b/src/emuvim/test/unittests/test_resourcemodel.py new file mode 100644 index 0000000..a1d273c --- /dev/null +++ b/src/emuvim/test/unittests/test_resourcemodel.py @@ -0,0 +1,339 @@ +import time +import os +from emuvim.test.base import SimpleTestTopology +from emuvim.dcemulator.resourcemodel import BaseResourceModel, ResourceFlavor, NotEnoughResourcesAvailable, ResourceModelRegistrar +from emuvim.dcemulator.resourcemodel.upb.simple import UpbSimpleCloudDcRM, UpbOverprovisioningCloudDcRM, UpbDummyRM + + + +class testResourceModel(SimpleTestTopology): + """ + Test the general resource model API 
and functionality.
+    """
+
+    def testBaseResourceModelApi(self):
+        """
+        Test the bare API without a real resource model.
+        :return:
+        """
+        r = BaseResourceModel()
+        # check if default flavors are there
+        self.assertTrue(len(r._flavors) == 5)
+        # check addFlavor functionality
+        f = ResourceFlavor("test", {"testmetric": 42})
+        r.addFlavour(f)
+        self.assertTrue("test" in r._flavors)
+        self.assertTrue(r._flavors.get("test").get("testmetric") == 42)
+
+    def testAddRmToDc(self):
+        """
+        Test if allocate/free is called when a RM is added to a DC.
+        :return:
+        """
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[0])
+        # add resource model
+        r = BaseResourceModel()
+        self.dc[0].assignResourceModel(r)
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        self.assertTrue(len(self.getContainernetContainers()) == 0)
+        self.assertTrue(len(self.net.hosts) == 2)
+        self.assertTrue(len(self.net.switches) == 1)
+        # check resource model and resource model registrar
+        self.assertTrue(self.dc[0]._resource_model is not None)
+        self.assertTrue(len(self.net.rm_registrar.resource_models) == 1)
+
+        # check if alloc was called during startCompute
+        self.assertTrue(len(r._allocated_compute_instances) == 0)
+        self.dc[0].startCompute("tc1")
+        time.sleep(1)
+        self.assertTrue(len(r._allocated_compute_instances) == 1)
+        # check if free was called during stopCompute
+        self.dc[0].stopCompute("tc1")
+        self.assertTrue(len(r._allocated_compute_instances) == 0)
+        # check connectivity by using ping
+        self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+
+def createDummyContainerObject(name, flavor):
+
+    class DummyContainer(object):
+
+        def __init__(self):
+            self.cpu_period = -1
+            self.cpu_quota = -1
+            self.mem_limit = -1
+            self.memswap_limit = -1
+
+        def updateCpuLimit(self, cpu_period, cpu_quota):
+            self.cpu_period = cpu_period
+            self.cpu_quota = cpu_quota
+
+        def updateMemoryLimit(self, mem_limit):
+            self.mem_limit = mem_limit
+
+    d = DummyContainer()
+    d.name = name
+    d.flavor_name = flavor
+    return d
+
+
+
+
+class testUpbSimpleCloudDcRM(SimpleTestTopology):
+    """
+    Test the UpbSimpleCloudDc resource model.
+    """
+
+    def testAllocationComputations(self):
+        """
+        Test the allocation procedures and correct calculations.
+        :return:
+        """
+        # config
+        E_CPU = 1.0
+        MAX_CU = 100
+        E_MEM = 512
+        MAX_MU = 2048
+        # create dummy resource model environment
+        reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+        rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
+        reg.register("test_dc", rm)
+
+        c1 = createDummyContainerObject("c1", flavor="tiny")
+        rm.allocate(c1)  # calculate allocation
+        self.assertEqual(float(c1.cpu_quota) / c1.cpu_period, E_CPU / MAX_CU * 0.5)  # validate compute result
+        self.assertEqual(float(c1.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 32)  # validate memory result
+
+        c2 = createDummyContainerObject("c2", flavor="small")
+        rm.allocate(c2)  # calculate allocation
+        self.assertEqual(float(c2.cpu_quota) / c2.cpu_period, E_CPU / MAX_CU * 1)  # validate compute result
+        self.assertEqual(float(c2.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128)  # validate memory result
+
+        c3 = createDummyContainerObject("c3", flavor="medium")
+        rm.allocate(c3)  # calculate allocation
+        self.assertEqual(float(c3.cpu_quota) / c3.cpu_period, E_CPU / MAX_CU * 4)  # validate compute result
+        self.assertEqual(float(c3.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 256)  # validate memory result
+
+        c4 = createDummyContainerObject("c4", flavor="large")
+        rm.allocate(c4)  # calculate allocation
+        self.assertEqual(float(c4.cpu_quota) / c4.cpu_period, E_CPU / MAX_CU * 8)  # validate compute result
+        self.assertEqual(float(c4.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 512)  # validate memory result
+
+        c5 = createDummyContainerObject("c5", flavor="xlarge")
+        rm.allocate(c5)  # calculate allocation
+        self.assertEqual(float(c5.cpu_quota) / c5.cpu_period, E_CPU / MAX_CU * 16)  # validate compute result
+        self.assertEqual(float(c5.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 1024)  # validate memory result
+
+
+    def testAllocationCpuLimit(self):
+        """
+        Test CPU allocation limit
+        :return:
+        """
+        # config
+        E_CPU = 1.0
+        MAX_CU = 40
+        E_MEM = 512
+        MAX_MU = 4096
+        # create dummy resource model environment
+        reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+        rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
+        reg.register("test_dc", rm)
+
+        # test overprovisioning exception
+        exception = False
+        try:
+            c6 = createDummyContainerObject("c6", flavor="xlarge")
+            c7 = createDummyContainerObject("c7", flavor="xlarge")
+            c8 = createDummyContainerObject("c8", flavor="xlarge")
+            c9 = createDummyContainerObject("c9", flavor="xlarge")
+            rm.allocate(c6)  # calculate allocation
+            rm.allocate(c7)  # calculate allocation
+            rm.allocate(c8)  # calculate allocation
+            rm.allocate(c9)  # calculate allocation
+        except NotEnoughResourcesAvailable as e:
+            self.assertIn("Not enough compute", e.message)
+            exception = True
+        self.assertTrue(exception)
+
+    def testAllocationMemLimit(self):
+        """
+        Test MEM allocation limit
+        :return:
+        """
+        # config
+        E_CPU = 1.0
+        MAX_CU = 500
+        E_MEM = 512
+        MAX_MU = 2048
+        # create dummy resource model environment
+        reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM)
+        rm = UpbSimpleCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU)
+        reg.register("test_dc", rm)
+
+        # test overprovisioning exception
+        exception = False
+        try:
+            c6 = createDummyContainerObject("c6", flavor="xlarge")
+            c7 = createDummyContainerObject("c7", flavor="xlarge")
+            c8 = createDummyContainerObject("c8", flavor="xlarge")
+            rm.allocate(c6)  # calculate allocation
+            rm.allocate(c7)  # calculate allocation
+            rm.allocate(c8)  # calculate allocation
+        except NotEnoughResourcesAvailable as e:
+            self.assertIn("Not enough memory", e.message)
+            exception = True
+        self.assertTrue(exception)
+
+    def testFree(self):
+        """
+        Test the free procedure.
+        :return:
+        """
+        # config
+        E_CPU = 1.0
+        MAX_CU = 100
+        # create dummy resource model environment
+        reg = ResourceModelRegistrar(dc_emulation_max_cpu=1.0, dc_emulation_max_mem=512)
+        rm = UpbSimpleCloudDcRM(max_cu=100, max_mu=100)
+        reg.register("test_dc", rm)
+        c1 = createDummyContainerObject("c6", flavor="tiny")
+        rm.allocate(c1)  # calculate allocation
+        self.assertTrue(rm.dc_alloc_cu == 0.5)
+        rm.free(c1)
+        self.assertTrue(rm.dc_alloc_cu == 0)
+
+    def testInRealTopo(self):
+        """
+        Start a real container and check if limitations are really passed down to Containernet.
+        :return:
+        """
+        # ATTENTION: This test should only be executed if emu does not run inside a Docker container,
+        # because it manipulates cgroups.
+        if os.environ.get("SON_EMU_IN_DOCKER") is not None:
+            return
+        # create network
+        self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
+        # setup links
+        self.net.addLink(self.dc[0], self.h[0])
+        self.net.addLink(self.h[1], self.dc[0])
+        # add resource model
+        r = UpbSimpleCloudDcRM(max_cu=100, max_mu=100)
+        self.dc[0].assignResourceModel(r)
+        # start Mininet network
+        self.startNet()
+        # check number of running nodes
+        self.assertTrue(len(self.getContainernetContainers()) == 0)
+        self.assertTrue(len(self.net.hosts) == 2)
+        self.assertTrue(len(self.net.switches) == 1)
+        # check resource model and resource model registrar
+        self.assertTrue(self.dc[0]._resource_model is not None)
+        self.assertTrue(len(self.net.rm_registrar.resource_models) == 1)
+
+        # check if alloc was called during startCompute
+        self.assertTrue(len(r._allocated_compute_instances) == 0)
+        tc1 = self.dc[0].startCompute("tc1", flavor_name="tiny")
+        time.sleep(1)
+        self.assertTrue(len(r._allocated_compute_instances) == 1)
+
+        # check if there is a real limitation set for containers cgroup
+        # deactivated for now, seems not to work in docker-in-docker setup used in CI
+        self.assertEqual(float(tc1.cpu_quota)/tc1.cpu_period, 0.005)
+
+        # check if free was called during stopCompute
+        self.dc[0].stopCompute("tc1")
+        self.assertTrue(len(r._allocated_compute_instances) == 0)
+        # check connectivity by using ping
+        self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+        # stop Mininet network
+        self.stopNet()
+
+
+class testUpbOverprovisioningCloudDcRM(SimpleTestTopology):
+    """
+    Test the UpbOverprovisioningCloudDc resource model.
+    """
+
+    def testAllocationComputations(self):
+        """
+        Test the allocation procedures and correct calculations.
+ :return: + """ + # config + E_CPU = 1.0 + MAX_CU = 3 + E_MEM = 512 + MAX_MU = 2048 + # create dummy resource model environment + reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM) + rm = UpbOverprovisioningCloudDcRM(max_cu=MAX_CU, max_mu=MAX_MU) + reg.register("test_dc", rm) + + c1 = createDummyContainerObject("c1", flavor="small") + rm.allocate(c1) # calculate allocation + self.assertAlmostEqual(float(c1.cpu_quota) / c1.cpu_period, E_CPU / MAX_CU * 1.0, places=5) + self.assertAlmostEqual(float(c1.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128) + self.assertAlmostEqual(rm.cpu_op_factor, 1.0) + + c2 = createDummyContainerObject("c2", flavor="small") + rm.allocate(c2) # calculate allocation + self.assertAlmostEqual(float(c2.cpu_quota) / c2.cpu_period, E_CPU / MAX_CU * 1.0, places=5) + self.assertAlmostEqual(float(c2.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128) + self.assertAlmostEqual(rm.cpu_op_factor, 1.0) + + c3 = createDummyContainerObject("c3", flavor="small") + rm.allocate(c3) # calculate allocation + self.assertAlmostEqual(float(c3.cpu_quota) / c3.cpu_period, E_CPU / MAX_CU * 1.0, places=5) + self.assertAlmostEqual(float(c3.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128) + self.assertAlmostEqual(rm.cpu_op_factor, 1.0) + + # from this container onwards, we should go to over provisioning mode: + c4 = createDummyContainerObject("c4", flavor="small") + rm.allocate(c4) # calculate allocation + self.assertAlmostEqual(float(c4.cpu_quota) / c4.cpu_period, E_CPU / MAX_CU * (float(3) / 4), places=5) + self.assertAlmostEqual(float(c4.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128, places=5) + self.assertAlmostEqual(rm.cpu_op_factor, 0.75) + + c5 = createDummyContainerObject("c5", flavor="small") + rm.allocate(c5) # calculate allocation + self.assertAlmostEqual(float(c5.cpu_quota) / c5.cpu_period, E_CPU / MAX_CU * (float(3) / 5), places=5) + self.assertAlmostEqual(float(c5.mem_limit/1024/1024), float(E_MEM) / MAX_MU * 128) + self.assertAlmostEqual(rm.cpu_op_factor, 0.6) + + +class testUpbDummyRM(SimpleTestTopology): + """ + Test the UpbDummyRM resource model. + """ + + def testAllocationComputations(self): + """ + Test the allocation procedures and correct calculations. + :return: + """ + # config + E_CPU = 1.0 + MAX_CU = 3 + E_MEM = 512 + MAX_MU = 2048 + # create dummy resource model environment + reg = ResourceModelRegistrar(dc_emulation_max_cpu=E_CPU, dc_emulation_max_mem=E_MEM) + rm = UpbDummyRM(max_cu=MAX_CU, max_mu=MAX_MU) + reg.register("test_dc", rm) + + c1 = createDummyContainerObject("c1", flavor="small") + rm.allocate(c1) # calculate allocation + self.assertEqual(len(rm._allocated_compute_instances), 1) + + c2 = createDummyContainerObject("c2", flavor="small") + rm.allocate(c2) # calculate allocation + self.assertEqual(len(rm._allocated_compute_instances), 2) + diff --git a/utils/ci/build_01_unit_tests.sh b/utils/ci/build_01_unit_tests.sh index ab6ece3..5544503 100755 --- a/utils/ci/build_01_unit_tests.sh +++ b/utils/ci/build_01_unit_tests.sh @@ -10,4 +10,4 @@ cd ${BASE_DIR} rm -rf utils/ci/junit-xml/* # Launch the unit testing on emuvim -py.test -v --junit-xml=utils/ci/junit-xml/pytest_emuvim.xml src/emuvim +py.test -v --junit-xml=utils/ci/junit-xml/pytest_emuvim.xml src/emuvim/test/unittests
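
The CI script above now runs only src/emuvim/test/unittests, and this commit also removes runner.py and __main__.py, so there is no in-tree driver left for launching the relocated suites by hand. A minimal local driver in the spirit of the removed runner.py could look like the sketch below; it is not part of this commit, it assumes the new unittests/ and integrationtests/ folders introduced above, and the "-i" switch is an illustrative addition rather than an existing flag.

#!/usr/bin/env python
"""Minimal local test driver (sketch, not part of this commit).
Assumes placement in src/emuvim/test/ next to the new unittests/
and integrationtests/ directories."""
import os
import sys
from unittest import defaultTestLoader, TextTestRunner

from mininet.clean import cleanup
from mininet.util import ensureRoot


def run(subdir, verbosity=1):
    # require root and clean up Mininet state, as the removed runner.py did
    ensureRoot()
    cleanup()
    # discover and run everything below src/emuvim/test/<subdir>
    here = os.path.dirname(os.path.realpath(__file__))
    TextTestRunner(verbosity=verbosity).run(
        defaultTestLoader.discover(os.path.join(here, subdir)))


if __name__ == '__main__':
    # -i selects the integration tests, -v enables verbose output
    run("integrationtests" if "-i" in sys.argv else "unittests",
        verbosity=2 if "-v" in sys.argv else 1)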
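
The new integration test drives the SONATA dummy gatekeeper purely through its REST API: it on-boards a *.son package, instantiates the resulting service, and then queries the package and instance lists. The same interaction can be reproduced against a running emulator with a few requests calls; the sketch below only reuses the endpoints and payloads that appear in the test above, and the package file name is a placeholder.

import requests

GK = "http://127.0.0.1:5000"  # dummy gatekeeper started via SonataDummyGatekeeperEndpoint

# on-board a package (placeholder file name, e.g. the downloaded son-demo.son)
with open("son-demo.son", "rb") as pkg:
    r = requests.post(GK + "/packages", files={"package": pkg})
service_uuid = r.json().get("service_uuid")

# instantiate the on-boarded service
requests.post(GK + "/instantiations", json={"service_uuid": service_uuid})

# list known packages and running service instances
print(requests.get(GK + "/packages").json().get("service_uuid_list"))
print(requests.get(GK + "/instantiations").json().get("service_instance_list"))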
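
For reference, the values asserted in the UpbSimpleCloudDcRM tests follow a simple proportional rule: each container receives E_CPU / MAX_CU of the emulated CPU time per compute unit of its flavor and E_MEM / MAX_MU of the emulated memory per memory unit, with flavor sizes of 0.5/1/4/8/16 CU and 32/128/256/512/1024 MU for tiny through xlarge (as implied by the assertions). A standalone check of that arithmetic, independent of the emulator:

# Worked example of the arithmetic behind testAllocationComputations
# (flavor sizes inferred from the assertions above; not an emulator API).
E_CPU, MAX_CU = 1.0, 100   # emulated CPU share and max compute units
E_MEM, MAX_MU = 512, 2048  # emulated memory (MB) and max memory units

flavors = [("tiny", 0.5, 32), ("small", 1, 128), ("medium", 4, 256),
           ("large", 8, 512), ("xlarge", 16, 1024)]

for name, cu, mu in flavors:
    cpu_share = E_CPU / MAX_CU * cu      # fraction of host CPU time (cpu_quota / cpu_period)
    mem_mb = float(E_MEM) / MAX_MU * mu  # container memory limit in MB
    print("%-6s cpu=%.4f mem=%.1f MB" % (name, cpu_share, mem_mb))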