From: stevenvanrossem
Date: Wed, 11 May 2016 21:03:35 +0000 (+0200)
Subject: merge master
X-Git-Tag: v3.1~127^2~2
X-Git-Url: https://osm.etsi.org/gitweb/?p=osm%2Fvim-emu.git;a=commitdiff_plain;h=241e3e951c4871d956c29887ab101adce4817363;hp=-c

merge master
---

241e3e951c4871d956c29887ab101adce4817363
diff --combined src/emuvim/dcemulator/net.py
index 9ca75f7,7f31a46..115b9e5
--- a/src/emuvim/dcemulator/net.py
+++ b/src/emuvim/dcemulator/net.py
@@@ -12,7 -12,7 +12,7 @@@ import r
  import urllib2
  from functools import partial
  
- from mininet.net import Dockernet
+ from mininet.net import Containernet
  from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
  from mininet.cli import CLI
  from mininet.link import TCLink
@@@ -21,21 -21,20 +21,21 @@@ from emuvim.dcemulator.monitoring impor
  from emuvim.dcemulator.node import Datacenter, EmulatorCompute
  from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
  
- class DCNetwork(Dockernet):
+ class DCNetwork(Containernet):
      """
-     Wraps the original Mininet/Dockernet class and provides
+     Wraps the original Mininet/Containernet class and provides
      methods to add data centers, switches, etc.
  
      This class is used by topology definition scripts.
      """
  
      def __init__(self, controller=RemoteController, monitor=False,
+                  enable_learning = True,  # in case of RemoteController (Ryu), learning switch behavior can be turned off/on
                   dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                   dc_emulation_max_mem=512,  # emulation max mem in MB
                   **kwargs):
          """
-         Create an extended version of a Dockernet network
+         Create an extended version of a Containernet network
          :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
          :param kwargs: pass-through for Mininet parameters
          :return:
@@@ -43,14 -42,14 +43,15 @@@
          self.dcs = {}
  
          # call original Containernet.__init__ and setup default controller
-         Dockernet.__init__(
+         Containernet.__init__(
-             self, switch=OVSKernelSwitch, **kwargs)
+             self, switch=OVSKernelSwitch, controller=controller, **kwargs)
+ 
+         # Ryu management
          self.ryu_process = None
          if controller == RemoteController:
              # start Ryu controller
-             self.startRyu()
+             self.startRyu(learning_switch=enable_learning)
  
          # add the specified controller
          self.addController('c0', controller=controller)
@@@ -122,11 -121,11 +123,11 @@@
              params["params2"]["ip"] = self.getNextIp()
          # ensure that we allow TCLinks between data centers
          # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
-         # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
+         # see Containernet issue: https://github.com/mpeuster/containernet/issues/3
          if "cls" not in params:
              params["cls"] = TCLink
  
-         link = Dockernet.addLink(self, node1, node2, **params)
+         link = Containernet.addLink(self, node1, node2, **params)
  
          # try to give container interfaces a default id
          node1_port_id = node1.ports[link.intf1]
@@@ -144,7 -143,7 +145,7 @@@
          # add edge and assigned port number to graph in both directions between node1 and node2
          # port_id: id given in descriptor (if available, otherwise same as port)
-         # port: portnumber assigned by Dockernet
+         # port: port number assigned by Containernet
          attr_dict = {}
          # possible weight metrics allowed by TClink class:
@@@ -181,14 -180,14 +182,14 @@@
          Wrapper for addDocker method to use custom container class.
          """
          self.DCNetwork_graph.add_node(label)
-         return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)
+         return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
  
      def removeDocker( self, label, **params ):
          """
          Wrapper for removeDocker method to update graph.
          """
          self.DCNetwork_graph.remove_node(label)
-         return Dockernet.removeDocker(self, label, **params)
+         return Containernet.removeDocker(self, label, **params)
  
      def addSwitch( self, name, add_to_graph=True, **params ):
          """
@@@ -196,7 -195,7 +197,7 @@@
          """
          if add_to_graph:
              self.DCNetwork_graph.add_node(name)
-         return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
+         return Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
  
      def getAllContainers(self):
          """
@@@ -211,7 -210,7 +212,7 @@@
          # start
          for dc in self.dcs.itervalues():
              dc.start()
-         Dockernet.start(self)
+         Containernet.start(self)
  
      def stop(self):
@@@ -220,7 -219,7 +221,7 @@@
              self.monitor_agent.stop()
  
          # stop emulator net
-         Dockernet.stop(self)
+         Containernet.stop(self)
  
          # stop Ryu controller
          self.stopRyu()
@@@ -332,15 -331,10 +333,15 @@@
          kwargs['vlan'] = vlan
          kwargs['path'] = path
          kwargs['current_hop'] = current_hop
- 
-         ## set flow entry via ovs-ofctl
-         #self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
-         ## set flow entry via ryu rest api
-         self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ 
+         if self.controller == RemoteController:
+             ## set flow entry via ryu rest api
+             self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+         else:
+             ## set flow entry via ovs-ofctl
+             self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ 
  
          # take first link between switches by default
          if isinstance( next_node, OVSSwitch ):
@@@ -365,8 -359,6 +366,8 @@@
          flow = {}
          flow['dpid'] = int(node.dpid, 16)
  
+         logging.info('node name:{0}'.format(node.name))
+ 
          if cookie:
              flow['cookie'] = int(cookie)
@@@ -377,13 -369,15 +378,13 @@@
          # http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#add-a-flow-entry
          if cmd == 'add-flow':
              prefix = 'stats/flowentry/add'
-             action = {}
-             action['type'] = 'OUTPUT'
-             action['port'] = switch_outport_nr
-             flow['actions'].append(action)
              if vlan != None:
                  if path.index(current_hop) == 0:  # first node
                      action = {}
                      action['type'] = 'PUSH_VLAN'  # Push a new VLAN tag if an input frame is non-VLAN-tagged
                      action['ethertype'] = 33024  # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
+                     flow['actions'].append(action)
+                     action = {}
                      action['type'] = 'SET_FIELD'
                      action['field'] = 'vlan_vid'
                      action['value'] = vlan
@@@ -395,11 -389,6 +396,11 @@@
                      flow['actions'].append(action)
              else:  # middle nodes
                  match += ',dl_vlan=%s' % vlan
+ 
+             # output action must come last
+             action = {}
+             action['type'] = 'OUTPUT'
+             action['port'] = switch_outport_nr
+             flow['actions'].append(action)
  
          #flow['match'] = self._parse_match(match)
          elif cmd == 'del-flows':
              #del(flow['actions'])
@@@ -455,7 -444,7 +456,7 @@@
                  switch_outport_nr, cmd))
  
      # start Ryu Openflow controller as Remote Controller for the DCNetwork
-     def startRyu(self):
+     def startRyu(self, learning_switch=True):
          # start Ryu controller with rest-API
          python_install_path = site.getsitepackages()[0]
          ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
@@@ -466,11 -455,9 +467,11 @@@
          ryu_of_port = '6653'
          ryu_cmd = 'ryu-manager'
          FNULL = open("/tmp/ryu.log", 'w')
-         self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
-         # no learning switch
-         #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+         if learning_switch:
+             self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+         else:
+             # no learning switch
+             self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
          time.sleep(1)
  
      def stopRyu(self):
@@@ -479,22 -466,18 +480,22 @@@
              self.ryu_process.kill()
  
      def ryu_REST(self, prefix, dpid=None, data=None):
-         if dpid:
-             url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)
-         else:
-             url = self.ryu_REST_api + '/' + str(prefix)
-         if data:
-             #logging.info('POST: {0}'.format(str(data)))
-             req = urllib2.Request(url, str(data))
-         else:
-             req = urllib2.Request(url)
+         try:
+             if dpid:
+                 url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)
+             else:
+                 url = self.ryu_REST_api + '/' + str(prefix)
+             if data:
+                 #logging.info('POST: {0}'.format(str(data)))
+                 req = urllib2.Request(url, str(data))
+             else:
+                 req = urllib2.Request(url)
-         ret = urllib2.urlopen(req).read()
-         return ret
+             ret = urllib2.urlopen(req).read()
+             return ret
+         except:
+             logging.info('error url: {0}'.format(str(url)))
+             if data: logging.info('error POST: {0}'.format(str(data)))
  
      # need to respect that some match fields must be integers
      # http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#description-of-match-and-actions
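For orientation, here is a minimal sketch (not part of the commit) of how a topology script might use the new constructor options: enable_learning toggles the Ryu learning-switch app started by startRyu(), and choosing a controller other than RemoteController now makes setChain() fall back to ovs-ofctl. The datacenter labels are hypothetical, and addDatacenter() belongs to the emulator API but is not shown in this diff:

    from mininet.node import RemoteController
    from emuvim.dcemulator.net import DCNetwork

    # Ryu is started without the learning-switch app, so any forwarding
    # between the data centers must be installed explicitly via setChain().
    net = DCNetwork(controller=RemoteController, enable_learning=False)
    dc1 = net.addDatacenter("dc1")  # hypothetical labels
    dc2 = net.addDatacenter("dc2")
    net.addLink(dc1, dc2)
    net.start()
    # ... start compute instances and call net.setChain(...) here ...
    net.stop()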
""" self.DCNetwork_graph.add_node(label) - return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params) + return Containernet.addDocker(self, label, cls=EmulatorCompute, **params) def removeDocker( self, label, **params ): """ Wrapper for removeDocker method to update graph. """ self.DCNetwork_graph.remove_node(label) - return Dockernet.removeDocker(self, label, **params) + return Containernet.removeDocker(self, label, **params) def addSwitch( self, name, add_to_graph=True, **params ): """ @@@ -196,7 -195,7 +197,7 @@@ """ if add_to_graph: self.DCNetwork_graph.add_node(name) - return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params) + return Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params) def getAllContainers(self): """ @@@ -211,7 -210,7 +212,7 @@@ # start for dc in self.dcs.itervalues(): dc.start() - Dockernet.start(self) + Containernet.start(self) def stop(self): @@@ -220,7 -219,7 +221,7 @@@ self.monitor_agent.stop() # stop emulator net - Dockernet.stop(self) + Containernet.stop(self) # stop Ryu controller self.stopRyu() @@@ -332,15 -331,10 +333,15 @@@ kwargs['vlan'] = vlan kwargs['path'] = path kwargs['current_hop'] = current_hop - ## set flow entry via ovs-ofctl - #self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs) - ## set flow entry via ryu rest api - self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs) + + if self.controller == RemoteController: + ## set flow entry via ryu rest api + self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs) + else: + ## set flow entry via ovs-ofctl + self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs) + + # take first link between switches by default if isinstance( next_node, OVSSwitch ): @@@ -365,8 -359,6 +366,8 @@@ flow = {} flow['dpid'] = int(node.dpid, 16) + logging.info('node name:{0}'.format(node.name)) + if cookie: flow['cookie'] = int(cookie) @@@ -377,13 -369,15 +378,13 @@@ # http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#add-a-flow-entry if cmd == 'add-flow': prefix = 'stats/flowentry/add' - action = {} - action['type'] = 'OUTPUT' - action['port'] = switch_outport_nr - flow['actions'].append(action) if vlan != None: if path.index(current_hop) == 0: # first node action = {} action['type'] = 'PUSH_VLAN' # Push a new VLAN tag if a input frame is non-VLAN-tagged action['ethertype'] = 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame + flow['actions'].append(action) + action = {} action['type'] = 'SET_FIELD' action['field'] = 'vlan_vid' action['value'] = vlan @@@ -395,11 -389,6 +396,11 @@@ flow['actions'].append(action) else: # middle nodes match += ',dl_vlan=%s' % vlan + # output action must come last + action = {} + action['type'] = 'OUTPUT' + action['port'] = switch_outport_nr + flow['actions'].append(action) #flow['match'] = self._parse_match(match) elif cmd == 'del-flows': #del(flow['actions']) @@@ -455,7 -444,7 +456,7 @@@ switch_outport_nr, cmd)) # start Ryu Openflow controller as Remote Controller for the DCNetwork - def startRyu(self): + def startRyu(self, learning_switch=True): # start Ryu controller with rest-API python_install_path = site.getsitepackages()[0] ryu_path = python_install_path + '/ryu/app/simple_switch_13.py' @@@ -466,11 -455,9 +467,11 @@@ ryu_of_port = '6653' ryu_cmd = 'ryu-manager' FNULL = open("/tmp/ryu.log", 'w') - self.ryu_process = Popen([ryu_cmd, ryu_path, 
diff --combined src/emuvim/test/unittests/test_emulator.py
index 0000000,243f050..e2c3b6b
mode 000000,100755..100755
--- a/src/emuvim/test/unittests/test_emulator.py
+++ b/src/emuvim/test/unittests/test_emulator.py
@@@ -1,0 -1,270 +1,319 @@@
+ """
+ Test suite to automatically test emulator functionalities.
+ Directly interacts with the emulator through the Mininet-like
+ Python API.
+ 
+ Does not test API endpoints. This is done in separate test suites.
+ """
+ 
+ import time
+ import unittest
+ from emuvim.dcemulator.node import EmulatorCompute
+ from emuvim.test.base import SimpleTestTopology
++from mininet.node import RemoteController
+ 
+ 
+ #@unittest.skip("disabled topology tests for development")
+ class testEmulatorTopology( SimpleTestTopology ):
+     """
+     Tests to check the topology API of the emulator.
+     """
+ 
+     def testSingleDatacenter(self):
+         """
+         Create a single data center and check if its switch is up
+         by using manually added hosts. Tests especially the
+         data center specific addLink method.
+         """
+         # create network
+         self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
+         # setup links
+         self.net.addLink(self.dc[0], self.h[0])
+         self.net.addLink(self.h[1], self.dc[0])
+         # start Mininet network
+         self.startNet()
+         # check number of running nodes
+         self.assertTrue(len(self.getContainernetContainers()) == 0)
+         self.assertTrue(len(self.net.hosts) == 2)
+         self.assertTrue(len(self.net.switches) == 1)
+         # check connectivity by using ping
+         self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+         # stop Mininet network
+         self.stopNet()
+ 
+     #@unittest.skip("disabled to test if CI fails because this is the first test.")
+     def testMultipleDatacenterDirect(self):
+         """
+         Create two data centers and interconnect them.
+         """
+         # create network
+         self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
+         # setup links
+         self.net.addLink(self.dc[0], self.h[0])
+         self.net.addLink(self.h[1], self.dc[1])
+         self.net.addLink(self.dc[0], self.dc[1])
+         # start Mininet network
+         self.startNet()
+         # check number of running nodes
+         self.assertTrue(len(self.getContainernetContainers()) == 0)
+         self.assertTrue(len(self.net.hosts) == 2)
+         self.assertTrue(len(self.net.switches) == 2)
+         # check connectivity by using ping
+         self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+         # stop Mininet network
+         self.stopNet()
+ 
+     def testMultipleDatacenterWithIntermediateSwitches(self):
+         """
+         Create two data centers and interconnect them with additional
+         switches between them.
+         """
+         # create network
+         self.createNet(
+             nswitches=3, ndatacenter=2, nhosts=2, ndockers=0,
+             autolinkswitches=True)
+         # setup links
+         self.net.addLink(self.dc[0], self.h[0])
+         self.net.addLink(self.h[1], self.dc[1])
+         self.net.addLink(self.dc[0], self.s[0])
+         self.net.addLink(self.s[2], self.dc[1])
+         # start Mininet network
+         self.startNet()
+         # check number of running nodes
+         self.assertTrue(len(self.getContainernetContainers()) == 0)
+         self.assertTrue(len(self.net.hosts) == 2)
+         self.assertTrue(len(self.net.switches) == 5)
+         # check connectivity by using ping
+         self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+         # stop Mininet network
+         self.stopNet()
+ 
+ """ + + import time + import unittest + from emuvim.dcemulator.node import EmulatorCompute + from emuvim.test.base import SimpleTestTopology ++from mininet.node import RemoteController + + + #@unittest.skip("disabled topology tests for development") + class testEmulatorTopology( SimpleTestTopology ): + """ + Tests to check the topology API of the emulator. + """ + + def testSingleDatacenter(self): + """ + Create a single data center and add check if its switch is up + by using manually added hosts. Tests especially the + data center specific addLink method. + """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + self.net.addLink(self.h[1], self.dc[0]) + # start Mininet network + self.startNet() + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 0) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 1) + # check connectivity by using ping + self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0) + # stop Mininet network + self.stopNet() + + #@unittest.skip("disabled to test if CI fails because this is the first test.") + def testMultipleDatacenterDirect(self): + """ + Create a two data centers and interconnect them. + """ + # create network + self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + self.net.addLink(self.h[1], self.dc[1]) + self.net.addLink(self.dc[0], self.dc[1]) + # start Mininet network + self.startNet() + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 0) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 2) + # check connectivity by using ping + self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0) + # stop Mininet network + self.stopNet() + + def testMultipleDatacenterWithIntermediateSwitches(self): + """ + Create a two data centers and interconnect them with additional + switches between them. + """ + # create network + self.createNet( + nswitches=3, ndatacenter=2, nhosts=2, ndockers=0, + autolinkswitches=True) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + self.net.addLink(self.h[1], self.dc[1]) + self.net.addLink(self.dc[0], self.s[0]) + self.net.addLink(self.s[2], self.dc[1]) + # start Mininet network + self.startNet() + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 0) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 5) + # check connectivity by using ping + self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0) + # stop Mininet network + self.stopNet() + ++class testEmulatorNetworking( SimpleTestTopology ): ++ ++ def testSDNChaining(self): ++ """ ++ Create a two data centers and interconnect them with additional ++ switches between them. ++ Uses Ryu SDN controller. ++ Connect the Docker hosts to different datacenters and setup the links between. 
++ """ ++ # create network ++ self.createNet( ++ nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, ++ autolinkswitches=True, ++ controller=RemoteController, ++ enable_learning=False) ++ # setup links ++ self.net.addLink(self.dc[0], self.s[0]) ++ self.net.addLink(self.s[2], self.dc[1]) ++ # start Mininet network ++ self.startNet() ++ ++ # add compute resources ++ vnf1 = self.dc[0].startCompute("vnf1", network=[{'id':'intf1', 'ip':'10.0.10.1/24'}]) ++ vnf2 = self.dc[1].startCompute("vnf2", network=[{'id':'intf2', 'ip':'10.0.10.2/24'}]) ++ # check number of running nodes ++ self.assertTrue(len(self.getDockernetContainers()) == 2) ++ self.assertTrue(len(self.net.hosts) == 2) ++ self.assertTrue(len(self.net.switches) == 5) ++ # check status ++ # check get status ++ s1 = self.dc[0].containers.get("vnf1").getStatus() ++ self.assertTrue(s1["name"] == "vnf1") ++ self.assertTrue(s1["state"]["Running"]) ++ self.assertTrue(s1["network"][0]['intf_name'] == 'intf1') ++ self.assertTrue(s1["network"][0]['ip'] == '10.0.10.1') ++ ++ s2 = self.dc[1].containers.get("vnf2").getStatus() ++ self.assertTrue(s2["name"] == "vnf2") ++ self.assertTrue(s2["state"]["Running"]) ++ self.assertTrue(s2["network"][0]['intf_name'] == 'intf2') ++ self.assertTrue(s2["network"][0]['ip'] == '10.0.10.2') ++ ++ # setup links ++ self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='add-flow') ++ # check connectivity by using ping ++ self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0) ++ # stop Mininet network ++ self.stopNet() + + #@unittest.skip("disabled compute tests for development") + class testEmulatorCompute( SimpleTestTopology ): + """ + Tests to check the emulator's API to add and remove + compute resources at runtime. + """ + + def testAddSingleComputeSingleDC(self): + """ + Adds a single compute instance to + a single DC and checks its connectivity with a + manually added host. + """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 1) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 1) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) + self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1") + # check connectivity by using ping + self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0) + # stop Mininet network + self.stopNet() + + def testRemoveSingleComputeSingleDC(self): + """ + Test stop method for compute instances. + Check that the instance is really removed. 
+ """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 1) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 1) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + # check connectivity by using ping + self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0) + # remove compute resources + self.dc[0].stopCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 0) + self.assertTrue(len(self.net.hosts) == 1) + self.assertTrue(len(self.net.switches) == 1) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 0) + # stop Mininet network + self.stopNet() + + def testGetStatusSingleComputeSingleDC(self): + """ + Check if the getStatus functionality of EmulatorCompute + objects works well. + """ + # create network + self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0) + # setup links + self.net.addLink(self.dc[0], self.h[0]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 1) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 1) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute)) + self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1") + # check connectivity by using ping + self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0) + # check get status + s = self.dc[0].containers.get("vnf1").getStatus() + self.assertTrue(s["name"] == "vnf1") + self.assertTrue(s["state"]["Running"]) + # stop Mininet network + self.stopNet() + + def testConnectivityMultiDC(self): + """ + Test if compute instances started in different data centers + are able to talk to each other. + """ + # create network + self.createNet( + nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, + autolinkswitches=True) + # setup links + self.net.addLink(self.dc[0], self.s[0]) + self.net.addLink(self.dc[1], self.s[2]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + vnf2 = self.dc[1].startCompute("vnf2") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 2) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 5) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + self.assertTrue(len(self.dc[1].listCompute()) == 1) + # check connectivity by using ping + self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0) + # stop Mininet network + self.stopNet() + + def testInterleavedAddRemoveMultiDC(self): + """ + Test multiple, interleaved add and remove operations and ensure + that always all expected compute instances are reachable. 
+ """ + # create network + self.createNet( + nswitches=3, ndatacenter=2, nhosts=0, ndockers=0, + autolinkswitches=True) + # setup links + self.net.addLink(self.dc[0], self.s[0]) + self.net.addLink(self.dc[1], self.s[2]) + # start Mininet network + self.startNet() + # add compute resources + vnf1 = self.dc[0].startCompute("vnf1") + vnf2 = self.dc[1].startCompute("vnf2") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 2) + self.assertTrue(len(self.net.hosts) == 2) + self.assertTrue(len(self.net.switches) == 5) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 1) + self.assertTrue(len(self.dc[1].listCompute()) == 1) + # check connectivity by using ping + self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0) + # remove compute resources + self.dc[0].stopCompute("vnf1") + # check number of running nodes + self.assertTrue(len(self.getContainernetContainers()) == 1) + self.assertTrue(len(self.net.hosts) == 1) + self.assertTrue(len(self.net.switches) == 5) + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 0) + self.assertTrue(len(self.dc[1].listCompute()) == 1) + # add compute resources + vnf3 = self.dc[0].startCompute("vnf3") + vnf4 = self.dc[0].startCompute("vnf4") + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 2) + self.assertTrue(len(self.dc[1].listCompute()) == 1) + self.assertTrue(self.net.ping([vnf3, vnf2]) <= 0.0) + self.assertTrue(self.net.ping([vnf4, vnf2]) <= 0.0) + # remove compute resources + self.dc[0].stopCompute("vnf3") + self.dc[0].stopCompute("vnf4") + self.dc[1].stopCompute("vnf2") + # check compute list result + self.assertTrue(len(self.dc[0].listCompute()) == 0) + self.assertTrue(len(self.dc[1].listCompute()) == 0) + # stop Mininet network + self.stopNet() + + if __name__ == '__main__': + unittest.main()