import urllib2
from functools import partial
- from mininet.net import Dockernet
+ from mininet.net import Containernet
from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
from mininet.cli import CLI
from mininet.link import TCLink
from emuvim.dcemulator.node import Datacenter, EmulatorCompute
from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
- class DCNetwork(Dockernet):
+ class DCNetwork(Containernet):
"""
- Wraps the original Mininet/Dockernet class and provides
+ Wraps the original Mininet/Containernet class and provides
methods to add data centers, switches, etc.
This class is used by topology definition scripts.
"""
def __init__(self, controller=RemoteController, monitor=False,
+ enable_learning = True, # in case of RemoteController (Ryu), learning switch behavior can be turned off/on
dc_emulation_max_cpu=1.0, # fraction of overall CPU time for emulation
dc_emulation_max_mem=512, # emulation max mem in MB
**kwargs):
"""
- Create an extended version of a Dockernet network
+ Create an extended version of a Containernet network
:param dc_emulation_max_cpu: max. CPU time used by containers in data centers
:param kwargs: path through for Mininet parameters
:return:
self.dcs = {}
# call original Docker.__init__ and setup default controller
- Dockernet.__init__(
+ Containernet.__init__(
- self, switch=OVSKernelSwitch, **kwargs)
+ self, switch=OVSKernelSwitch, controller=controller, **kwargs)
+
+
# Ryu management
self.ryu_process = None
if controller == RemoteController:
# start Ryu controller
- self.startRyu()
+ self.startRyu(learning_switch=enable_learning)
# add the specified controller
self.addController('c0', controller=controller)
params["params2"]["ip"] = self.getNextIp()
# ensure that we allow TCLinks between data centers
# TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
- # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
+ # see Containernet issue: https://github.com/mpeuster/containernet/issues/3
if "cls" not in params:
params["cls"] = TCLink
- link = Dockernet.addLink(self, node1, node2, **params)
+ link = Containernet.addLink(self, node1, node2, **params)
# try to give container interfaces a default id
node1_port_id = node1.ports[link.intf1]
# add edge and assigned port number to graph in both directions between node1 and node2
# port_id: id given in descriptor (if available, otherwise same as port)
- # port: portnumber assigned by Dockernet
+ # port: portnumber assigned by Containernet
attr_dict = {}
# possible weight metrics allowed by TClink class:
Wrapper for addDocker method to use custom container class.
"""
self.DCNetwork_graph.add_node(label)
- return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)
+ return Containernet.addDocker(self, label, cls=EmulatorCompute, **params)
def removeDocker( self, label, **params ):
"""
Wrapper for removeDocker method to update graph.
"""
self.DCNetwork_graph.remove_node(label)
- return Dockernet.removeDocker(self, label, **params)
+ return Containernet.removeDocker(self, label, **params)
def addSwitch( self, name, add_to_graph=True, **params ):
"""
"""
if add_to_graph:
self.DCNetwork_graph.add_node(name)
- return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
+ return Containernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
def getAllContainers(self):
"""
# start
for dc in self.dcs.itervalues():
dc.start()
- Dockernet.start(self)
+ Containernet.start(self)
def stop(self):
self.monitor_agent.stop()
# stop emulator net
- Dockernet.stop(self)
+ Containernet.stop(self)
# stop Ryu controller
self.stopRyu()
kwargs['vlan'] = vlan
kwargs['path'] = path
kwargs['current_hop'] = current_hop
- ## set flow entry via ovs-ofctl
- #self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
- ## set flow entry via ryu rest api
- self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+
+ if self.controller == RemoteController:
+ ## set flow entry via ryu rest api
+ self._set_flow_entry_ryu_rest(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+ else:
+ ## set flow entry via ovs-ofctl
+ self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
+
+
# take first link between switches by default
if isinstance( next_node, OVSSwitch ):
flow = {}
flow['dpid'] = int(node.dpid, 16)
+ logging.info('node name:{0}'.format(node.name))
+
if cookie:
flow['cookie'] = int(cookie)
# http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#add-a-flow-entry
if cmd == 'add-flow':
prefix = 'stats/flowentry/add'
- action = {}
- action['type'] = 'OUTPUT'
- action['port'] = switch_outport_nr
- flow['actions'].append(action)
if vlan != None:
if path.index(current_hop) == 0: # first node
action = {}
action['type'] = 'PUSH_VLAN' # Push a new VLAN tag if a input frame is non-VLAN-tagged
action['ethertype'] = 33024 # Ethertype 0x8100(=33024): IEEE 802.1Q VLAN-tagged frame
+ flow['actions'].append(action)
+ action = {}
action['type'] = 'SET_FIELD'
action['field'] = 'vlan_vid'
action['value'] = vlan
flow['actions'].append(action)
else: # middle nodes
match += ',dl_vlan=%s' % vlan
+ # output action must come last
+ action = {}
+ action['type'] = 'OUTPUT'
+ action['port'] = switch_outport_nr
+ flow['actions'].append(action)
#flow['match'] = self._parse_match(match)
elif cmd == 'del-flows':
#del(flow['actions'])
switch_outport_nr, cmd))
# start Ryu Openflow controller as Remote Controller for the DCNetwork
- def startRyu(self):
+ def startRyu(self, learning_switch=True):
# start Ryu controller with rest-API
python_install_path = site.getsitepackages()[0]
ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
ryu_of_port = '6653'
ryu_cmd = 'ryu-manager'
FNULL = open("/tmp/ryu.log", 'w')
- self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
- # no learning switch
- #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ if learning_switch:
+ self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
+ else:
+ # no learning switch
+ self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
time.sleep(1)
def stopRyu(self):
self.ryu_process.kill()
def ryu_REST(self, prefix, dpid=None, data=None):
- if dpid:
- url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)
- else:
- url = self.ryu_REST_api + '/' + str(prefix)
- if data:
- #logging.info('POST: {0}'.format(str(data)))
- req = urllib2.Request(url, str(data))
- else:
- req = urllib2.Request(url)
+ try:
+ if dpid:
+ url = self.ryu_REST_api + '/' + str(prefix) + '/' + str(dpid)
+ else:
+ url = self.ryu_REST_api + '/' + str(prefix)
+ if data:
+ #logging.info('POST: {0}'.format(str(data)))
+ req = urllib2.Request(url, str(data))
+ else:
+ req = urllib2.Request(url)
- ret = urllib2.urlopen(req).read()
- return ret
+ ret = urllib2.urlopen(req).read()
+ return ret
+ except:
+ logging.info('error url: {0}'.format(str(url)))
+ if data: logging.info('error POST: {0}'.format(str(data)))
# need to respect that some match fields must be integers
# http://ryu.readthedocs.io/en/latest/app/ofctl_rest.html#description-of-match-and-actions
--- /dev/null
+ """
+ Test suite to automatically test emulator functionalities.
+ Directly interacts with the emulator through the Mininet-like
+ Python API.
+
+ Does not test API endpoints. This is done in separated test suites.
+ """
+
+ import time
+ import unittest
+ from emuvim.dcemulator.node import EmulatorCompute
+ from emuvim.test.base import SimpleTestTopology
++from mininet.node import RemoteController
+
+
+ #@unittest.skip("disabled topology tests for development")
+ class testEmulatorTopology( SimpleTestTopology ):
+ """
+ Tests to check the topology API of the emulator.
+ """
+
+ def testSingleDatacenter(self):
+ """
+ Create a single data center and check if its switch is up
+ by using manually added hosts. Tests especially the
+ data center specific addLink method.
+ """
+ # create network
+ self.createNet(nswitches=0, ndatacenter=1, nhosts=2, ndockers=0)
+ # setup links
+ self.net.addLink(self.dc[0], self.h[0])
+ self.net.addLink(self.h[1], self.dc[0])
+ # start Mininet network
+ self.startNet()
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
+ # check connectivity by using ping
+ self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+ # stop Mininet network
+ self.stopNet()
+
+ #@unittest.skip("disabled to test if CI fails because this is the first test.")
+ def testMultipleDatacenterDirect(self):
+ """
+ Create two data centers and interconnect them.
+ """
+ # create network
+ self.createNet(nswitches=0, ndatacenter=2, nhosts=2, ndockers=0)
+ # setup links
+ self.net.addLink(self.dc[0], self.h[0])
+ self.net.addLink(self.h[1], self.dc[1])
+ self.net.addLink(self.dc[0], self.dc[1])
+ # start Mininet network
+ self.startNet()
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 2)
+ # check connectivity by using ping
+ self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+ # stop Mininet network
+ self.stopNet()
+
+ def testMultipleDatacenterWithIntermediateSwitches(self):
+ """
+ Create two data centers and interconnect them with additional
+ switches between them.
+ """
+ # create network
+ self.createNet(
+ nswitches=3, ndatacenter=2, nhosts=2, ndockers=0,
+ autolinkswitches=True)
+ # setup links
+ self.net.addLink(self.dc[0], self.h[0])
+ self.net.addLink(self.h[1], self.dc[1])
+ self.net.addLink(self.dc[0], self.s[0])
+ self.net.addLink(self.s[2], self.dc[1])
+ # start Mininet network
+ self.startNet()
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 5)
+ # check connectivity by using ping
+ self.assertTrue(self.net.ping([self.h[0], self.h[1]]) <= 0.0)
+ # stop Mininet network
+ self.stopNet()
+
++class testEmulatorNetworking( SimpleTestTopology ):
++
++ def testSDNChaining(self):
++ """
++ Create two data centers and interconnect them with additional
++ switches between them.
++ Uses Ryu SDN controller.
++ Connect the Docker hosts to different datacenters and set up the links between them.
++ """
++ # create network
++ self.createNet(
++ nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
++ autolinkswitches=True,
++ controller=RemoteController,
++ enable_learning=False)
++ # setup links
++ self.net.addLink(self.dc[0], self.s[0])
++ self.net.addLink(self.s[2], self.dc[1])
++ # start Mininet network
++ self.startNet()
++
++ # add compute resources
++ vnf1 = self.dc[0].startCompute("vnf1", network=[{'id':'intf1', 'ip':'10.0.10.1/24'}])
++ vnf2 = self.dc[1].startCompute("vnf2", network=[{'id':'intf2', 'ip':'10.0.10.2/24'}])
++ # check number of running nodes
++ self.assertTrue(len(self.getContainernetContainers()) == 2)
++ self.assertTrue(len(self.net.hosts) == 2)
++ self.assertTrue(len(self.net.switches) == 5)
++ # check status
++ # check get status
++ s1 = self.dc[0].containers.get("vnf1").getStatus()
++ self.assertTrue(s1["name"] == "vnf1")
++ self.assertTrue(s1["state"]["Running"])
++ self.assertTrue(s1["network"][0]['intf_name'] == 'intf1')
++ self.assertTrue(s1["network"][0]['ip'] == '10.0.10.1')
++
++ s2 = self.dc[1].containers.get("vnf2").getStatus()
++ self.assertTrue(s2["name"] == "vnf2")
++ self.assertTrue(s2["state"]["Running"])
++ self.assertTrue(s2["network"][0]['intf_name'] == 'intf2')
++ self.assertTrue(s2["network"][0]['ip'] == '10.0.10.2')
++
++ # setup links
++ self.net.setChain('vnf1', 'vnf2', 'intf1', 'intf2', bidirectional=True, cmd='add-flow')
++ # check connectivity by using ping
++ self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
++ # stop Mininet network
++ self.stopNet()
+
+ #@unittest.skip("disabled compute tests for development")
+ class testEmulatorCompute( SimpleTestTopology ):
+ """
+ Tests to check the emulator's API to add and remove
+ compute resources at runtime.
+ """
+
+ def testAddSingleComputeSingleDC(self):
+ """
+ Adds a single compute instance to
+ a single DC and checks its connectivity with a
+ manually added host.
+ """
+ # create network
+ self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
+ # setup links
+ self.net.addLink(self.dc[0], self.h[0])
+ # start Mininet network
+ self.startNet()
+ # add compute resources
+ vnf1 = self.dc[0].startCompute("vnf1")
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 1)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
+ # check compute list result
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
+ self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+ self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
+ # check connectivity by using ping
+ self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
+ # stop Mininet network
+ self.stopNet()
+
+ def testRemoveSingleComputeSingleDC(self):
+ """
+ Test stop method for compute instances.
+ Check that the instance is really removed.
+ """
+ # create network
+ self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
+ # setup links
+ self.net.addLink(self.dc[0], self.h[0])
+ # start Mininet network
+ self.startNet()
+ # add compute resources
+ vnf1 = self.dc[0].startCompute("vnf1")
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 1)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
+ # check compute list result
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
+ # check connectivity by using ping
+ self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
+ # remove compute resources
+ self.dc[0].stopCompute("vnf1")
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 0)
+ self.assertTrue(len(self.net.hosts) == 1)
+ self.assertTrue(len(self.net.switches) == 1)
+ # check compute list result
+ self.assertTrue(len(self.dc[0].listCompute()) == 0)
+ # stop Mininet network
+ self.stopNet()
+
+ def testGetStatusSingleComputeSingleDC(self):
+ """
+ Check if the getStatus functionality of EmulatorCompute
+ objects works well.
+ """
+ # create network
+ self.createNet(nswitches=0, ndatacenter=1, nhosts=1, ndockers=0)
+ # setup links
+ self.net.addLink(self.dc[0], self.h[0])
+ # start Mininet network
+ self.startNet()
+ # add compute resources
+ vnf1 = self.dc[0].startCompute("vnf1")
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 1)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 1)
+ # check compute list result
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
+ self.assertTrue(isinstance(self.dc[0].listCompute()[0], EmulatorCompute))
+ self.assertTrue(self.dc[0].listCompute()[0].name == "vnf1")
+ # check connectivity by using ping
+ self.assertTrue(self.net.ping([self.h[0], vnf1]) <= 0.0)
+ # check get status
+ s = self.dc[0].containers.get("vnf1").getStatus()
+ self.assertTrue(s["name"] == "vnf1")
+ self.assertTrue(s["state"]["Running"])
+ # stop Mininet network
+ self.stopNet()
+
+ def testConnectivityMultiDC(self):
+ """
+ Test if compute instances started in different data centers
+ are able to talk to each other.
+ """
+ # create network
+ self.createNet(
+ nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
+ autolinkswitches=True)
+ # setup links
+ self.net.addLink(self.dc[0], self.s[0])
+ self.net.addLink(self.dc[1], self.s[2])
+ # start Mininet network
+ self.startNet()
+ # add compute resources
+ vnf1 = self.dc[0].startCompute("vnf1")
+ vnf2 = self.dc[1].startCompute("vnf2")
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 2)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 5)
+ # check compute list result
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
+ self.assertTrue(len(self.dc[1].listCompute()) == 1)
+ # check connectivity by using ping
+ self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
+ # stop Mininet network
+ self.stopNet()
+
+ def testInterleavedAddRemoveMultiDC(self):
+ """
+ Test multiple, interleaved add and remove operations and ensure
+ that always all expected compute instances are reachable.
+ """
+ # create network
+ self.createNet(
+ nswitches=3, ndatacenter=2, nhosts=0, ndockers=0,
+ autolinkswitches=True)
+ # setup links
+ self.net.addLink(self.dc[0], self.s[0])
+ self.net.addLink(self.dc[1], self.s[2])
+ # start Mininet network
+ self.startNet()
+ # add compute resources
+ vnf1 = self.dc[0].startCompute("vnf1")
+ vnf2 = self.dc[1].startCompute("vnf2")
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 2)
+ self.assertTrue(len(self.net.hosts) == 2)
+ self.assertTrue(len(self.net.switches) == 5)
+ # check compute list result
+ self.assertTrue(len(self.dc[0].listCompute()) == 1)
+ self.assertTrue(len(self.dc[1].listCompute()) == 1)
+ # check connectivity by using ping
+ self.assertTrue(self.net.ping([vnf1, vnf2]) <= 0.0)
+ # remove compute resources
+ self.dc[0].stopCompute("vnf1")
+ # check number of running nodes
+ self.assertTrue(len(self.getContainernetContainers()) == 1)
+ self.assertTrue(len(self.net.hosts) == 1)
+ self.assertTrue(len(self.net.switches) == 5)
+ # check compute list result
+ self.assertTrue(len(self.dc[0].listCompute()) == 0)
+ self.assertTrue(len(self.dc[1].listCompute()) == 1)
+ # add compute resources
+ vnf3 = self.dc[0].startCompute("vnf3")
+ vnf4 = self.dc[0].startCompute("vnf4")
+ # check compute list result
+ self.assertTrue(len(self.dc[0].listCompute()) == 2)
+ self.assertTrue(len(self.dc[1].listCompute()) == 1)
+ self.assertTrue(self.net.ping([vnf3, vnf2]) <= 0.0)
+ self.assertTrue(self.net.ping([vnf4, vnf2]) <= 0.0)
+ # remove compute resources
+ self.dc[0].stopCompute("vnf3")
+ self.dc[0].stopCompute("vnf4")
+ self.dc[1].stopCompute("vnf2")
+ # check compute list result
+ self.assertTrue(len(self.dc[0].listCompute()) == 0)
+ self.assertTrue(len(self.dc[1].listCompute()) == 0)
+ # stop Mininet network
+ self.stopNet()
+
+ if __name__ == '__main__':
+ unittest.main()