2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
8 from subprocess
import Popen
11 from mininet
.net
import Dockernet
12 from mininet
.node
import Controller
, OVSSwitch
, OVSKernelSwitch
, Switch
, Docker
, Host
, RemoteController
13 from mininet
.cli
import CLI
14 from mininet
.log
import setLogLevel
, info
, debug
15 from mininet
.link
import TCLink
17 from emuvim
.dcemulator
.monitoring
import DCNetworkMonitor
18 from emuvim
.dcemulator
.node
import Datacenter
, EmulatorCompute
19 from emuvim
.dcemulator
.resourcemodel
import ResourceModelRegistrar
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """
30 def __init__(self
, dc_emulation_max_cpu
=1.0, **kwargs
):
32 Create an extended version of a Dockernet network
33 :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
34 :param kwargs: path through for Mininet parameters
39 # call original Docker.__init__ and setup default controller
41 self
, controller
=RemoteController
, switch
=OVSKernelSwitch
, **kwargs
)
43 # ass a remote controller to be able to use Ryu
44 self
.addController('c0', controller
=RemoteController
)
46 # graph of the complete DC network
47 self
.DCNetwork_graph
=nx
.DiGraph()
50 self
.monitor_agent
= DCNetworkMonitor(self
)
52 # start Ryu controller
55 # initialize resource model registrar
56 self
.rm_registrar
= ResourceModelRegistrar(dc_emulation_max_cpu
)
58 def addDatacenter(self
, label
, metadata
={}):
60 Create and add a logical cloud data center to the network.
63 raise Exception("Data center label already exists: %s" % label
)
64 dc
= Datacenter(label
, metadata
=metadata
)
65 dc
.net
= self
# set reference to network
67 dc
.create() # finally create the data center in our Mininet instance
68 logging
.info("added data center: %s" % label
)
71 def addLink(self
, node1
, node2
, **params
):
73 Able to handle Datacenter objects as link
76 assert node1
is not None
77 assert node2
is not None
78 logging
.debug("addLink: n1=%s n2=%s" % (str(node1
), str(node2
)))
79 # ensure type of node1
80 if isinstance( node1
, basestring
):
82 node1
= self
.dcs
[node1
].switch
83 if isinstance( node1
, Datacenter
):
85 # ensure type of node2
86 if isinstance( node2
, basestring
):
88 node2
= self
.dcs
[node2
].switch
89 if isinstance( node2
, Datacenter
):
91 # try to give containers a default IP
92 if isinstance( node1
, Docker
):
93 if "params1" not in params
:
94 params
["params1"] = {}
95 if "ip" not in params
["params1"]:
96 params
["params1"]["ip"] = self
.getNextIp()
97 if isinstance( node2
, Docker
):
98 if "params2" not in params
:
99 params
["params2"] = {}
100 if "ip" not in params
["params2"]:
101 params
["params2"]["ip"] = self
.getNextIp()
102 # ensure that we allow TCLinks between data centers
103 # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
104 # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
105 if "cls" not in params
:
106 params
["cls"] = TCLink
108 link
= Dockernet
.addLink(self
, node1
, node2
, **params
)
110 # add edge and assigned port number to graph in both directions between node1 and node2
111 self
.DCNetwork_graph
.add_edge(node1
.name
, node2
.name
, \
112 {'src_port': node1
.ports
[link
.intf1
], 'dst_port': node2
.ports
[link
.intf2
]})
113 self
.DCNetwork_graph
.add_edge(node2
.name
, node1
.name
, \
114 {'src_port': node2
.ports
[link
.intf2
], 'dst_port': node1
.ports
[link
.intf1
]})
118 def addDocker( self
, label
, **params
):
120 Wrapper for addDocker method to use custom container class.
122 self
.DCNetwork_graph
.add_node(label
)
123 return Dockernet
.addDocker(self
, label
, cls
=EmulatorCompute
, **params
)
125 def removeDocker( self
, label
, **params
):
127 Wrapper for removeDocker method to update graph.
129 self
.DCNetwork_graph
.remove_node(label
)
130 return Dockernet
.removeDocker(self
, label
, **params
)
132 def addSwitch( self
, name
, add_to_graph
=True, **params
):
134 Wrapper for addSwitch method to store switch also in graph.
137 self
.DCNetwork_graph
.add_node(name
)
138 return Dockernet
.addSwitch(self
, name
, protocols
='OpenFlow10,OpenFlow12,OpenFlow13', **params
)
140 def getAllContainers(self
):
142 Returns a list with all containers within all data centers.
145 for dc
in self
.dcs
.itervalues():
146 all_containers
+= dc
.listCompute()
147 return all_containers
151 for dc
in self
.dcs
.itervalues():
153 Dockernet
.start(self
)
156 # stop Ryu controller
157 self
.ryu_process
.terminate()
158 #self.ryu_process.kill()
164 # to remove chain do setChain( src, dst, cmd='del-flows')
165 def setChain(self
, vnf_src_name
, vnf_dst_name
, cmd
='add-flow'):
167 path
= nx
.shortest_path(self
.DCNetwork_graph
, vnf_src_name
, vnf_dst_name
)
168 logging
.info("Path between {0} and {1}: {2}".format(vnf_src_name
, vnf_dst_name
, path
))
170 current_hop
= vnf_src_name
171 for i
in range(0,len(path
)):
172 next_hop
= path
[path
.index(current_hop
)+1]
173 next_node
= self
.getNodeByName(next_hop
)
175 if next_hop
== vnf_dst_name
:
176 return "path added between {0} and {1}".format(vnf_src_name
, vnf_dst_name
)
177 elif not isinstance( next_node
, OVSSwitch
):
178 logging
.info("Next node: {0} is not a switch".format(next_hop
))
179 return "Next node: {0} is not a switch".format(next_hop
)
182 switch_inport
= self
.DCNetwork_graph
[current_hop
][next_hop
]['dst_port']
183 next2_hop
= path
[path
.index(current_hop
)+2]
184 switch_outport
= self
.DCNetwork_graph
[next_hop
][next2_hop
]['src_port']
186 logging
.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node
.name
, switch_inport
, switch_outport
))
187 # set of entry via ovs-ofctl
188 # TODO use rest API of ryu to set flow entries to correct witch dpid
189 if isinstance( next_node
, OVSSwitch
):
190 match
= 'in_port=%s' % switch_inport
193 action
= 'action=%s' % switch_outport
195 ofcmd
= s
.join([match
,action
])
196 elif cmd
=='del-flows':
201 next_node
.dpctl(cmd
, ofcmd
)
203 current_hop
= next_hop
205 return "destination node: {0} not reached".format(vnf_dst_name
)
207 # start Ryu Openflow controller as Remote Controller for the DCNetwork
209 # start Ryu controller with rest-API
210 python_install_path
= site
.getsitepackages()[0]
211 ryu_path
= python_install_path
+ '/ryu/app/simple_switch_13.py'
212 ryu_path2
= python_install_path
+ '/ryu/app/ofctl_rest.py'
213 # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
214 # Ryu still uses 6633 as default
215 ryu_option
= '--ofp-tcp-listen-port'
217 ryu_cmd
= 'ryu-manager'
218 FNULL
= open(os
.devnull
, 'w')
219 self
.ryu_process
= Popen([ryu_cmd
, ryu_path
, ryu_path2
, ryu_option
, ryu_of_port
], stdout
=FNULL
, stderr
=FNULL
)