"""
Distributed Cloud Emulator (dcemulator)
(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
"""
9 from subprocess
import Popen
15 from mininet
.net
import Dockernet
16 from mininet
.node
import Controller
, DefaultController
, OVSSwitch
, OVSKernelSwitch
, Docker
, RemoteController
17 from mininet
.cli
import CLI
18 from mininet
.link
import TCLink
20 from emuvim
.dcemulator
.monitoring
import DCNetworkMonitor
21 from emuvim
.dcemulator
.node
import Datacenter
, EmulatorCompute
22 from emuvim
.dcemulator
.resourcemodel
import ResourceModelRegistrar
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """
    def __init__(self, controller=RemoteController, monitor=False,
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Dockernet network.

        :param controller: Mininet controller class; the default
            RemoteController triggers the start of a local Ryu controller
        :param monitor: if True, attach a DCNetworkMonitor agent to the net
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param dc_emulation_max_mem: max. memory (MB) available to emulated containers
        :param kwargs: path through for Mininet parameters
        """
        # registry of all data centers, keyed by label (filled by addDatacenter)
        self.dcs = {}

        # call original Docker.__init__ and setup default controller
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # process handle of the locally spawned Ryu controller (None if unused)
        self.ryu_process = None
        if controller == RemoteController:
            # start Ryu controller
            self.startRyu()

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        self.DCNetwork_graph = nx.MultiDiGraph()

        # initialize pool of vlan tags to setup the SDN paths
        # (reversed so .pop() hands out low tags first)
        self.vlans = range(4096)[::-1]

        # optional monitoring agent
        if monitor:
            self.monitor_agent = DCNetworkMonitor(self)
        else:
            self.monitor_agent = None

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)
73 def addDatacenter(self
, label
, metadata
={}, resource_log_path
=None):
75 Create and add a logical cloud data center to the network.
78 raise Exception("Data center label already exists: %s" % label
)
79 dc
= Datacenter(label
, metadata
=metadata
, resource_log_path
=resource_log_path
)
80 dc
.net
= self
# set reference to network
82 dc
.create() # finally create the data center in our Mininet instance
83 logging
.info("added data center: %s" % label
)
    def addLink(self, node1, node2, **params):
        """
        Able to handle Datacenter objects as link
        end points: labels and Datacenter objects are resolved to their
        switches, containers get default IPs, and the link is recorded
        (with port ids/numbers) in the DC network graph in both directions.

        :param node1: node name (string), Datacenter object, or Mininet node
        :param node2: node name (string), Datacenter object, or Mininet node
        :param params: passed through to Dockernet.addLink; may carry
            per-endpoint dicts 'params1'/'params2' and TCLink weight metrics
        :return: the created link object
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1
        if isinstance( node1, basestring ):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance( node1, Datacenter ):
            node1 = node1.switch
        # ensure type of node2
        if isinstance( node2, basestring ):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance( node2, Datacenter ):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance( node1, Docker ):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance( node2, Docker ):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # try to give container interfaces a default id
        node1_port_id = node1.ports[link.intf1]
        if isinstance(node1, Docker):
            if "id" in params["params1"]:
                node1_port_id = params["params1"]["id"]
        node1_port_name = link.intf1.name

        node2_port_id = node2.ports[link.intf2]
        if isinstance(node2, Docker):
            if "id" in params["params2"]:
                node2_port_id = params["params2"]["id"]
        node2_port_name = link.intf2.name

        # add edge and assigned port number to graph in both directions between node1 and node2
        # port_id: id given in descriptor (if available, otherwise same as port)
        # port: portnumber assigned by Dockernet

        attr_dict = {}
        # possible weight metrics allowed by TClink class:
        weight_metrics = ['bw', 'delay', 'jitter', 'loss']
        edge_attributes = [p for p in params if p in weight_metrics]
        for attr in edge_attributes:
            # if delay: strip ms (need number as weight in graph)
            match = re.search('([0-9]*\.?[0-9]+)', params[attr])
            if match:
                attr_number = match.group(1)
            else:
                attr_number = None
            attr_dict[attr] = attr_number

        # forward direction: node1 -> node2
        attr_dict2 = {'src_port_id': node1_port_id, 'src_port_nr': node1.ports[link.intf1],
                      'src_port_name': node1_port_name,
                      'dst_port_id': node2_port_id, 'dst_port_nr': node2.ports[link.intf2],
                      'dst_port_name': node2_port_name}
        attr_dict2.update(attr_dict)
        self.DCNetwork_graph.add_edge(node1.name, node2.name, attr_dict=attr_dict2)

        # reverse direction: node2 -> node1 (src/dst ports swapped)
        attr_dict2 = {'src_port_id': node2_port_id, 'src_port_nr': node2.ports[link.intf2],
                      'src_port_name': node2_port_name,
                      'dst_port_id': node1_port_id, 'dst_port_nr': node1.ports[link.intf1],
                      'dst_port_name': node1_port_name}
        attr_dict2.update(attr_dict)
        self.DCNetwork_graph.add_edge(node2.name, node1.name, attr_dict=attr_dict2)

        return link
173 def addDocker( self
, label
, **params
):
175 Wrapper for addDocker method to use custom container class.
177 self
.DCNetwork_graph
.add_node(label
)
178 return Dockernet
.addDocker(self
, label
, cls
=EmulatorCompute
, **params
)
180 def removeDocker( self
, label
, **params
):
182 Wrapper for removeDocker method to update graph.
184 self
.DCNetwork_graph
.remove_node(label
)
185 return Dockernet
.removeDocker(self
, label
, **params
)
    def addSwitch( self, name, add_to_graph=True, **params ):
        """
        Wrapper for addSwitch method to store switch also in graph.

        :param name: name of the new switch
        :param add_to_graph: if True, also register the switch as a node
            in the DC network graph
        :param params: passed through to Dockernet.addSwitch
        :return: the created switch object
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        # enable several OpenFlow versions so both legacy entries and the
        # OF1.3 entries used for VLAN-based chaining work on the same switch
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
195 def getAllContainers(self
):
197 Returns a list with all containers within all data centers.
200 for dc
in self
.dcs
.itervalues():
201 all_containers
+= dc
.listCompute()
202 return all_containers
    def start(self):
        """
        Start the emulation: bring up every data center first, then the
        underlying Dockernet network.
        """
        # start
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)
    def stop(self):
        """
        Stop the emulation: monitoring agent, the emulated network itself,
        and finally the locally started Ryu controller (if any).
        """
        # stop the monitor agent
        if self.monitor_agent is not None:
            self.monitor_agent.stop()

        # stop the emulation network
        Dockernet.stop(self)

        # stop Ryu controller
        self.stopRyu()
226 # to remove chain do setChain( src, dst, cmd='del-flows')
227 def setChain(self
, vnf_src_name
, vnf_dst_name
, vnf_src_interface
=None, vnf_dst_interface
=None, **kwargs
):
228 cmd
= kwargs
.get('cmd')
229 if cmd
== 'add-flow':
230 ret
= self
._chainAddFlow
(vnf_src_name
, vnf_dst_name
, vnf_src_interface
, vnf_dst_interface
, **kwargs
)
231 if kwargs
.get('bidirectional'):
232 return ret
+'\n' + self
._chainAddFlow
(vnf_dst_name
, vnf_src_name
, vnf_dst_interface
, vnf_src_interface
, **kwargs
)
234 elif cmd
== 'del-flows': # TODO: del-flow to be implemented
235 ret
= self
._chainAddFlow
(vnf_src_name
, vnf_dst_name
, vnf_src_interface
, vnf_dst_interface
, **kwargs
)
236 if kwargs
.get('bidirectional'):
237 return ret
+ '\n' + self
._chainAddFlow
(vnf_dst_name
, vnf_src_name
, vnf_dst_interface
, vnf_src_interface
, **kwargs
)
240 return "Command unknown"
    def _chainAddFlow(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, **kwargs):
        """
        Program flow entries along the shortest switch path between two VNFs.

        :param vnf_src_name: name of the source VNF
        :param vnf_dst_name: name of the destination VNF (may carry ':port')
        :param vnf_src_interface: source port id (first interface if None)
        :param vnf_dst_interface: destination port id (first interface if None)
        :param kwargs: 'weight' for the shortest-path computation; 'cmd',
            'cookie', 'match', ... are handed down to _set_flow_entry_dpctl
        :return: human-readable status string
        """
        # TODO: this needs to be cleaned up
        #check if port is specified (vnf:port)
        if vnf_src_interface is None:
            # take first interface by default
            connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            vnf_src_interface = link_dict[0]['src_port_id']

        # locate the switch the source VNF is attached to and the switch
        # port its traffic enters on
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            for link in link_dict:
                if link_dict[link]['src_port_id'] == vnf_src_interface:
                    # found the right link and connected switch
                    src_sw = connected_sw
                    src_sw_inport_nr = link_dict[link]['dst_port_nr']
                    break

        if vnf_dst_interface is None:
            # take first interface by default
            connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            vnf_dst_interface = link_dict[0]['dst_port_id']

        # strip an optional ':port' suffix from the destination name
        vnf_dst_name = vnf_dst_name.split(':')[0]
        # locate the switch the destination VNF is attached to and the
        # switch port its traffic leaves on
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            for link in link_dict:
                if link_dict[link]['dst_port_id'] == vnf_dst_interface:
                    # found the right link and connected switch
                    dst_sw = connected_sw
                    dst_sw_outport_nr = link_dict[link]['src_port_nr']
                    break

        # compute the switch-level path between the attachment points
        try:
            # returns the first found shortest path
            # if all shortest paths are wanted, use: all_shortest_paths
            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw, weight=kwargs.get('weight'))
        except:
            logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)

        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        current_hop = src_sw
        switch_inport_nr = src_sw_inport_nr

        # choose free vlan if path contains more than 1 switch
        if len(path) > 1:
            vlan = self.vlans.pop()
        else:
            vlan = None

        # walk the path hop by hop and program each OVS switch
        for i in range(0,len(path)):
            current_node = self.getNodeByName(current_hop)

            if path.index(current_hop) < len(path)-1:
                next_hop = path[path.index(current_hop)+1]
            else:
                # last switch reached; next hop is the destination VNF itself
                next_hop = vnf_dst_name

            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                switch_outport_nr = dst_sw_outport_nr
                logging.info("end node reached: {0}".format(vnf_dst_name))
            elif not isinstance( next_node, OVSSwitch ):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)
            else:
                # take first link between switches by default
                index_edge_out = 0
                switch_outport_nr = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port_nr']

            # set of entry via ovs-ofctl
            if isinstance( current_node, OVSSwitch ):
                kwargs['vlan'] = vlan
                kwargs['path'] = path
                kwargs['current_hop'] = current_hop
                self._set_flow_entry_dpctl(current_node, switch_inport_nr, switch_outport_nr, **kwargs)
                # TODO set entry via Ryu REST api (in case emulator is running remote...)

            # advance: the ingress port of the next switch is the dst port
            # of this edge; take first link between switches by default
            if isinstance( next_node, OVSSwitch ):
                switch_inport_nr = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port_nr']
                current_hop = next_hop

        return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
    def _set_flow_entry_dpctl(self, node, switch_inport_nr, switch_outport_nr, **kwargs):
        """
        Build an ovs-ofctl match/action string and apply it on one switch.

        :param node: OVS switch node to program (via node.dpctl)
        :param switch_inport_nr: ingress port number to match on
        :param switch_outport_nr: egress port number used in the action
        :param kwargs: 'cookie', 'match', 'cmd' ('add-flow'/'del-flows'),
            'path', 'current_hop', 'vlan' (the latter three are set by
            _chainAddFlow)
        """
        match = 'in_port=%s' % switch_inport_nr

        cookie = kwargs.get('cookie')
        match_input = kwargs.get('match')
        cmd = kwargs.get('cmd')
        path = kwargs.get('path')
        current_hop = kwargs.get('current_hop')
        vlan = kwargs.get('vlan')

        # separator between ovs-ofctl match/action fields
        s = ','
        if cookie:
            cookie = 'cookie=%s' % cookie
            match = s.join([cookie, match])
        if match_input:
            match = s.join([match, match_input])
        if cmd == 'add-flow':
            action = 'action=%s' % switch_outport_nr
            if vlan != None:
                if path.index(current_hop) == 0:  # first node: tag the traffic with the chain vlan
                    action = ('action=mod_vlan_vid:%s' % vlan) + (',output=%s' % switch_outport_nr)
                    match = '-O OpenFlow13 ' + match
                elif path.index(current_hop) == len(path) - 1:  # last node: strip the vlan again
                    match += ',dl_vlan=%s' % vlan
                    action = 'action=strip_vlan,output=%s' % switch_outport_nr
                else:  # intermediate node: only match on the chain vlan
                    match += ',dl_vlan=%s' % vlan
            ofcmd = s.join([match, action])
        elif cmd == 'del-flows':
            # NOTE(review): deletion only needs the match part; this branch
            # body was reconstructed from the visible branch header — confirm
            # against version control
            ofcmd = match
        else:
            ofcmd = ''

        node.dpctl(cmd, ofcmd)
        logging.info("{3} in switch: {0} in_port: {1} out_port: {2}".format(node.name, switch_inport_nr,
                                                                            switch_outport_nr, cmd))
    # start Ryu Openflow controller as Remote Controller for the DCNetwork
    def startRyu(self):
        """
        Spawn a local Ryu SDN controller (with its REST API app) as a
        subprocess; stdout/stderr are redirected to /tmp/ryu.log.
        """
        # start Ryu controller with rest-API
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
        # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
        # Ryu still uses 6633 as default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        FNULL = open("/tmp/ryu.log", 'w')
        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
        #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
393 if self
.ryu_process
is not None:
394 self
.ryu_process
.terminate()
395 self
.ryu_process
.kill()