dbfde5ca2430cd271be1376d7950641315bdf656
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
9 from subprocess
import Popen
12 from mininet
.net
import Dockernet
13 from mininet
.node
import Controller
, DefaultController
, OVSSwitch
, OVSKernelSwitch
, Docker
, RemoteController
14 from mininet
.cli
import CLI
15 from mininet
.link
import TCLink
17 from emuvim
.dcemulator
.monitoring
import DCNetworkMonitor
18 from emuvim
.dcemulator
.node
import Datacenter
, EmulatorCompute
19 from emuvim
.dcemulator
.resourcemodel
import ResourceModelRegistrar
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """
    def __init__(self, controller=RemoteController, monitor=False,
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Dockernet network
        :param controller: Mininet controller class (default: RemoteController,
            which triggers a local Ryu instance)
        :param monitor: if True, attach a DCNetworkMonitor agent to this network
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param dc_emulation_max_mem: max. memory (MB) available to the emulation
        :param kwargs: path through for Mininet parameters
        :return:
        """
        # registry of data centers added via addDatacenter(), keyed by label
        self.dcs = {}

        # call original Docker.__init__ and setup default controller
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # Ryu controller process handle (set by startRyu, cleaned up in stopRyu)
        self.ryu_process = None
        if controller == RemoteController:
            # start Ryu controller
            self.startRyu()

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        # (nodes: containers + switches; edges carry port-number attributes)
        self.DCNetwork_graph = nx.MultiDiGraph()

        # monitoring agent (only if requested; stop() checks for None)
        if monitor:
            self.monitor_agent = DCNetworkMonitor(self)
        else:
            self.monitor_agent = None

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)
67 def addDatacenter(self
, label
, metadata
={}, resource_log_path
=None):
69 Create and add a logical cloud data center to the network.
72 raise Exception("Data center label already exists: %s" % label
)
73 dc
= Datacenter(label
, metadata
=metadata
, resource_log_path
=resource_log_path
)
74 dc
.net
= self
# set reference to network
76 dc
.create() # finally create the data center in our Mininet instance
77 logging
.info("added data center: %s" % label
)
    def addLink(self, node1, node2, **params):
        """
        Able to handle Datacenter objects as link
        end points: strings and Datacenter instances are resolved to the
        underlying switch before the link is created, and the resulting
        port assignment is mirrored in DCNetwork_graph (both directions).
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1: resolve data center labels / objects to switches
        if isinstance( node1, basestring ):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance( node1, Datacenter ):
            node1 = node1.switch
        # ensure type of node2
        if isinstance( node2, basestring ):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance( node2, Datacenter ):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance( node1, Docker ):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance( node2, Docker ):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # try to give container interfaces a default id
        # (a "params1"/"params2" dict is guaranteed to exist here when the
        # node is a Docker container — it was created above)
        node1_port_id = node1.ports[link.intf1]
        if isinstance(node1, Docker):
            if "id" in params["params1"]:
                node1_port_id = params["params1"]["id"]

        node2_port_id = node2.ports[link.intf2]
        if isinstance(node2, Docker):
            if "id" in params["params2"]:
                node2_port_id = params["params2"]["id"]

        # add edge and assigned port number to graph in both directions between node1 and node2
        # port_id: id given in descriptor (if available, otherwise same as port)
        # port: portnumber assigned by Dockernet
        self.DCNetwork_graph.add_edge(node1.name, node2.name,
                                      attr_dict={'src_port_id': node1_port_id, 'src_port': node1.ports[link.intf1],
                                                 'dst_port_id': node2_port_id, 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name,
                                      attr_dict={'src_port_id': node2_port_id, 'src_port': node2.ports[link.intf2],
                                                 'dst_port_id': node1_port_id, 'dst_port': node1.ports[link.intf1]})

        return link
143 def addDocker( self
, label
, **params
):
145 Wrapper for addDocker method to use custom container class.
147 self
.DCNetwork_graph
.add_node(label
)
148 return Dockernet
.addDocker(self
, label
, cls
=EmulatorCompute
, **params
)
150 def removeDocker( self
, label
, **params
):
152 Wrapper for removeDocker method to update graph.
154 self
.DCNetwork_graph
.remove_node(label
)
155 return Dockernet
.removeDocker(self
, label
, **params
)
157 def addSwitch( self
, name
, add_to_graph
=True, **params
):
159 Wrapper for addSwitch method to store switch also in graph.
162 self
.DCNetwork_graph
.add_node(name
)
163 return Dockernet
.addSwitch(self
, name
, protocols
='OpenFlow10,OpenFlow12,OpenFlow13', **params
)
165 def getAllContainers(self
):
167 Returns a list with all containers within all data centers.
170 for dc
in self
.dcs
.itervalues():
171 all_containers
+= dc
.listCompute()
172 return all_containers
    def start(self):
        """Start every data center, then the underlying Dockernet network."""
        # start each data center's compute resources first
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)
    def stop(self):
        """Shut down monitoring, the emulated network, and the Ryu controller."""
        # stop the monitor agent
        if self.monitor_agent is not None:
            self.monitor_agent.stop()
        # stop emulator net
        Dockernet.stop(self)
        # stop Ryu controller
        self.stopRyu()
    # to remove chain do setChain( src, dst, cmd='del-flows')
    def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, cmd='add-flow'):
        """
        Install (or remove) flow entries that chain two VNFs: find the
        switches both VNFs attach to, compute the shortest switch path
        between them, and program an in_port -> out_port rule on every
        switch along that path via ovs-ofctl (dpctl).

        :param vnf_src_name: source VNF node name in DCNetwork_graph
        :param vnf_dst_name: destination VNF node name (may carry a ':port'
            suffix, which is stripped before the graph lookup)
        :param vnf_src_interface: source port id; first attached interface if None
        :param vnf_dst_interface: destination port id; first attached interface if None
        :param cmd: dpctl command, 'add-flow' to install, 'del-flows' to remove
        :return: human-readable status message string
        """
        #check if port is specified (vnf:port)
        if vnf_src_interface is None:
            # take first interface by default
            connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            vnf_src_interface = link_dict[0]['src_port_id']
            #vnf_source_interface = 0

        # locate the switch the source VNF attaches to and the switch port
        # its traffic enters on
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            for link in link_dict:
                #logging.info("{0},{1}".format(link_dict[link],vnf_source_interface))
                if link_dict[link]['src_port_id'] == vnf_src_interface:
                    # found the right link and connected switch
                    #logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
                    src_sw = connected_sw

                    src_sw_inport = link_dict[link]['dst_port']
                    break

        if vnf_dst_interface is None:
            # take first interface by default
            connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            vnf_dst_interface = link_dict[0]['dst_port_id']
            #vnf_dest_interface = 0

        # strip an optional ':port' suffix, then locate the destination
        # switch and the port traffic must leave on
        vnf_dst_name = vnf_dst_name.split(':')[0]
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            for link in link_dict:
                if link_dict[link]['dst_port_id'] == vnf_dst_interface:
                    # found the right link and connected switch
                    dst_sw = connected_sw
                    dst_sw_outport = link_dict[link]['src_port']
                    break

        # get shortest path between the two attachment switches
        #path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
        try:
            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw)
        except:
            logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)

        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        #current_hop = vnf_src_name
        current_hop = src_sw
        switch_inport = src_sw_inport

        # walk the path switch by switch, programming each hop
        for i in range(0,len(path)):
            current_node = self.getNodeByName(current_hop)
            if path.index(current_hop) < len(path)-1:
                next_hop = path[path.index(current_hop)+1]
            else:
                # last switch on the path; next hop is the destination VNF itself
                next_hop = vnf_dst_name

            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                switch_outport = dst_sw_outport
                logging.info("end node reached: {0}".format(vnf_dst_name))
            elif not isinstance( next_node, OVSSwitch ):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)
            else:
                # take first link between switches by default
                index_edge_out = 0
                switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']

            # take into account that multiple edges are possible between 2 nodes
            #switch_inport = self.DCNetwork_graph[current_hop][next_hop][index_edge_in]['dst_port']

            #next2_hop = path[path.index(current_hop)+2]

            #switch_outport = self.DCNetwork_graph[next_hop][next2_hop][index_edge_out]['src_port']
            #switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']

            #logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport, switch_outport))
            # set of entry via ovs-ofctl
            # TODO use rest API of ryu to set flow entries to correct dpid
            # TODO this only sets port in to out, no match, so this will give trouble when multiple services are deployed...
            # TODO need multiple matches to do this (VLAN tags)
            if isinstance( current_node, OVSSwitch ):
                match = 'in_port=%s' % switch_inport

                if cmd == 'add-flow':
                    action = 'action=%s' % switch_outport
                    s = ','
                    ofcmd = s.join([match,action])
                elif cmd =='del-flows':
                    # deleting only needs the match part
                    ofcmd = match
                else:
                    ofcmd = ''

                current_node.dpctl(cmd, ofcmd)
                logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport,
                                                                                        switch_outport))
            # take first link between switches by default
            if isinstance( next_node, OVSSwitch ):
                switch_inport = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port']
            current_hop = next_hop

        return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
        #return "destination node: {0} not reached".format(vnf_dst_name)
    # start Ryu Openflow controller as Remote Controller for the DCNetwork
    def startRyu(self):
        """Launch a local Ryu controller process (with REST API) in the background."""
        # start Ryu controller with rest-API
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
        # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
        # Ryu still uses 6633 as default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        # redirect Ryu's output to a log file; the handle is intentionally
        # kept open for the lifetime of the child process
        FNULL = open("/tmp/ryu.log", 'w')
        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
        #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
    def stopRyu(self):
        """Terminate the background Ryu controller process, if one was started."""
        if self.ryu_process is not None:
            self.ryu_process.terminate()
            self.ryu_process.kill()