Merge pull request #74 from stevenvanrossem/master
[osm/vim-emu.git] / src / emuvim / dcemulator / net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 import time
9 from subprocess import Popen
10 import os
11
12 from mininet.net import Dockernet
13 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
14 from mininet.cli import CLI
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
21
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """

    def __init__(self, controller=RemoteController,
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Dockernet network
        :param controller: Mininet controller class; the default
            (RemoteController) triggers the start of a local Ryu instance
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param dc_emulation_max_mem: max. memory (MB) for emulated containers
        :param kwargs: path through for Mininet parameters
        :return:
        """
        # maps data center label -> Datacenter object (see addDatacenter)
        self.dcs = {}

        # call original Docker.__init__ and setup default controller
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # Ryu management: keep a handle on the controller subprocess so it
        # can be terminated again in stop()/stopRyu()
        self.ryu_process = None
        if controller == RemoteController:
            # start Ryu controller
            self.startRyu()

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        # (MultiDiGraph: one edge per link direction, parallel edges allowed)
        self.DCNetwork_graph = nx.MultiDiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)
64
65 def addDatacenter(self, label, metadata={}, resource_log_path=None):
66 """
67 Create and add a logical cloud data center to the network.
68 """
69 if label in self.dcs:
70 raise Exception("Data center label already exists: %s" % label)
71 dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
72 dc.net = self # set reference to network
73 self.dcs[label] = dc
74 dc.create() # finally create the data center in our Mininet instance
75 logging.info("added data center: %s" % label)
76 return dc
77
    def addLink(self, node1, node2, **params):
        """
        Able to handle Datacenter objects as link
        end points.

        Endpoints may be node names (strings), Datacenter objects or plain
        Mininet nodes; Datacenters are resolved to their internal switch.
        Docker endpoints without an explicit IP get a default one assigned.
        Every created link is also recorded in DCNetwork_graph, once per
        direction, together with the assigned port numbers.
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1: label -> Datacenter -> its switch
        if isinstance( node1, basestring ):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance( node1, Datacenter ):
            node1 = node1.switch
        # ensure type of node2
        if isinstance( node2, basestring ):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance( node2, Datacenter ):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance( node1, Docker ):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance( node2, Docker ):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # try to give container interfaces a default id
        # (for Docker nodes "params1"/"params2" are guaranteed to exist,
        # they were created above)
        node1_port_id = node1.ports[link.intf1]
        if isinstance(node1, Docker):
            if "id" in params["params1"]:
                node1_port_id = params["params1"]["id"]

        node2_port_id = node2.ports[link.intf2]
        if isinstance(node2, Docker):
            if "id" in params["params2"]:
                node2_port_id = params["params2"]["id"]

        # add edge and assigned port number to graph in both directions between node1 and node2
        # port_id: id given in descriptor (if available, otherwise same as port)
        # port: portnumber assigned by Dockernet

        self.DCNetwork_graph.add_edge(node1.name, node2.name,
                                      attr_dict={'src_port_id': node1_port_id, 'src_port': node1.ports[link.intf1],
                                                 'dst_port_id': node2_port_id, 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name,
                                      attr_dict={'src_port_id': node2_port_id, 'src_port': node2.ports[link.intf2],
                                                 'dst_port_id': node1_port_id, 'dst_port': node1.ports[link.intf1]})

        return link
140
141 def addDocker( self, label, **params ):
142 """
143 Wrapper for addDocker method to use custom container class.
144 """
145 self.DCNetwork_graph.add_node(label)
146 return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)
147
148 def removeDocker( self, label, **params ):
149 """
150 Wrapper for removeDocker method to update graph.
151 """
152 self.DCNetwork_graph.remove_node(label)
153 return Dockernet.removeDocker(self, label, **params)
154
155 def addSwitch( self, name, add_to_graph=True, **params ):
156 """
157 Wrapper for addSwitch method to store switch also in graph.
158 """
159 if add_to_graph:
160 self.DCNetwork_graph.add_node(name)
161 return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
162
163 def getAllContainers(self):
164 """
165 Returns a list with all containers within all data centers.
166 """
167 all_containers = []
168 for dc in self.dcs.itervalues():
169 all_containers += dc.listCompute()
170 return all_containers
171
172 def start(self):
173 # start
174 for dc in self.dcs.itervalues():
175 dc.start()
176 Dockernet.start(self)
177
178 def stop(self):
179 # stop Ryu controller
180 Dockernet.stop(self)
181 self.stopRyu()
182
183 def CLI(self):
184 CLI(self)
185
186 # to remove chain do setChain( src, dst, cmd='del-flows')
187 def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, cmd='add-flow'):
188
189 #check if port is specified (vnf:port)
190 if vnf_src_interface is None:
191 # take first interface by default
192 connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
193 link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
194 vnf_src_interface = link_dict[0]['src_port_id']
195 #vnf_source_interface = 0
196
197 for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
198 link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
199 for link in link_dict:
200 #logging.info("{0},{1}".format(link_dict[link],vnf_source_interface))
201 if link_dict[link]['src_port_id'] == vnf_src_interface:
202 # found the right link and connected switch
203 #logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
204 src_sw = connected_sw
205
206 src_sw_inport = link_dict[link]['dst_port']
207 break
208
209 if vnf_dst_interface is None:
210 # take first interface by default
211 connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
212 link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
213 vnf_dst_interface = link_dict[0]['dst_port_id']
214 #vnf_dest_interface = 0
215
216 vnf_dst_name = vnf_dst_name.split(':')[0]
217 for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
218 link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
219 for link in link_dict:
220 if link_dict[link]['dst_port_id'] == vnf_dst_interface:
221 # found the right link and connected switch
222 dst_sw = connected_sw
223 dst_sw_outport = link_dict[link]['src_port']
224 break
225
226
227 # get shortest path
228 #path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
229 try:
230 path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw)
231 except:
232 logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
233 return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
234
235 logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
236
237 #current_hop = vnf_src_name
238 current_hop = src_sw
239 switch_inport = src_sw_inport
240
241 for i in range(0,len(path)):
242 current_node = self.getNodeByName(current_hop)
243 if path.index(current_hop) < len(path)-1:
244 next_hop = path[path.index(current_hop)+1]
245 else:
246 #last switch reached
247 next_hop = vnf_dst_name
248
249 next_node = self.getNodeByName(next_hop)
250
251 if next_hop == vnf_dst_name:
252 switch_outport = dst_sw_outport
253 logging.info("end node reached: {0}".format(vnf_dst_name))
254 elif not isinstance( next_node, OVSSwitch ):
255 logging.info("Next node: {0} is not a switch".format(next_hop))
256 return "Next node: {0} is not a switch".format(next_hop)
257 else:
258 # take first link between switches by default
259 index_edge_out = 0
260 switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']
261
262 # take into account that multiple edges are possible between 2 nodes
263 index_edge_in = 0
264
265
266 #switch_inport = self.DCNetwork_graph[current_hop][next_hop][index_edge_in]['dst_port']
267
268 #next2_hop = path[path.index(current_hop)+2]
269 #index_edge_out = 0
270 #switch_outport = self.DCNetwork_graph[next_hop][next2_hop][index_edge_out]['src_port']
271 #switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']
272
273 #logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport, switch_outport))
274 # set of entry via ovs-ofctl
275 # TODO use rest API of ryu to set flow entries to correct witch dpid
276 if isinstance( current_node, OVSSwitch ):
277 match = 'in_port=%s' % switch_inport
278
279 if cmd=='add-flow':
280 action = 'action=%s' % switch_outport
281 s = ','
282 ofcmd = s.join([match,action])
283 elif cmd=='del-flows':
284 ofcmd = match
285 else:
286 ofcmd=''
287
288 current_node.dpctl(cmd, ofcmd)
289 logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport,
290 switch_outport))
291 # take first link between switches by default
292 if isinstance( next_node, OVSSwitch ):
293 switch_inport = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port']
294 current_hop = next_hop
295
296 return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
297 #return "destination node: {0} not reached".format(vnf_dst_name)
298
299 # start Ryu Openflow controller as Remote Controller for the DCNetwork
300 def startRyu(self):
301 # start Ryu controller with rest-API
302 python_install_path = site.getsitepackages()[0]
303 ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
304 ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
305 # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
306 # Ryu still uses 6633 as default
307 ryu_option = '--ofp-tcp-listen-port'
308 ryu_of_port = '6653'
309 ryu_cmd = 'ryu-manager'
310 FNULL = open("/tmp/ryu.log", 'w')
311 self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
312 # no learning switch
313 #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
314 time.sleep(1)
315
316 def stopRyu(self):
317 if self.ryu_process is not None:
318 self.ryu_process.terminate()
319 self.ryu_process.kill()
320