Monitoring should be optional, not enabled by default, to keep the emulator footprint small.
[osm/vim-emu.git] / src / emuvim / dcemulator / net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 import time
9 from subprocess import Popen
10 import os
11
12 from mininet.net import Dockernet
13 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
14 from mininet.cli import CLI
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """

    def __init__(self, controller=RemoteController, monitor=False,
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Dockernet network.

        :param controller: controller class; a Ryu process is spawned
            automatically when the default RemoteController is used
        :param monitor: attach a DCNetworkMonitor agent when True
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param dc_emulation_max_mem: max. memory (MB) available to the emulation
        :param kwargs: passed through to the underlying Mininet/Dockernet
        :return:
        """
        # maps data center label -> Datacenter object
        self.dcs = {}

        # delegate to the original Dockernet constructor with our switch class
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # Ryu management: spawn our own controller process only for the
        # default RemoteController case
        self.ryu_process = None
        if controller == RemoteController:
            self.startRyu()

        # register the chosen controller with Mininet
        self.addController('c0', controller=controller)

        # directed multigraph mirroring the complete DC network topology
        self.DCNetwork_graph = nx.MultiDiGraph()

        # optional monitoring agent (off by default to keep footprint small)
        self.monitor_agent = DCNetworkMonitor(self) if monitor else None

        # registrar that hands out emulation resources to resource models
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)
66
67 def addDatacenter(self, label, metadata={}, resource_log_path=None):
68 """
69 Create and add a logical cloud data center to the network.
70 """
71 if label in self.dcs:
72 raise Exception("Data center label already exists: %s" % label)
73 dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
74 dc.net = self # set reference to network
75 self.dcs[label] = dc
76 dc.create() # finally create the data center in our Mininet instance
77 logging.info("added data center: %s" % label)
78 return dc
79
80 def addLink(self, node1, node2, **params):
81 """
82 Able to handle Datacenter objects as link
83 end points.
84 """
85 assert node1 is not None
86 assert node2 is not None
87 logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
88 # ensure type of node1
89 if isinstance( node1, basestring ):
90 if node1 in self.dcs:
91 node1 = self.dcs[node1].switch
92 if isinstance( node1, Datacenter ):
93 node1 = node1.switch
94 # ensure type of node2
95 if isinstance( node2, basestring ):
96 if node2 in self.dcs:
97 node2 = self.dcs[node2].switch
98 if isinstance( node2, Datacenter ):
99 node2 = node2.switch
100 # try to give containers a default IP
101 if isinstance( node1, Docker ):
102 if "params1" not in params:
103 params["params1"] = {}
104 if "ip" not in params["params1"]:
105 params["params1"]["ip"] = self.getNextIp()
106 if isinstance( node2, Docker ):
107 if "params2" not in params:
108 params["params2"] = {}
109 if "ip" not in params["params2"]:
110 params["params2"]["ip"] = self.getNextIp()
111 # ensure that we allow TCLinks between data centers
112 # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
113 # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
114 if "cls" not in params:
115 params["cls"] = TCLink
116
117 link = Dockernet.addLink(self, node1, node2, **params)
118
119 # try to give container interfaces a default id
120 node1_port_id = node1.ports[link.intf1]
121 if isinstance(node1, Docker):
122 if "id" in params["params1"]:
123 node1_port_id = params["params1"]["id"]
124
125 node2_port_id = node2.ports[link.intf2]
126 if isinstance(node2, Docker):
127 if "id" in params["params2"]:
128 node2_port_id = params["params2"]["id"]
129
130 # add edge and assigned port number to graph in both directions between node1 and node2
131 # port_id: id given in descriptor (if available, otherwise same as port)
132 # port: portnumber assigned by Dockernet
133
134 self.DCNetwork_graph.add_edge(node1.name, node2.name,
135 attr_dict={'src_port_id': node1_port_id, 'src_port': node1.ports[link.intf1],
136 'dst_port_id': node2_port_id, 'dst_port': node2.ports[link.intf2]})
137 self.DCNetwork_graph.add_edge(node2.name, node1.name,
138 attr_dict={'src_port_id': node2_port_id, 'src_port': node2.ports[link.intf2],
139 'dst_port_id': node1_port_id, 'dst_port': node1.ports[link.intf1]})
140
141 return link
142
143 def addDocker( self, label, **params ):
144 """
145 Wrapper for addDocker method to use custom container class.
146 """
147 self.DCNetwork_graph.add_node(label)
148 return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)
149
150 def removeDocker( self, label, **params ):
151 """
152 Wrapper for removeDocker method to update graph.
153 """
154 self.DCNetwork_graph.remove_node(label)
155 return Dockernet.removeDocker(self, label, **params)
156
157 def addSwitch( self, name, add_to_graph=True, **params ):
158 """
159 Wrapper for addSwitch method to store switch also in graph.
160 """
161 if add_to_graph:
162 self.DCNetwork_graph.add_node(name)
163 return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)
164
165 def getAllContainers(self):
166 """
167 Returns a list with all containers within all data centers.
168 """
169 all_containers = []
170 for dc in self.dcs.itervalues():
171 all_containers += dc.listCompute()
172 return all_containers
173
174 def start(self):
175 # start
176 for dc in self.dcs.itervalues():
177 dc.start()
178 Dockernet.start(self)
179
180 def stop(self):
181
182 # stop the monitor agent
183 if self.monitor_agent is not None:
184 self.monitor_agent.stop()
185
186 # stop emulator net
187 Dockernet.stop(self)
188
189 # stop Ryu controller
190 self.stopRyu()
191
192
193 def CLI(self):
194 CLI(self)
195
196 # to remove chain do setChain( src, dst, cmd='del-flows')
197 def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, cmd='add-flow'):
198
199 #check if port is specified (vnf:port)
200 if vnf_src_interface is None:
201 # take first interface by default
202 connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
203 link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
204 vnf_src_interface = link_dict[0]['src_port_id']
205 #vnf_source_interface = 0
206
207 for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
208 link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
209 for link in link_dict:
210 #logging.info("{0},{1}".format(link_dict[link],vnf_source_interface))
211 if link_dict[link]['src_port_id'] == vnf_src_interface:
212 # found the right link and connected switch
213 #logging.info("{0},{1}".format(link_dict[link]['src_port_id'], vnf_source_interface))
214 src_sw = connected_sw
215
216 src_sw_inport = link_dict[link]['dst_port']
217 break
218
219 if vnf_dst_interface is None:
220 # take first interface by default
221 connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
222 link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
223 vnf_dst_interface = link_dict[0]['dst_port_id']
224 #vnf_dest_interface = 0
225
226 vnf_dst_name = vnf_dst_name.split(':')[0]
227 for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
228 link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
229 for link in link_dict:
230 if link_dict[link]['dst_port_id'] == vnf_dst_interface:
231 # found the right link and connected switch
232 dst_sw = connected_sw
233 dst_sw_outport = link_dict[link]['src_port']
234 break
235
236
237 # get shortest path
238 #path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
239 try:
240 path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw)
241 except:
242 logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
243 return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)
244
245 logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))
246
247 #current_hop = vnf_src_name
248 current_hop = src_sw
249 switch_inport = src_sw_inport
250
251 for i in range(0,len(path)):
252 current_node = self.getNodeByName(current_hop)
253 if path.index(current_hop) < len(path)-1:
254 next_hop = path[path.index(current_hop)+1]
255 else:
256 #last switch reached
257 next_hop = vnf_dst_name
258
259 next_node = self.getNodeByName(next_hop)
260
261 if next_hop == vnf_dst_name:
262 switch_outport = dst_sw_outport
263 logging.info("end node reached: {0}".format(vnf_dst_name))
264 elif not isinstance( next_node, OVSSwitch ):
265 logging.info("Next node: {0} is not a switch".format(next_hop))
266 return "Next node: {0} is not a switch".format(next_hop)
267 else:
268 # take first link between switches by default
269 index_edge_out = 0
270 switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']
271
272 # take into account that multiple edges are possible between 2 nodes
273 index_edge_in = 0
274
275
276 #switch_inport = self.DCNetwork_graph[current_hop][next_hop][index_edge_in]['dst_port']
277
278 #next2_hop = path[path.index(current_hop)+2]
279 #index_edge_out = 0
280 #switch_outport = self.DCNetwork_graph[next_hop][next2_hop][index_edge_out]['src_port']
281 #switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']
282
283 #logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport, switch_outport))
284 # set of entry via ovs-ofctl
285 # TODO use rest API of ryu to set flow entries to correct witch dpid
286 if isinstance( current_node, OVSSwitch ):
287 match = 'in_port=%s' % switch_inport
288
289 if cmd=='add-flow':
290 action = 'action=%s' % switch_outport
291 s = ','
292 ofcmd = s.join([match,action])
293 elif cmd=='del-flows':
294 ofcmd = match
295 else:
296 ofcmd=''
297
298 current_node.dpctl(cmd, ofcmd)
299 logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport,
300 switch_outport))
301 # take first link between switches by default
302 if isinstance( next_node, OVSSwitch ):
303 switch_inport = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port']
304 current_hop = next_hop
305
306 return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
307 #return "destination node: {0} not reached".format(vnf_dst_name)
308
309 # start Ryu Openflow controller as Remote Controller for the DCNetwork
310 def startRyu(self):
311 # start Ryu controller with rest-API
312 python_install_path = site.getsitepackages()[0]
313 ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
314 ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
315 # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
316 # Ryu still uses 6633 as default
317 ryu_option = '--ofp-tcp-listen-port'
318 ryu_of_port = '6653'
319 ryu_cmd = 'ryu-manager'
320 FNULL = open("/tmp/ryu.log", 'w')
321 self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
322 # no learning switch
323 #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
324 time.sleep(1)
325
326 def stopRyu(self):
327 if self.ryu_process is not None:
328 self.ryu_process.terminate()
329 self.ryu_process.kill()
330