update monitor features
[osm/vim-emu.git] / src / emuvim / dcemulator / net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 import time
9 from subprocess import Popen
10 import os
11
12 from mininet.net import Dockernet
13 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
14 from mininet.cli import CLI
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """

    def __init__(self, controller=RemoteController,
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Dockernet network.

        :param controller: controller class to use; if it is RemoteController,
            a local Ryu instance is spawned automatically (see startRyu)
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param dc_emulation_max_mem: max. memory (MB) used by containers in data centers
        :param kwargs: passed through to the Mininet/Dockernet constructor
        :return:
        """
        # maps data center label -> Datacenter object
        self.dcs = {}

        # call original Docker.__init__ and setup default controller
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # Ryu management: only spawn our own controller process when the
        # default RemoteController class is requested
        self.ryu_process = None
        if controller == RemoteController:
            # start Ryu controller
            self.startRyu()

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network; MultiDiGraph because two nodes
        # can be connected by several links, each stored per direction
        self.DCNetwork_graph = nx.MultiDiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)

    def addDatacenter(self, label, metadata=None, resource_log_path=None):
        """
        Create and add a logical cloud data center to the network.

        :param label: unique name for the new data center
        :param metadata: optional dict with user-defined metadata
            (default is None instead of a shared mutable {} to avoid
            accidental state sharing between calls)
        :param resource_log_path: optional path for resource model logging
        :raises Exception: if a data center with this label already exists
        :return: the created Datacenter object
        """
        if label in self.dcs:
            raise Exception("Data center label already exists: %s" % label)
        dc = Datacenter(label,
                        metadata={} if metadata is None else metadata,
                        resource_log_path=resource_log_path)
        dc.net = self  # set reference to network
        self.dcs[label] = dc
        dc.create()  # finally create the data center in our Mininet instance
        logging.info("added data center: %s" % label)
        return dc

    def addLink(self, node1, node2, **params):
        """
        Add a link between two nodes; also able to handle Datacenter
        objects (or their labels) as link end points.

        Docker containers get a default IP if none is given, and the new
        link is mirrored as a pair of directed edges (with port attributes)
        in DCNetwork_graph.
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # resolve labels / Datacenter objects to their attachment switches
        if isinstance(node1, basestring):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance(node1, Datacenter):
            node1 = node1.switch
        if isinstance(node2, basestring):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance(node2, Datacenter):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance(node1, Docker):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance(node2, Docker):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # port_id: interface id given in the descriptor if available,
        # otherwise the port number assigned by Dockernet
        node1_port_id = node1.ports[link.intf1]
        if isinstance(node1, Docker):
            if "id" in params["params1"]:
                node1_port_id = params["params1"]["id"]

        node2_port_id = node2.ports[link.intf2]
        if isinstance(node2, Docker):
            if "id" in params["params2"]:
                node2_port_id = params["params2"]["id"]

        # add edge and assigned port numbers to the graph in both
        # directions between node1 and node2
        self.DCNetwork_graph.add_edge(node1.name, node2.name,
                                      attr_dict={'src_port_id': node1_port_id, 'src_port': node1.ports[link.intf1],
                                                 'dst_port_id': node2_port_id, 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name,
                                      attr_dict={'src_port_id': node2_port_id, 'src_port': node2.ports[link.intf2],
                                                 'dst_port_id': node1_port_id, 'dst_port': node1.ports[link.intf1]})

        return link

    def addDocker(self, label, **params):
        """
        Wrapper for addDocker method to use our custom EmulatorCompute
        container class and to register the container in the graph.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker(self, label, **params):
        """
        Wrapper for removeDocker method to keep the graph in sync.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)

    def addSwitch(self, name, add_to_graph=True, **params):
        """
        Wrapper for addSwitch method to store the switch in the graph and
        to enable multiple OpenFlow protocol versions on it.
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)

    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers

    def start(self):
        """Start all data centers, then the underlying Dockernet."""
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        """Stop the underlying Dockernet, then the Ryu controller."""
        Dockernet.stop(self)
        self.stopRyu()

    def CLI(self):
        """Open an interactive Mininet CLI on this network."""
        CLI(self)

    # to remove a chain do setChain(src, dst, cmd='del-flows')
    def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, cmd='add-flow'):
        """
        Install (or remove) flow entries on all switches along the shortest
        path between two VNFs so traffic is chained from source to destination.

        :param vnf_src_name: name of the source VNF
        :param vnf_dst_name: name of the destination VNF (may be 'name:port')
        :param vnf_src_interface: source interface id; first interface if None
        :param vnf_dst_interface: destination interface id; first interface if None
        :param cmd: ovs-ofctl command: 'add-flow' to install, 'del-flows' to remove
        :return: human-readable status string
        """
        # initialized to None so a missing link/switch is detected gracefully
        # below instead of raising NameError (previous behavior)
        src_sw = dst_sw = None
        src_sw_inport = dst_sw_outport = None

        # check if port is specified (vnf:port)
        if vnf_src_interface is None:
            # take first interface by default
            connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            vnf_src_interface = link_dict[0]['src_port_id']

        # find the switch the source interface is attached to and the
        # switch port that traffic from the VNF enters on
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            for link in link_dict:
                if link_dict[link]['src_port_id'] == vnf_src_interface:
                    # found the right link and connected switch
                    src_sw = connected_sw
                    src_sw_inport = link_dict[link]['dst_port']
                    break

        if vnf_dst_interface is None:
            # take first interface by default
            connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            vnf_dst_interface = link_dict[0]['dst_port_id']

        # strip optional ':port' suffix before the graph lookup
        vnf_dst_name = vnf_dst_name.split(':')[0]
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            for link in link_dict:
                if link_dict[link]['dst_port_id'] == vnf_dst_interface:
                    # found the right link and connected switch
                    dst_sw = connected_sw
                    dst_sw_outport = link_dict[link]['src_port']
                    break

        # get shortest path between the two attachment switches
        try:
            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw)
        except nx.NetworkXException:
            # covers no-path and unknown-node errors (was a bare except)
            logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)

        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        current_hop = src_sw
        switch_inport = src_sw_inport

        for _ in path:
            current_node = self.getNodeByName(current_hop)
            if path.index(current_hop) < len(path) - 1:
                next_hop = path[path.index(current_hop) + 1]
            else:
                # last switch reached
                next_hop = vnf_dst_name

            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                switch_outport = dst_sw_outport
                logging.info("end node reached: {0}".format(vnf_dst_name))
            elif not isinstance(next_node, OVSSwitch):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)
            else:
                # take first link between switches by default
                index_edge_out = 0
                switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']

            # set flow entry via ovs-ofctl
            # TODO use rest API of ryu to set flow entries to correct switch dpid
            if isinstance(current_node, OVSSwitch):
                match = 'in_port=%s' % switch_inport

                if cmd == 'add-flow':
                    action = 'action=%s' % switch_outport
                    s = ','
                    ofcmd = s.join([match, action])
                elif cmd == 'del-flows':
                    ofcmd = match
                else:
                    ofcmd = ''

                current_node.dpctl(cmd, ofcmd)
                logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport,
                                                                                         switch_outport))
            # take first link between switches by default
            if isinstance(next_node, OVSSwitch):
                switch_inport = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port']
            current_hop = next_hop

        return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)

    # start Ryu Openflow controller as Remote Controller for the DCNetwork
    def startRyu(self):
        """
        Start a Ryu controller (learning switch + REST API) as subprocess,
        logging to /tmp/ryu.log.
        """
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
        # change the default Openflow controller port to 6653 (official
        # IANA-assigned port number), as used by Mininet;
        # Ryu still uses 6633 as default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        log_file = open("/tmp/ryu.log", 'w')
        try:
            self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port],
                                     stdout=log_file, stderr=log_file)
        finally:
            # the child holds its own copy of the fd; close the parent's
            # copy to avoid leaking it (was never closed before)
            log_file.close()
        # give the controller some time to come up before switches connect
        time.sleep(1)

    def stopRyu(self):
        """Terminate the Ryu controller subprocess started by startRyu (if any)."""
        if self.ryu_process is not None:
            self.ryu_process.terminate()
            self.ryu_process.kill()
            # reap the child to avoid leaving a zombie process behind
            self.ryu_process.wait()
            self.ryu_process = None
319