1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 import time
9 from subprocess import Popen
10 import os
11
12 from mininet.net import Dockernet
13 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
14 from mininet.cli import CLI
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
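
    Example topology script (an illustrative sketch; the labels and link
    parameters are made up):

        net = DCNetwork()
        dc1 = net.addDatacenter("dc1")
        dc2 = net.addDatacenter("dc2")
        s1 = net.addSwitch("s1")
        net.addLink(dc1, s1, delay="10ms")
        net.addLink(s1, dc2, delay="10ms")
        net.start()
        net.CLI()
        net.stop()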
27 """
28
    def __init__(self, controller=RemoteController,
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max. memory in MB
                 **kwargs):
        """
        Create an extended version of a Dockernet network.
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param dc_emulation_max_mem: max. memory (in MB) used by containers in data centers
        :param kwargs: passed through to the Mininet/Dockernet constructor
        :return:
        """
        self.dcs = {}

        # call the original Dockernet.__init__ and set the default switch type
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # Ryu management
        self.ryu_process = None
        if controller == RemoteController:
            # start Ryu controller
            self.startRyu()

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        self.DCNetwork_graph = nx.MultiDiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)

    def addDatacenter(self, label, metadata={}, resource_log_path=None):
        """
        Create and add a logical cloud data center to the network.
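
        Example (illustrative; the metadata keys are arbitrary):

            dc = net.addDatacenter("dc1", metadata={"location": "paderborn"})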
67 """
68 if label in self.dcs:
69 raise Exception("Data center label already exists: %s" % label)
70 dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
71 dc.net = self # set reference to network
72 self.dcs[label] = dc
73 dc.create() # finally create the data center in our Mininet instance
74 logging.info("added data center: %s" % label)
75 return dc
76
    def addLink(self, node1, node2, **params):
        """
        Able to handle Datacenter objects as link
        end points.
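
        Example (illustrative): endpoints may be Datacenter objects,
        data center labels, or plain Mininet nodes; extra parameters go
        to the underlying TCLink:

            net.addLink("dc1", "dc2", delay="20ms", bw=10)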
81 """
82 assert node1 is not None
83 assert node2 is not None
84 logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
85 # ensure type of node1
86 if isinstance( node1, basestring ):
87 if node1 in self.dcs:
88 node1 = self.dcs[node1].switch
89 if isinstance( node1, Datacenter ):
90 node1 = node1.switch
91 # ensure type of node2
92 if isinstance( node2, basestring ):
93 if node2 in self.dcs:
94 node2 = self.dcs[node2].switch
95 if isinstance( node2, Datacenter ):
96 node2 = node2.switch
97 # try to give containers a default IP
98 if isinstance( node1, Docker ):
99 if "params1" not in params:
100 params["params1"] = {}
101 if "ip" not in params["params1"]:
102 params["params1"]["ip"] = self.getNextIp()
103 if isinstance( node2, Docker ):
104 if "params2" not in params:
105 params["params2"] = {}
106 if "ip" not in params["params2"]:
107 params["params2"]["ip"] = self.getNextIp()
108 # ensure that we allow TCLinks between data centers
109 # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
110 # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
111 if "cls" not in params:
112 params["cls"] = TCLink
113
114 link = Dockernet.addLink(self, node1, node2, **params)
115
116 # try to give container interfaces a default id
117 node1_port_id = node1.ports[link.intf1]
118 if isinstance(node1, Docker):
119 if "id" in params["params1"]:
120 node1_port_id = params["params1"]["id"]
121
122 node2_port_id = node2.ports[link.intf2]
123 if isinstance(node2, Docker):
124 if "id" in params["params2"]:
125 node2_port_id = params["params2"]["id"]
126
127 # add edge and assigned port number to graph in both directions between node1 and node2
128 # port_id: id given in descriptor (if available, otherwise same as port)
129 # port: portnumber assigned by Dockernet
130
131 self.DCNetwork_graph.add_edge(node1.name, node2.name,
132 attr_dict={'src_port_id': node1_port_id, 'src_port': node1.ports[link.intf1],
133 'dst_port_id': node2_port_id, 'dst_port': node2.ports[link.intf2]})
134 self.DCNetwork_graph.add_edge(node2.name, node1.name,
135 attr_dict={'src_port_id': node2_port_id, 'src_port': node2.ports[link.intf2],
136 'dst_port_id': node1_port_id, 'dst_port': node1.ports[link.intf1]})
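        # illustrative (hypothetical names): after addLink('vnf1', 's1') the edge
        # attributes can be read back as, e.g.,
        #   self.DCNetwork_graph['vnf1']['s1'][0]['src_port_id']
        # which is how setChain() below looks up ports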

        return link

    def addDocker( self, label, **params ):
        """
        Wrapper for addDocker method to use custom container class.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker( self, label, **params ):
        """
        Wrapper for removeDocker method to update graph.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)

    def addSwitch( self, name, add_to_graph=True, **params ):
        """
        Wrapper for addSwitch method to also store the switch in the graph.
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)

    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers

    def start(self):
        # start all data centers, then the underlying Dockernet
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        # stop the monitor agent
        self.monitor_agent.stop()

        # stop the emulator net
        Dockernet.stop(self)

        # stop the Ryu controller
        self.stopRyu()

    def CLI(self):
        CLI(self)

    # to remove a chain, call setChain(src, dst, cmd='del-flows')
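    # Example calls (illustrative; 'vnf1'/'vnf2' are hypothetical container names):
    #   net.setChain('vnf1', 'vnf2')                   # install flow entries along the path
    #   net.setChain('vnf1', 'vnf2', cmd='del-flows')  # remove them again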
    def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, cmd='add-flow'):

        # check if a source port is specified (vnf:port)
        if vnf_src_interface is None:
            # take the first interface by default
            connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            vnf_src_interface = link_dict[0]['src_port_id']

        # find the switch connected to the source VNF interface
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            for link in link_dict:
                if link_dict[link]['src_port_id'] == vnf_src_interface:
                    # found the right link and connected switch
                    src_sw = connected_sw
                    src_sw_inport = link_dict[link]['dst_port']
                    break

        # check if a destination port is specified (vnf:port)
        if vnf_dst_interface is None:
            # take the first interface by default
            connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            vnf_dst_interface = link_dict[0]['dst_port_id']

        # find the switch connected to the destination VNF interface
        vnf_dst_name = vnf_dst_name.split(':')[0]
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            for link in link_dict:
                if link_dict[link]['dst_port_id'] == vnf_dst_interface:
                    # found the right link and connected switch
                    dst_sw = connected_sw
                    dst_sw_outport = link_dict[link]['src_port']
                    break

        # get the shortest path between the source and destination switches
        try:
            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw)
        except Exception:  # e.g. nx.NetworkXNoPath; avoid a bare except
            logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)

        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        current_hop = src_sw
        switch_inport = src_sw_inport

        for i in range(0, len(path)):
            current_node = self.getNodeByName(current_hop)
            if path.index(current_hop) < len(path) - 1:
                next_hop = path[path.index(current_hop) + 1]
            else:
                # last switch reached
                next_hop = vnf_dst_name

            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                switch_outport = dst_sw_outport
                logging.info("end node reached: {0}".format(vnf_dst_name))
            elif not isinstance( next_node, OVSSwitch ):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)
            else:
                # take the first link between switches by default
                # (multiple parallel edges between two nodes are possible)
                index_edge_out = 0
                switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']

            # set the flow entry via ovs-ofctl
            # TODO use Ryu's REST API to set flow entries on the correct switch dpid
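            # Illustrative effect: dpctl shells out to ovs-ofctl, so an 'add-flow'
            # on switch s1 with in_port 1 and out port 2 roughly amounts to:
            #   ovs-ofctl add-flow s1 in_port=1,action=2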
            if isinstance( current_node, OVSSwitch ):
                match = 'in_port=%s' % switch_inport

                if cmd == 'add-flow':
                    action = 'action=%s' % switch_outport
                    s = ','
                    ofcmd = s.join([match, action])
                elif cmd == 'del-flows':
                    ofcmd = match
                else:
                    ofcmd = ''

                current_node.dpctl(cmd, ofcmd)
                logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(current_node.name, switch_inport,
                                                                                         switch_outport))
            # take the first link between switches by default
            if isinstance( next_node, OVSSwitch ):
                switch_inport = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port']
            current_hop = next_hop

        return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)

    # start a Ryu OpenFlow controller as the remote controller for the DCNetwork
    def startRyu(self):
        # start the Ryu controller with the REST API app
        python_install_path = site.getsitepackages()[0]
        ryu_path = os.path.join(python_install_path, 'ryu/app/simple_switch_13.py')
        ryu_path2 = os.path.join(python_install_path, 'ryu/app/ofctl_rest.py')
        # change the default OpenFlow controller port to 6653 (the official IANA-assigned
        # port number), as used by Mininet; Ryu still uses 6633 as its default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        FNULL = open("/tmp/ryu.log", 'w')
        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
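        # The line above is roughly the same as running this shell command
        # by hand (illustrative):
        #   ryu-manager <site-packages>/ryu/app/simple_switch_13.py \
        #       <site-packages>/ryu/app/ofctl_rest.py --ofp-tcp-listen-port 6653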
        # alternative: start without the learning switch app
        #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
        time.sleep(1)  # give Ryu a moment to start up

    def stopRyu(self):
        if self.ryu_process is not None:
            self.ryu_process.terminate()
            self.ryu_process.kill()