1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 import time
9 from subprocess import Popen
10 import os
11
12 from mininet.net import Dockernet
13 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
14 from mininet.cli import CLI
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """

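    # Minimal usage sketch for a topology script (labels and link
    # parameters are illustrative, not part of this module):
    #
    #   net = DCNetwork()
    #   dc1 = net.addDatacenter("dc1")
    #   dc2 = net.addDatacenter("dc2")
    #   net.addLink(dc1, dc2, delay="10ms")
    #   net.start()
    #   net.CLI()
    #   net.stop()
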
    def __init__(self, controller=RemoteController,
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Dockernet network.
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param dc_emulation_max_mem: max. memory (in MB) used by containers in data centers
        :param kwargs: passed through to the Mininet/Dockernet constructor
        :return:
        """
        self.dcs = {}

        # call original Dockernet.__init__ and setup default controller
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # Ryu management
        self.ryu_process = None
        if controller == RemoteController:
            # start a local Ryu instance as the remote controller
            self.startRyu()

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        self.DCNetwork_graph = nx.MultiDiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)

    def addDatacenter(self, label, metadata=None, resource_log_path=None):
        """
        Create and add a logical cloud data center to the network.
        """
        if label in self.dcs:
            raise Exception("Data center label already exists: %s" % label)
        # avoid a mutable default argument for metadata
        dc = Datacenter(label, metadata=metadata if metadata is not None else {},
                        resource_log_path=resource_log_path)
        dc.net = self  # set reference to network
        self.dcs[label] = dc
        dc.create()  # finally create the data center in our Mininet instance
        logging.info("added data center: %s" % label)
        return dc

    def addLink(self, node1, node2, **params):
        """
        Able to handle Datacenter objects as link end points:
        labels and Datacenter instances are resolved to the
        data center's switch.
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1
        if isinstance(node1, basestring):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance(node1, Datacenter):
            node1 = node1.switch
        # ensure type of node2
        if isinstance(node2, basestring):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance(node2, Datacenter):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance(node1, Docker):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance(node2, Docker):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # try to give container interfaces a default id
        node1_port_id = node1.ports[link.intf1]
        if isinstance(node1, Docker):
            if "id" in params["params1"]:
                node1_port_id = params["params1"]["id"]

        node2_port_id = node2.ports[link.intf2]
        if isinstance(node2, Docker):
            if "id" in params["params2"]:
                node2_port_id = params["params2"]["id"]

        # add edge and assigned port number to graph in both directions between node1 and node2
        # port_id: id given in descriptor (if available, otherwise same as port)
        # port: port number assigned by Dockernet
        self.DCNetwork_graph.add_edge(node1.name, node2.name,
                                      attr_dict={'src_port_id': node1_port_id, 'src_port': node1.ports[link.intf1],
                                                 'dst_port_id': node2_port_id, 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name,
                                      attr_dict={'src_port_id': node2_port_id, 'src_port': node2.ports[link.intf2],
                                                 'dst_port_id': node1_port_id, 'dst_port': node1.ports[link.intf1]})
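        # Note: with the networkx 1.x MultiDiGraph used here, the attr_dict keys
        # are stored as flat edge attributes, so an edge can be read back as,
        # e.g. (edge key 0, node names hypothetical):
        #   self.DCNetwork_graph["vnf1"]["s1"][0]["src_port"]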

        return link

    def addDocker(self, label, **params):
        """
        Wrapper for addDocker method to use our custom container class.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker(self, label, **params):
        """
        Wrapper for removeDocker method to keep the graph up to date.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)

    def addSwitch(self, name, add_to_graph=True, **params):
        """
        Wrapper for addSwitch method to also store the switch in the graph.
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
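        # switches are created with several OpenFlow versions enabled so that
        # both legacy ovs-ofctl calls and OF1.3 controllers can talk to them;
        # on a running switch this can be checked with, e.g.:
        #   ovs-vsctl get bridge <switch-name> protocols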
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)

    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers

    def start(self):
        # start all data centers, then the underlying Dockernet
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        # stop the network, then the Ryu controller and the monitor agent
        Dockernet.stop(self)
        self.stopRyu()
        self.monitor_agent.stop()

    def CLI(self):
        CLI(self)

    # to remove a chain, call setChain(src, dst, cmd='del-flows')
    def setChain(self, vnf_src_name, vnf_dst_name, vnf_src_interface=None, vnf_dst_interface=None, cmd='add-flow'):
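        # Usage sketch (VNF names are illustrative):
        #   net.setChain('vnf1', 'vnf2')                   # install flow entries along the path
        #   net.setChain('vnf1', 'vnf2', cmd='del-flows')  # remove them again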

        src_sw = None
        src_sw_inport = None
        dst_sw = None
        dst_sw_outport = None

        # if no source interface is specified, take the first one by default
        if vnf_src_interface is None:
            connected_sw = self.DCNetwork_graph.neighbors(vnf_src_name)[0]
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            vnf_src_interface = link_dict[0]['src_port_id']

        # find the switch connected to the source interface
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_src_name):
            link_dict = self.DCNetwork_graph[vnf_src_name][connected_sw]
            for link in link_dict:
                if link_dict[link]['src_port_id'] == vnf_src_interface:
                    # found the right link and connected switch
                    src_sw = connected_sw
                    src_sw_inport = link_dict[link]['dst_port']
                    break

        # if no destination interface is specified, take the first one by default
        if vnf_dst_interface is None:
            connected_sw = self.DCNetwork_graph.neighbors(vnf_dst_name)[0]
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            vnf_dst_interface = link_dict[0]['dst_port_id']

        # strip an optional ':port' suffix from the destination name
        vnf_dst_name = vnf_dst_name.split(':')[0]
        # find the switch connected to the destination interface
        for connected_sw in self.DCNetwork_graph.neighbors(vnf_dst_name):
            link_dict = self.DCNetwork_graph[connected_sw][vnf_dst_name]
            for link in link_dict:
                if link_dict[link]['dst_port_id'] == vnf_dst_interface:
                    # found the right link and connected switch
                    dst_sw = connected_sw
                    dst_sw_outport = link_dict[link]['src_port']
                    break

        # get the shortest path between the two connected switches
        try:
            path = nx.shortest_path(self.DCNetwork_graph, src_sw, dst_sw)
        except Exception:
            logging.info("No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name))
            return "No path could be found between {0} and {1}".format(vnf_src_name, vnf_dst_name)

        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        current_hop = src_sw
        switch_inport = src_sw_inport

        for i in range(len(path)):
            current_node = self.getNodeByName(current_hop)
            if i < len(path) - 1:
                next_hop = path[i + 1]
            else:
                # last switch reached
                next_hop = vnf_dst_name

            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                switch_outport = dst_sw_outport
                logging.info("end node reached: {0}".format(vnf_dst_name))
            elif not isinstance(next_node, OVSSwitch):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)
            else:
                # take the first link between switches by default
                # (multiple edges between two nodes are possible)
                index_edge_out = 0
                switch_outport = self.DCNetwork_graph[current_hop][next_hop][index_edge_out]['src_port']

            # set flow entry via ovs-ofctl
            # TODO use the rest API of Ryu to set flow entries on the correct switch dpid
            if isinstance(current_node, OVSSwitch):
                match = 'in_port=%s' % switch_inport

                if cmd == 'add-flow':
                    action = 'action=%s' % switch_outport
                    ofcmd = ','.join([match, action])
                elif cmd == 'del-flows':
                    ofcmd = match
                else:
                    ofcmd = ''

                # dpctl wraps ovs-ofctl, so this is roughly equivalent to
                # e.g. "ovs-ofctl add-flow <switch> in_port=1,action=2"
                current_node.dpctl(cmd, ofcmd)
                logging.info("{0} in switch: {1} in_port: {2} out_port: {3}".format(cmd, current_node.name,
                                                                                    switch_inport, switch_outport))
            # take the first link between switches by default
            if isinstance(next_node, OVSSwitch):
                switch_inport = self.DCNetwork_graph[current_hop][next_hop][0]['dst_port']
            current_hop = next_hop

        return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)

    # start a Ryu OpenFlow controller as remote controller for the DCNetwork
    def startRyu(self):
        # start Ryu controller with REST API
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
        # change the default OpenFlow controller port to 6653 (official IANA-assigned port number), as used by Mininet
        # (Ryu still uses 6633 as default)
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        # redirect Ryu's output to a log file instead of the console
        ryu_log = open("/tmp/ryu.log", 'w')
        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=ryu_log, stderr=ryu_log)
        # alternative without the learning switch app:
        #self.ryu_process = Popen([ryu_cmd, ryu_path2, ryu_option, ryu_of_port], stdout=ryu_log, stderr=ryu_log)
        time.sleep(1)
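        # Once Ryu is up, its REST API (served by ofctl_rest.py on port 8080
        # by default) can be queried, e.g.:
        #   curl http://localhost:8080/stats/switches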

    def stopRyu(self):
        # terminate the Ryu controller process and make sure it is gone
        if self.ryu_process is not None:
            self.ryu_process.terminate()
            self.ryu_process.kill()