allow multiple edges/interfaces per vnf in the DCNetwork graph
[osm/vim-emu.git] / src / emuvim / dcemulator / net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 import time
9 from subprocess import Popen
10 import os
11
12 from mininet.net import Dockernet
13 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
14 from mininet.cli import CLI
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
21
class DCNetwork(Dockernet):
    """
    Wraps the original Mininet/Dockernet class and provides
    methods to add data centers, switches, etc.

    This class is used by topology definition scripts.
    """

    def __init__(self, controller=RemoteController,
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Dockernet network
        :param controller: controller class; when RemoteController is used,
            a local Ryu instance is started automatically to back it
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param dc_emulation_max_mem: max. memory (MB) available to emulated containers
        :param kwargs: path through for Mininet parameters
        :return:
        """
        # maps data center label -> Datacenter object
        self.dcs = {}

        # call original Docker.__init__ and setup default controller
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # Ryu management
        self.ryu_process = None
        if controller == RemoteController:
            # start Ryu controller (serves as the remote controller instance)
            self.startRyu()

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        # NOTE: a MultiDiGraph so multiple parallel links (i.e. multiple
        # interfaces per VNF) between the same node pair can be represented
        self.DCNetwork_graph = nx.MultiDiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)

    def addDatacenter(self, label, metadata=None, resource_log_path=None):
        """
        Create and add a logical cloud data center to the network.

        :param label: unique name of the data center
        :param metadata: optional dict of user-defined metadata
        :param resource_log_path: optional path for resource model logging
        :raises Exception: if a data center with this label already exists
        :return: the created Datacenter object
        """
        # avoid the shared-mutable-default-argument pitfall
        if metadata is None:
            metadata = {}
        if label in self.dcs:
            raise Exception("Data center label already exists: %s" % label)
        dc = Datacenter(label, metadata=metadata, resource_log_path=resource_log_path)
        dc.net = self  # set reference to network
        self.dcs[label] = dc
        dc.create()  # finally create the data center in our Mininet instance
        logging.info("added data center: %s" % label)
        return dc

    def addLink(self, node1, node2, **params):
        """
        Able to handle Datacenter objects as link
        end points.

        Container endpoints get a default IP assigned and the created link is
        recorded (with port numbers/ids) in the network graph in both
        directions.
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1: resolve labels and Datacenter objects to their switch
        if isinstance( node1, basestring ):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance( node1, Datacenter ):
            node1 = node1.switch
        # ensure type of node2
        if isinstance( node2, basestring ):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance( node2, Datacenter ):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance( node1, Docker ):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance( node2, Docker ):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # try to give container interfaces a default id
        # (the descriptor-provided "id" wins over the Dockernet port number)
        node1_port_id = node1.ports[link.intf1]
        if isinstance(node1, Docker):
            if "id" in params["params1"]:
                node1_port_id = params["params1"]["id"]

        node2_port_id = node2.ports[link.intf2]
        if isinstance(node2, Docker):
            if "id" in params["params2"]:
                node2_port_id = params["params2"]["id"]

        # add edge and assigned port number to graph in both directions between node1 and node2
        # port_id: id given in descriptor (if available, otherwise same as port)
        # port: portnumber assigned by Dockernet

        self.DCNetwork_graph.add_edge(node1.name, node2.name,
                                      attr_dict={'src_port_id': node1_port_id, 'src_port': node1.ports[link.intf1],
                                                 'dst_port_id': node2_port_id, 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name,
                                      attr_dict={'src_port_id': node2_port_id, 'src_port': node2.ports[link.intf2],
                                                 'dst_port_id': node1_port_id, 'dst_port': node1.ports[link.intf1]})

        return link

    def addDocker( self, label, **params ):
        """
        Wrapper for addDocker method to use custom container class.
        Also registers the container as a node in the network graph.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker( self, label, **params ):
        """
        Wrapper for removeDocker method to update graph.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)

    def addSwitch( self, name, add_to_graph=True, **params ):
        """
        Wrapper for addSwitch method to store switch also in graph.

        :param add_to_graph: set False to keep the switch out of the
            network graph (e.g. for purely internal switches)
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)

    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers

    def start(self):
        """
        Start all data centers, then the underlying Dockernet network.
        """
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        """
        Stop the Dockernet network and the Ryu controller.
        """
        Dockernet.stop(self)
        self.stopRyu()

    def CLI(self):
        """Open an interactive Mininet CLI on this network."""
        CLI(self)

    # to remove chain do setChain( src, dst, cmd='del-flows')
    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
        """
        Install (or remove) flow entries along the shortest path between
        two VNFs so that traffic is chained through the switches in between.

        :param vnf_src_name: name of the source VNF node
        :param vnf_dst_name: name of the destination VNF node
        :param cmd: ovs-ofctl command, e.g. 'add-flow' or 'del-flows'
        :return: human-readable status string
        """
        # get shortest path
        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        # walk the path hop by hop; every intermediate hop must be a switch
        for i in range(len(path) - 1):
            current_hop = path[i]
            next_hop = path[i + 1]
            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
            elif not isinstance( next_node, OVSSwitch ):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)

            # DCNetwork_graph is a MultiDiGraph: G[u][v] maps edge keys to
            # attribute dicts, so pick the first parallel edge explicitly
            switch_inport = self._first_edge_attrs(current_hop, next_hop)['dst_port']
            next2_hop = path[i + 2]
            switch_outport = self._first_edge_attrs(next_hop, next2_hop)['src_port']

            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
            # set of entry via ovs-ofctl
            # TODO use rest API of ryu to set flow entries to correct witch dpid
            match = 'in_port=%s' % switch_inport

            if cmd == 'add-flow':
                action = 'action=%s' % switch_outport
                ofcmd = ','.join([match, action])
            elif cmd == 'del-flows':
                ofcmd = match
            else:
                ofcmd = ''

            next_node.dpctl(cmd, ofcmd)

        return "destination node: {0} not reached".format(vnf_dst_name)

    def _first_edge_attrs(self, src, dst):
        """
        Return the attribute dict of the first (lowest-key) edge between
        src and dst in the MultiDiGraph.
        """
        edge_dict = self.DCNetwork_graph[src][dst]
        return edge_dict[min(edge_dict.keys())]

    # start Ryu Openflow controller as Remote Controller for the DCNetwork
    def startRyu(self):
        """
        Start a Ryu OpenFlow controller (with REST API) as a subprocess.
        Its output is redirected to /tmp/ryu.log.
        """
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
        # change the default Openflow controller port to 6653 (official IANA-assigned port number), as used by Mininet
        # Ryu still uses 6633 as default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        log_file = open("/tmp/ryu.log", 'w')
        try:
            self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port],
                                     stdout=log_file, stderr=log_file)
        finally:
            # the child process keeps its own duplicate of the fd;
            # close the parent's copy to avoid leaking the handle
            log_file.close()
        # give the controller a moment to come up before switches connect
        time.sleep(1)

    def stopRyu(self):
        """
        Terminate the Ryu controller subprocess (best effort); safe to call
        multiple times.
        """
        if self.ryu_process is not None:
            self.ryu_process.terminate()
            self.ryu_process.kill()
            self.ryu_process = None
248