Added emulation max memory property to network and RM registrar
[osm/vim-emu.git] src/emuvim/dcemulator/net.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 import logging
6
7 import site
8 import time
9 from subprocess import Popen
10 import os
11
12 from mininet.net import Dockernet
13 from mininet.node import Controller, DefaultController, OVSSwitch, OVSKernelSwitch, Docker, RemoteController
14 from mininet.cli import CLI
15 from mininet.link import TCLink
16 import networkx as nx
17 from emuvim.dcemulator.monitoring import DCNetworkMonitor
18 from emuvim.dcemulator.node import Datacenter, EmulatorCompute
19 from emuvim.dcemulator.resourcemodel import ResourceModelRegistrar
20
21
22 class DCNetwork(Dockernet):
23 """
24 Wraps the original Mininet/Dockernet class and provides
25 methods to add data centers, switches, etc.
26
27 This class is used by topology definition scripts.
28 """

    def __init__(self, controller=RemoteController,
                 dc_emulation_max_cpu=1.0,  # fraction of overall CPU time for emulation
                 dc_emulation_max_mem=512,  # emulation max mem in MB
                 **kwargs):
        """
        Create an extended version of a Dockernet network
        :param dc_emulation_max_cpu: max. CPU time used by containers in data centers
        :param dc_emulation_max_mem: max. memory (in MB) used by containers in data centers
        :param kwargs: pass through for Mininet parameters
        :return:
        """
        self.dcs = {}

        # call original Docker.__init__ and setup default controller
        Dockernet.__init__(
            self, switch=OVSKernelSwitch, **kwargs)

        # Ryu management
        self.ryu_process = None
        if controller == RemoteController:
            # start Ryu controller
            self.startRyu()

        # add the specified controller
        self.addController('c0', controller=controller)

        # graph of the complete DC network
        self.DCNetwork_graph = nx.DiGraph()

        # monitoring agent
        self.monitor_agent = DCNetworkMonitor(self)

        # initialize resource model registrar
        self.rm_registrar = ResourceModelRegistrar(
            dc_emulation_max_cpu, dc_emulation_max_mem)

    def addDatacenter(self, label, metadata={}):
        """
        Create and add a logical cloud data center to the network.
        """
        if label in self.dcs:
            raise Exception("Data center label already exists: %s" % label)
        dc = Datacenter(label, metadata=metadata)
        dc.net = self  # set reference to network
        self.dcs[label] = dc
        dc.create()  # finally create the data center in our Mininet instance
        logging.info("added data center: %s" % label)
        return dc

    def addLink(self, node1, node2, **params):
        """
        Able to handle Datacenter objects as link
        end points.
        """
        assert node1 is not None
        assert node2 is not None
        logging.debug("addLink: n1=%s n2=%s" % (str(node1), str(node2)))
        # ensure type of node1
        if isinstance(node1, basestring):
            if node1 in self.dcs:
                node1 = self.dcs[node1].switch
        if isinstance(node1, Datacenter):
            node1 = node1.switch
        # ensure type of node2
        if isinstance(node2, basestring):
            if node2 in self.dcs:
                node2 = self.dcs[node2].switch
        if isinstance(node2, Datacenter):
            node2 = node2.switch
        # try to give containers a default IP
        if isinstance(node1, Docker):
            if "params1" not in params:
                params["params1"] = {}
            if "ip" not in params["params1"]:
                params["params1"]["ip"] = self.getNextIp()
        if isinstance(node2, Docker):
            if "params2" not in params:
                params["params2"] = {}
            if "ip" not in params["params2"]:
                params["params2"]["ip"] = self.getNextIp()
        # ensure that we allow TCLinks between data centers
        # TODO this is not optimal, we use cls=Link for containers and TCLink for data centers
        # see Dockernet issue: https://github.com/mpeuster/dockernet/issues/3
        if "cls" not in params:
            params["cls"] = TCLink

        link = Dockernet.addLink(self, node1, node2, **params)

        # add edge and assigned port number to graph in both directions between node1 and node2
        self.DCNetwork_graph.add_edge(node1.name, node2.name,
            {'src_port': node1.ports[link.intf1], 'dst_port': node2.ports[link.intf2]})
        self.DCNetwork_graph.add_edge(node2.name, node1.name,
            {'src_port': node2.ports[link.intf2], 'dst_port': node1.ports[link.intf1]})

        return link

    def addDocker(self, label, **params):
        """
        Wrapper for addDocker method to use custom container class.
        """
        self.DCNetwork_graph.add_node(label)
        return Dockernet.addDocker(self, label, cls=EmulatorCompute, **params)

    def removeDocker(self, label, **params):
        """
        Wrapper for removeDocker method to update graph.
        """
        self.DCNetwork_graph.remove_node(label)
        return Dockernet.removeDocker(self, label, **params)

    def addSwitch(self, name, add_to_graph=True, **params):
        """
        Wrapper for addSwitch method to store switch also in graph.
        """
        if add_to_graph:
            self.DCNetwork_graph.add_node(name)
        return Dockernet.addSwitch(self, name, protocols='OpenFlow10,OpenFlow12,OpenFlow13', **params)

    def getAllContainers(self):
        """
        Returns a list with all containers within all data centers.
        """
        all_containers = []
        for dc in self.dcs.itervalues():
            all_containers += dc.listCompute()
        return all_containers

    def start(self):
        # start all data centers, then the underlying Dockernet network
        for dc in self.dcs.itervalues():
            dc.start()
        Dockernet.start(self)

    def stop(self):
        # stop the network first, then the Ryu controller
        Dockernet.stop(self)
        self.stopRyu()

    def CLI(self):
        CLI(self)

    # to remove a chain, call setChain(src, dst, cmd='del-flows');
    # see the illustrative calls after this method
    def setChain(self, vnf_src_name, vnf_dst_name, cmd='add-flow'):
        # get shortest path
        path = nx.shortest_path(self.DCNetwork_graph, vnf_src_name, vnf_dst_name)
        logging.info("Path between {0} and {1}: {2}".format(vnf_src_name, vnf_dst_name, path))

        current_hop = vnf_src_name
        for i in range(0, len(path)):
            next_hop = path[path.index(current_hop) + 1]
            next_node = self.getNodeByName(next_hop)

            if next_hop == vnf_dst_name:
                return "path added between {0} and {1}".format(vnf_src_name, vnf_dst_name)
            elif not isinstance(next_node, OVSSwitch):
                logging.info("Next node: {0} is not a switch".format(next_hop))
                return "Next node: {0} is not a switch".format(next_hop)

            switch_inport = self.DCNetwork_graph[current_hop][next_hop]['dst_port']
            next2_hop = path[path.index(current_hop) + 2]
            switch_outport = self.DCNetwork_graph[next_hop][next2_hop]['src_port']

            logging.info("add flow in switch: {0} in_port: {1} out_port: {2}".format(next_node.name, switch_inport, switch_outport))
            # set flow entry via ovs-ofctl
            # TODO use REST API of Ryu to set flow entries on the switch with the correct dpid
            if isinstance(next_node, OVSSwitch):
                match = 'in_port=%s' % switch_inport

                if cmd == 'add-flow':
                    action = 'action=%s' % switch_outport
                    s = ','
                    ofcmd = s.join([match, action])
                elif cmd == 'del-flows':
                    ofcmd = match
                else:
                    ofcmd = ''

                next_node.dpctl(cmd, ofcmd)

            current_hop = next_hop

        return "destination node: {0} not reached".format(vnf_dst_name)
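
    # Illustrative calls (the VNF names are hypothetical), matching the cmd
    # values handled above:
    #   net.setChain('vnf1', 'vnf2')                    # install flow entries along the shortest path
    #   net.setChain('vnf1', 'vnf2', cmd='del-flows')   # remove them again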

    # start Ryu OpenFlow controller as Remote Controller for the DCNetwork
    def startRyu(self):
        # start Ryu controller with REST API
        python_install_path = site.getsitepackages()[0]
        ryu_path = python_install_path + '/ryu/app/simple_switch_13.py'
        ryu_path2 = python_install_path + '/ryu/app/ofctl_rest.py'
        # change the default OpenFlow controller port to 6653 (official IANA-assigned port number), as used by Mininet
        # Ryu still uses 6633 as default
        ryu_option = '--ofp-tcp-listen-port'
        ryu_of_port = '6653'
        ryu_cmd = 'ryu-manager'
        FNULL = open("/tmp/ryu.log", 'w')
        self.ryu_process = Popen([ryu_cmd, ryu_path, ryu_path2, ryu_option, ryu_of_port], stdout=FNULL, stderr=FNULL)
        time.sleep(1)

    def stopRyu(self):
        if self.ryu_process is not None:
            self.ryu_process.terminate()
            self.ryu_process.kill()