Merge remote-tracking branch 'upstream/master'
[osm/vim-emu.git] / src / emuvim / dcemulator / node.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
28 from mininet.node import Docker
29 from mininet.link import Link
30 from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable
31 import logging
32 import time
33 import json
34
# Module-level logger for data-center / compute-node events; DEBUG is forced
# here so emulator internals are always visible regardless of root config.
LOG = logging.getLogger("dcemulator.node")
LOG.setLevel(logging.DEBUG)


DCDPID_BASE = 1000 # start of switch dpid's used for data center switches
40
class EmulatorCompute(Docker):
    """
    Emulator specific compute node class.
    Inherits from Containernet's Docker host class.
    Represents a single container connected to a (logical)
    data center.
    We can add emulator specific helper functions to it.
    """

    def __init__(
            self, name, dimage, **kwargs):
        """
        :param name: name of this compute instance (string)
        :param dimage: Docker image to run (string)
        :param kwargs: passed through to Docker.__init__; "datacenter" and
                       "flavor_name" are consumed here as emulator metadata
        """
        # pointer to current DC this instance is attached to
        self.datacenter = kwargs.get("datacenter")
        # flavor used when this instance was created (e.g. "tiny")
        self.flavor_name = kwargs.get("flavor_name")
        LOG.debug("Starting compute instance %r in data center %r" % (name, str(self.datacenter)))
        # call original Docker.__init__
        Docker.__init__(self, name, dimage, **kwargs)

    def getNetworkStatus(self):
        """
        Helper method to receive information about the virtual networks
        this compute instance is connected to.
        :return: list of dicts, one per interface, with keys
                 intf_name, ip, mac, up, status, dc_portname
        """
        # get all links and find dc switch interface
        networkStatusList = []
        for i in self.intfList():
            vnf_name = self.name
            vnf_interface = str(i)
            # resolve the data center switch port this interface is wired to
            dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
            # format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
            intf_dict = {'intf_name': str(i), 'ip': i.IP(), 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
            networkStatusList.append(intf_dict)

        return networkStatusList

    def getStatus(self):
        """
        Helper method to receive information about this compute instance.
        :return: dict with name, network, resource limits and Docker state
        """
        # inspect the container once: the original issued four identical
        # inspect_container API calls, which is slower and could even return
        # inconsistent snapshots if the container state changed in between
        inspection = self.dcli.inspect_container(self.dc)
        status = {}
        status["name"] = self.name
        status["network"] = self.getNetworkStatus()
        status["docker_network"] = self.dcinfo['NetworkSettings']['IPAddress']
        status["image"] = self.dimage
        status["flavor_name"] = self.flavor_name
        status["cpu_quota"] = self.resources.get('cpu_quota')
        status["cpu_period"] = self.resources.get('cpu_period')
        status["cpu_shares"] = self.resources.get('cpu_shares')
        status["cpuset"] = self.resources.get('cpuset_cpus')
        status["mem_limit"] = self.resources.get('mem_limit')
        status["memswap_limit"] = self.resources.get('memswap_limit')
        status["state"] = inspection["State"]
        status["id"] = inspection["Id"]
        status["short_id"] = inspection["Id"][:12]
        status["hostname"] = inspection["Config"]['Hostname']
        status["datacenter"] = (None if self.datacenter is None
                                else self.datacenter.label)

        return status
100
class Datacenter(object):
    """
    Represents a logical data center to which compute resources
    (Docker containers) can be added at runtime.

    Will also implement resource bookkeeping in later versions.
    """

    # class-wide counter used to build unique short Mininet names (dc1, dc2, ...)
    DC_COUNTER = 1

    def __init__(self, label, metadata=None, resource_log_path=None):
        """
        :param label: user defined label for this DC (can be longer than
                      the internal Mininet name)
        :param metadata: optional dict with arbitrary metadata
                         (e.g. latitude and longitude)
        :param resource_log_path: path to which resource information should be
                                  logged (e.g. for experiments); None = no logging
        """
        self.net = None  # DCNetwork to which we belong
        # each node (DC) has a short internal name used by Mininet
        # this is caused by Mininets naming limitations for switches etc.
        self.name = "dc%d" % Datacenter.DC_COUNTER
        Datacenter.DC_COUNTER += 1
        # use this for user defined names that can be longer than self.name
        self.label = label
        # BUGFIX: the default used to be a shared mutable dict (metadata={}),
        # so metadata written on one DC leaked into every other DC created
        # without an explicit metadata argument; create a fresh dict instead
        self.metadata = {} if metadata is None else metadata
        # path to which resource information should be logged. None = no logging
        self.resource_log_path = resource_log_path
        # first prototype assumes one "bigswitch" per DC
        self.switch = None
        # keep track of running containers
        self.containers = {}
        # pointer to assigned resource model
        self._resource_model = None

    def __repr__(self):
        return self.label

    def _get_next_dc_dpid(self):
        """Return the next free datapath id for a data center switch."""
        global DCDPID_BASE
        DCDPID_BASE += 1
        return DCDPID_BASE

    def create(self):
        """
        Each data center is represented by a single switch to which
        compute resources can be connected at run time.

        TODO: This will be changed in the future to support multiple networks
        per data center
        """
        self.switch = self.net.addSwitch(
            "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:])
        LOG.debug("created data center switch: %s" % str(self.switch))

    def start(self):
        pass

    def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny", **params):
        """
        Create a new container as compute resource and connect it to this
        data center.
        :param name: name (string)
        :param image: image name (string)
        :param command: command (string)
        :param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
        :param flavor_name: name of the flavor for this compute container
        :return: the container object, or None if the resource model blocked
                 the allocation
        :raises Exception: if a container with this name already exists
        """
        assert name is not None
        # no duplications
        if name in [c.name for c in self.net.getAllContainers()]:
            raise Exception("Container with name %s already exists." % name)
        # set default parameter
        if image is None:
            image = "ubuntu:trusty"
        if network is None:
            network = {}  # {"ip": "10.0.0.254/8"}
        if isinstance(network, dict):
            network = [network]  # if we have only one network, put it in a list
        if isinstance(network, list):
            if len(network) < 1:
                network.append({})

        # apply hard-set resource limits=0
        cpu_percentage = params.get('cpu_percent')
        if cpu_percentage:
            params['cpu_period'] = self.net.cpu_period
            params['cpu_quota'] = self.net.cpu_period * float(cpu_percentage)

        # create the container
        d = self.net.addDocker(
            "%s" % (name),
            dimage=image,
            dcmd=command,
            datacenter=self,
            flavor_name=flavor_name,
            environment={'VNF_NAME': name},
            **params
        )

        # apply resource limits to container if a resource model is defined
        if self._resource_model is not None:
            try:
                self._resource_model.allocate(d)
                self._resource_model.write_allocation_log(d, self.resource_log_path)
            except NotEnoughResourcesAvailable as ex:
                LOG.warning("Allocation of container %r was blocked by resource model." % name)
                # str(ex) instead of ex.message: BaseException.message was
                # deprecated in Python 2.6 and removed in Python 3
                LOG.info(str(ex))
                # ensure that we remove the container
                self.net.removeDocker(name)
                return None

        # connect all given networks
        # if no --net option is given, network = [{}], so 1 empty dict in the list
        # this results in 1 default interface with a default ip address
        for nw in network:
            # clean up network configuration (e.g. RTNETLINK does not allow ':' in intf names
            if nw.get("id") is not None:
                nw["id"] = self._clean_ifname(nw["id"])
            # TODO we cannot use TCLink here (see: https://github.com/mpeuster/containernet/issues/3)
            self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
        # do bookkeeping
        self.containers[name] = d
        return d  # we might use UUIDs for naming later on

    def stopCompute(self, name):
        """
        Stop and remove a container from this data center.
        :param name: name of the container to stop (string)
        :return: True on success
        :raises Exception: if no container with this name is running here
        """
        assert name is not None
        if name not in self.containers:
            raise Exception("Container with name %s not found." % name)
        LOG.debug("Stopping compute instance %r in data center %r" % (name, str(self)))

        # stop the monitored metrics
        if self.net.monitor_agent is not None:
            self.net.monitor_agent.stop_metric(name)

        # call resource model and free resources
        if self._resource_model is not None:
            self._resource_model.free(self.containers[name])
            self._resource_model.write_free_log(self.containers[name], self.resource_log_path)

        # remove links
        self.net.removeLink(
            link=None, node1=self.containers[name], node2=self.switch)

        # remove container
        self.net.removeDocker("%s" % (name))
        del self.containers[name]

        return True

    def attachExternalSAP(self, sap_name, sap_ip):
        """
        Attach an external service access point (SAP) as an OVS internal
        interface on this DC's switch and register both edge directions in
        the DCNetwork graph so the SAP is available for routing.
        :param sap_name: name of the SAP (string)
        :param sap_ip: IP address for the SAP interface (string)
        """
        # create SAP as OVS internal interface
        sap_intf = self.switch.attachInternalIntf(sap_name, sap_ip)

        # add this as a link to the DCnetwork graph, so it is available for routing
        attr_dict2 = {'src_port_id': sap_name, 'src_port_nr': None,
                      'src_port_name': sap_name,
                      'dst_port_id': self.switch.ports[sap_intf], 'dst_port_nr': self.switch.ports[sap_intf],
                      'dst_port_name': sap_intf.name}
        self.net.DCNetwork_graph.add_edge(sap_name, self.switch.name, attr_dict=attr_dict2)

        # reverse direction of the same link
        attr_dict2 = {'dst_port_id': sap_name, 'dst_port_nr': None,
                      'dst_port_name': sap_name,
                      'src_port_id': self.switch.ports[sap_intf], 'src_port_nr': self.switch.ports[sap_intf],
                      'src_port_name': sap_intf.name}
        self.net.DCNetwork_graph.add_edge(self.switch.name, sap_name, attr_dict=attr_dict2)

    def listCompute(self):
        """
        Return a list of all running containers assigned to this
        data center.
        """
        # .values() instead of .itervalues(): the latter is Python 2 only;
        # list(...) keeps the return type identical on both interpreters
        return list(self.containers.values())

    def getStatus(self):
        """
        Return a dict with status information about this DC.
        """
        return {
            "label": self.label,
            "internalname": self.name,
            "switch": self.switch.name,
            "n_running_containers": len(self.containers),
            "metadata": self.metadata
        }

    def assignResourceModel(self, rm):
        """
        Assign a resource model to this DC.
        :param rm: a BaseResourceModel object
        :return:
        :raises Exception: if a resource model is already assigned to this DC
        """
        if self._resource_model is not None:
            raise Exception("There is already an resource model assigned to this DC.")
        self._resource_model = rm
        self.net.rm_registrar.register(self, rm)
        LOG.info("Assigned RM: %r to DC: %r" % (rm, self))

    @staticmethod
    def _clean_ifname(name):
        """
        Cleans up given string to be a
        RTNETLINK compatible interface name.
        :param name: string (or None)
        :return: string
        """
        if name is None:
            return "if0"
        name = name.replace(":", "-")
        name = name.replace(" ", "-")
        name = name.replace(".", "-")
        name = name.replace("_", "-")
        return name
315