src/emuvim/dcemulator/node.py
1 """
2 Copyright (c) 2015 SONATA-NFV and Paderborn University
3 ALL RIGHTS RESERVED.
4
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
8
9 http://www.apache.org/licenses/LICENSE-2.0
10
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
16
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
21
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
27 """
from mininet.node import Docker
from mininet.link import Link
from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable
import logging
import time
import json

LOG = logging.getLogger("dcemulator.node")
LOG.setLevel(logging.DEBUG)


DCDPID_BASE = 1000  # start of switch DPIDs used for data center switches


class EmulatorCompute(Docker):
    """
    Emulator specific compute node class.
    Inherits from Containernet's Docker host class.
    Represents a single container connected to a (logical)
    data center.
    We can add emulator specific helper functions to it.
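
    Instances are usually not created directly; they are returned by
    Datacenter.startCompute(). A sketch, assuming a started emulation with
    a Datacenter instance `dc`:

        vnf1 = dc.startCompute("vnf1", image="ubuntu:trusty")
        vnf1.getStatus()["network"]   # same data as vnf1.getNetworkStatus()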
49 """
50
51 def __init__(
52 self, name, dimage, **kwargs):
53 self.datacenter = kwargs.get("datacenter") # pointer to current DC
54 self.flavor_name = kwargs.get("flavor_name")
55 LOG.debug("Starting compute instance %r in data center %r" % (name, str(self.datacenter)))
56 # call original Docker.__init__
57 Docker.__init__(self, name, dimage, **kwargs)
58
59 def getNetworkStatus(self):
60 """
61 Helper method to receive information about the virtual networks
62 this compute instance is connected to.
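
        Returns a list with one dict per interface. A sketch of the expected
        shape (interface and port names depend on the emulated topology):

            [{'intf_name': 'vnf1-eth0', 'ip': '10.0.0.3', 'mac': '...',
              'up': True, 'status': 'OK', 'dc_portname': 'dc1.s1-eth2'}]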
63 """
64 # get all links and find dc switch interface
65 networkStatusList = []
66 for i in self.intfList():
67 vnf_name = self.name
68 vnf_interface = str(i)
69 dc_port_name = self.datacenter.net.find_connected_dc_interface(vnf_name, vnf_interface)
70 # format list of tuples (name, Ip, MAC, isUp, status, dc_portname)
71 intf_dict = {'intf_name': str(i), 'ip': i.IP(), 'mac': i.MAC(), 'up': i.isUp(), 'status': i.status(), 'dc_portname': dc_port_name}
72 networkStatusList.append(intf_dict)
73
74 return networkStatusList

    def getStatus(self):
        """
        Helper method to retrieve information about this compute instance.
        """
        status = {}
        status["name"] = self.name
        status["network"] = self.getNetworkStatus()
        status["docker_network"] = self.dcinfo['NetworkSettings']['IPAddress']
        status["image"] = self.dimage
        status["flavor_name"] = self.flavor_name
        status["cpu_quota"] = self.cpu_quota
        status["cpu_period"] = self.cpu_period
        status["cpu_shares"] = self.cpu_shares
        status["cpuset"] = self.cpuset
        status["mem_limit"] = self.mem_limit
        status["memswap_limit"] = self.memswap_limit
        status["state"] = self.dcli.inspect_container(self.dc)["State"]
        status["id"] = self.dcli.inspect_container(self.dc)["Id"]
        status["datacenter"] = (None if self.datacenter is None
                                else self.datacenter.label)
        return status


class Datacenter(object):
    """
    Represents a logical data center to which compute resources
    (Docker containers) can be added at runtime.

    Will also implement resource bookkeeping in later versions.
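
    Typical usage (a sketch, assuming the surrounding DCNetwork API; the
    label "dc1" and the image name are only examples):

        net = DCNetwork()
        dc = net.addDatacenter("dc1")
        net.start()
        vnf = dc.startCompute("vnf1", image="ubuntu:trusty")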
105 """
106
107 DC_COUNTER = 1
108
109 def __init__(self, label, metadata={}, resource_log_path=None):
110 self.net = None # DCNetwork to which we belong
111 # each node (DC) has a short internal name used by Mininet
112 # this is caused by Mininets naming limitations for swtiches etc.
113 self.name = "dc%d" % Datacenter.DC_COUNTER
114 Datacenter.DC_COUNTER += 1
115 # use this for user defined names that can be longer than self.name
116 self.label = label
117 # dict to store arbitrary metadata (e.g. latitude and longitude)
118 self.metadata = metadata
119 # path to which resource information should be logged (e.g. for experiments). None = no logging
120 self.resource_log_path = resource_log_path
121 # first prototype assumes one "bigswitch" per DC
122 self.switch = None
123 # keep track of running containers
124 self.containers = {}
125 # pointer to assigned resource model
126 self._resource_model = None
127
128 def __repr__(self):
129 return self.label
130
131 def _get_next_dc_dpid(self):
132 global DCDPID_BASE
133 DCDPID_BASE += 1
134 return DCDPID_BASE
135
    def create(self):
        """
        Each data center is represented by a single switch to which
        compute resources can be connected at run time.

        TODO: This will be changed in the future to support multiple networks
        per data center
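
        For example, in a fresh emulation the first data center creates a
        switch named "dc1.s1" with DPID hex(1001)[2:] == "3e9".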
143 """
144 self.switch = self.net.addSwitch(
145 "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:])
146 LOG.debug("created data center switch: %s" % str(self.switch))
147
148 def start(self):
149 pass
150
    def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny"):
        """
        Create a new container as compute resource and connect it to this
        data center.
        :param name: name (string)
        :param image: image name (string)
        :param command: command (string)
        :param network: list of network dicts, e.g. [{"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"}]
        :param flavor_name: name of the flavor for this compute container
        :return: the created container object, or None if the resource model blocked the allocation
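
        Example (a sketch; the interface id and IP are illustrative only):

            dc.startCompute("vnf1", image="ubuntu:trusty",
                            network=[{"id": "intf1", "ip": "10.0.0.1/24"}])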
161 """
162 assert name is not None
163 # no duplications
164 if name in [c.name for c in self.net.getAllContainers()]:
165 raise Exception("Container with name %s already exists." % name)
166 # set default parameter
167 if image is None:
168 image = "ubuntu:trusty"
169 if network is None:
170 network = {} # {"ip": "10.0.0.254/8"}
171 if isinstance(network, dict):
172 network = [network] # if we have only one network, put it in a list
173 if isinstance(network, list):
174 if len(network) < 1:
175 network.append({})
176
177 # create the container
178 d = self.net.addDocker(
179 "%s" % (name),
180 dimage=image,
181 dcmd=command,
182 datacenter=self,
183 flavor_name=flavor_name
184 )
185
186 # apply resource limits to container if a resource model is defined
187 if self._resource_model is not None:
188 try:
189 self._resource_model.allocate(d)
190 self._resource_model.write_allocation_log(d, self.resource_log_path)
191 except NotEnoughResourcesAvailable as ex:
192 LOG.warning("Allocation of container %r was blocked by resource model." % name)
193 LOG.info(ex.message)
194 # ensure that we remove the container
195 self.net.removeDocker(name)
196 return None
197
198 # connect all given networks
199 # if no --net option is given, network = [{}], so 1 empty dict in the list
200 # this results in 1 default interface with a default ip address
201 for nw in network:
202 # clean up network configuration (e.g. RTNETLINK does not allow ':' in intf names
203 if nw.get("id") is not None:
204 nw["id"] = self._clean_ifname(nw["id"])
205 # TODO we cannot use TCLink here (see: https://github.com/mpeuster/containernet/issues/3)
206 self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
207 # do bookkeeping
208 self.containers[name] = d
209 return d # we might use UUIDs for naming later on
210
    def stopCompute(self, name):
        """
        Stop and remove a container from this data center.
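
        Returns True on success, e.g.:

            dc.stopCompute("vnf1")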
214 """
215 assert name is not None
216 if name not in self.containers:
217 raise Exception("Container with name %s not found." % name)
218 LOG.debug("Stopping compute instance %r in data center %r" % (name, str(self)))
219
220 # stop the monitored metrics
221 if self.net.monitor_agent is not None:
222 self.net.monitor_agent.stop_metric(name)
223
224 # call resource model and free resources
225 if self._resource_model is not None:
226 self._resource_model.free(self.containers[name])
227 self._resource_model.write_free_log(self.containers[name], self.resource_log_path)
228
229 # remove links
230 self.net.removeLink(
231 link=None, node1=self.containers[name], node2=self.switch)
232
233 # remove container
234 self.net.removeDocker("%s" % (name))
235 del self.containers[name]
236
237 return True
238
    def listCompute(self):
        """
        Return a list of all running containers assigned to this
        data center.
        """
        return list(self.containers.itervalues())

    def getStatus(self):
        """
        Return a dict with status information about this DC.
        """
        return {
            "label": self.label,
            "internalname": self.name,
            "switch": self.switch.name,
            "n_running_containers": len(self.containers),
            "metadata": self.metadata
        }

    def assignResourceModel(self, rm):
        """
        Assign a resource model to this DC.
        :param rm: a BaseResourceModel object
        :return:
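
        Example (a sketch, assuming the UpbSimpleCloudDcRM model shipped with
        the emulator and its max_cu/max_mu capacity parameters):

            from emuvim.dcemulator.resourcemodel.upb.simple import UpbSimpleCloudDcRM
            dc.assignResourceModel(UpbSimpleCloudDcRM(max_cu=4, max_mu=1024))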
263 """
264 if self._resource_model is not None:
265 raise Exception("There is already an resource model assigned to this DC.")
266 self._resource_model = rm
267 self.net.rm_registrar.register(self, rm)
268 LOG.info("Assigned RM: %r to DC: %r" % (rm, self))
269
    @staticmethod
    def _clean_ifname(name):
        """
        Cleans up the given string to be an
        RTNETLINK compatible interface name.
        :param name: string
        :return: string
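
        Examples:
            _clean_ifname("eth0:1") -> "eth0-1"
            _clean_ifname(None)     -> "if0"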
277 """
278 if name is None:
279 return "if0"
280 name = name.replace(":", "-")
281 name = name.replace(" ", "-")
282 name = name.replace(".", "-")
283 name = name.replace("_", "-")
284 return name
285