First part of Containernet re-integration
[osm/vim-emu.git] / src / emuvim / dcemulator / node.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 from mininet.node import Docker
6 from mininet.link import Link
7 from emuvim.dcemulator.resourcemodel import NotEnoughResourcesAvailable
8 import logging
9 import time
10 import json
11
# Module-level logger shared by all emulator data center classes.
LOG = logging.getLogger("dcemulator")
LOG.setLevel(logging.DEBUG)

# Start of the switch DPID range reserved for data center switches.
DCDPID_BASE = 1000
17
18
class EmulatorCompute(Docker):
    """
    Emulator specific compute node class.
    Inherits from Containernet's Docker host class.
    Represents a single container connected to a (logical)
    data center.
    We can add emulator specific helper functions to it.
    """

    def __init__(
            self, name, dimage, **kwargs):
        """
        :param name: container name (string)
        :param dimage: Docker image name (string)
        :param kwargs: passed through to Docker.__init__; "datacenter" and
                       "flavor_name" are consumed here as emulator metadata
        """
        # pointer to the Datacenter object this container belongs to (may be None)
        self.datacenter = kwargs.get("datacenter")
        # name of the flavor used for this compute instance (may be None)
        self.flavor_name = kwargs.get("flavor_name")
        LOG.debug("Starting compute instance %r in data center %r" % (name, str(self.datacenter)))
        # call original Docker.__init__ to actually create the container
        Docker.__init__(self, name, dimage, **kwargs)

    def getNetworkStatus(self):
        """
        Helper method to receive information about the virtual networks
        this compute instance is connected to.

        :return: list of dicts (one per interface) with name, IP, MAC,
                 link state and status string
        """
        return [{'intf_name': str(i), 'ip': i.IP(), 'mac': i.MAC(),
                 'up': i.isUp(), 'status': i.status()}
                for i in self.intfList()]

    def getStatus(self):
        """
        Helper method to receive information about this compute instance.

        :return: dict with name, networks, image, flavor, resource limits,
                 Docker state/id and the label of the owning data center
        """
        # fix: inspect the container only once instead of issuing two
        # identical Docker API calls for "State" and "Id"
        inspection = self.dcli.inspect_container(self.dc)
        status = {}
        status["name"] = self.name
        status["network"] = self.getNetworkStatus()
        status["docker_network"] = self.dcinfo['NetworkSettings']['IPAddress']
        status["image"] = self.dimage
        status["flavor_name"] = self.flavor_name
        status["cpu_quota"] = self.cpu_quota
        status["cpu_period"] = self.cpu_period
        status["cpu_shares"] = self.cpu_shares
        status["cpuset"] = self.cpuset
        status["mem_limit"] = self.mem_limit
        status["memswap_limit"] = self.memswap_limit
        status["state"] = inspection["State"]
        status["id"] = inspection["Id"]
        status["datacenter"] = (None if self.datacenter is None
                                else self.datacenter.label)
        return status
66
67
class Datacenter(object):
    """
    Represents a logical data center to which compute resources
    (Docker containers) can be added at runtime.

    Will also implement resource bookkeeping in later versions.
    """

    # class-wide counter used to generate unique internal names (dc1, dc2, ...)
    DC_COUNTER = 1

    def __init__(self, label, metadata=None, resource_log_path=None):
        """
        :param label: user-defined name of this data center (can be long)
        :param metadata: dict with arbitrary metadata (e.g. latitude and longitude)
        :param resource_log_path: path to which resource information should be
               logged (e.g. for experiments); None = no logging
        """
        self.net = None  # DCNetwork to which we belong
        # each node (DC) has a short internal name used by Mininet
        # this is caused by Mininet's naming limitations for switches etc.
        self.name = "dc%d" % Datacenter.DC_COUNTER
        Datacenter.DC_COUNTER += 1
        # use this for user defined names that can be longer than self.name
        self.label = label
        # fix: avoid the shared mutable default argument; every DC must get
        # its own metadata dict unless the caller provides one
        self.metadata = {} if metadata is None else metadata
        self.resource_log_path = resource_log_path
        # first prototype assumes one "bigswitch" per DC
        self.switch = None
        # keep track of running containers (name -> container object)
        self.containers = {}
        # pointer to assigned resource model (None = no limits enforced)
        self._resource_model = None

    def __repr__(self):
        return self.label

    def _get_next_dc_dpid(self):
        """Return the next free datapath id for a data center switch."""
        global DCDPID_BASE
        DCDPID_BASE += 1
        return DCDPID_BASE

    def create(self):
        """
        Each data center is represented by a single switch to which
        compute resources can be connected at run time.

        TODO: This will be changed in the future to support multiple networks
        per data center
        """
        self.switch = self.net.addSwitch(
            "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:])
        LOG.debug("created data center switch: %s" % str(self.switch))

    def start(self):
        # nothing to do yet; switches are started by the network
        pass

    def startCompute(self, name, image=None, command=None, network=None, flavor_name="tiny"):
        """
        Create a new container as compute resource and connect it to this
        data center.
        :param name: name (string)
        :param image: image name (string)
        :param command: command (string)
        :param network: networks list({"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"})
        :param flavor_name: name of the flavor for this compute container
        :return: the created container, or None if the resource model
                 rejected the allocation
        """
        assert name is not None
        # no duplications
        if name in [c.name for c in self.net.getAllContainers()]:
            raise Exception("Container with name %s already exists." % name)
        # set default parameter
        if image is None:
            image = "ubuntu:trusty"
        if network is None:
            network = {}  # {"ip": "10.0.0.254/8"}
        if isinstance(network, dict):
            network = [network]  # if we have only one network, put it in a list
        if isinstance(network, list):
            if len(network) < 1:
                network.append({})

        # create the container
        d = self.net.addDocker(
            "%s" % (name),
            dimage=image,
            dcmd=command,
            datacenter=self,
            flavor_name=flavor_name
        )

        # apply resource limits to container if a resource model is defined
        if self._resource_model is not None:
            try:
                self._resource_model.allocate(d)
                self._resource_model.write_allocation_log(d, self.resource_log_path)
            except NotEnoughResourcesAvailable as ex:
                LOG.warning("Allocation of container %r was blocked by resource model." % name)
                # fix: BaseException.message is deprecated (removed in Py3);
                # str(ex) is the portable way to get the message
                LOG.info(str(ex))
                # ensure that we remove the container
                self.net.removeDocker(name)
                return None

        # connect all given networks
        # if no --net option is given, network = [{}], so 1 empty dict in the list
        # this results in 1 default interface with a default ip address
        for nw in network:
            # TODO we cannot use TCLink here (see: https://github.com/mpeuster/containernet/issues/3)
            self.net.addLink(d, self.switch, params1=nw, cls=Link, intfName1=nw.get('id'))
        # do bookkeeping
        self.containers[name] = d
        return d  # we might use UUIDs for naming later on

    def stopCompute(self, name):
        """
        Stop and remove a container from this data center.
        :param name: name of the container to remove
        :raises Exception: if no container with this name runs in this DC
        :return: True on success
        """
        assert name is not None
        if name not in self.containers:
            raise Exception("Container with name %s not found." % name)
        LOG.debug("Stopping compute instance %r in data center %r" % (name, str(self)))

        # hoist the repeated dict lookups into a single local reference
        container = self.containers[name]

        # stop the monitored metrics
        if self.net.monitor_agent is not None:
            self.net.monitor_agent.stop_metric(name)

        # call resource model and free resources
        if self._resource_model is not None:
            self._resource_model.free(container)
            self._resource_model.write_free_log(container, self.resource_log_path)

        # remove links
        self.net.removeLink(
            link=None, node1=container, node2=self.switch)

        # remove container
        self.net.removeDocker("%s" % (name))
        del self.containers[name]

        return True

    def listCompute(self):
        """
        Return a list of all running containers assigned to this
        data center.
        """
        # fix: dict.itervalues() is Python-2-only; values() works on both
        return list(self.containers.values())

    def getStatus(self):
        """
        Return a dict with status information about this DC.
        """
        return {
            "label": self.label,
            "internalname": self.name,
            "switch": self.switch.name,
            "n_running_containers": len(self.containers),
            "metadata": self.metadata
        }

    def assignResourceModel(self, rm):
        """
        Assign a resource model to this DC.
        :param rm: a BaseResourceModel object
        :raises Exception: if a resource model is already assigned
        :return:
        """
        if self._resource_model is not None:
            raise Exception("There is already a resource model assigned to this DC.")
        self._resource_model = rm
        self.net.rm_registrar.register(self, rm)
        LOG.info("Assigned RM: %r to DC: %r" % (rm, self))
235