"""
Distributed Cloud Emulator (dcemulator)
(c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
"""
from mininet.node import Docker
from mininet.link import Link
import logging


DCDPID_BASE = 1000  # start of switch dpid's used for data center switches


class EmulatorCompute(Docker):
    """
    Emulator specific compute node class.
    Inherits from Dockernet's Docker host class.
    Represents a single container connected to a (logical)
    data center.
    We can add emulator specific helper functions to it.
    """

    def __init__(
            self, name, dimage, **kwargs):
        logging.debug("Create EmulatorCompute instance: %s" % name)
        self.datacenter = None  # pointer to current DC

        # call original Docker.__init__
        Docker.__init__(self, name, dimage, **kwargs)

    def getNetworkStatus(self):
        """
        Helper method to receive information about the virtual networks
        this compute instance is connected to.
        """
        # format list of tuples (name, IP, MAC, isUp, status)
        return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status())
                for i in self.intfList()]

    def getStatus(self):
        """
        Helper method to receive information about this compute instance.
        """
        status = {}
        status["name"] = self.name
        status["network"] = self.getNetworkStatus()
        status["image"] = self.dimage
        status["cpu_quota"] = self.cpu_quota
        status["cpu_period"] = self.cpu_period
        status["cpu_shares"] = self.cpu_shares
        status["cpuset"] = self.cpuset
        status["mem_limit"] = self.mem_limit
        status["memswap_limit"] = self.memswap_limit
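        # self.dcli (Docker API client) and self.dc (container reference) come
        # from the underlying Docker host class; they are used here to query
        # the Docker daemon for the live container state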
        status["state"] = self.dcli.inspect_container(self.dc)["State"]
        status["id"] = self.dcli.inspect_container(self.dc)["Id"]
        status["datacenter"] = (None if self.datacenter is None
                                else self.datacenter.label)
        return status


class Datacenter(object):
    """
    Represents a logical data center to which compute resources
    (Docker containers) can be added at runtime.

    Will also implement resource bookkeeping in later versions.
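
    Example (usage sketch, assuming a started DCNetwork instance "net" that
    owns this data center):

        dc = net.addDatacenter("dc1")
        vnf1 = dc.startCompute("vnf1", image="ubuntu")
        print(dc.listCompute())
        dc.stopCompute("vnf1")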
    """

    DC_COUNTER = 1

    def __init__(self, label, metadata=None):
        self.net = None  # DCNetwork to which we belong
        # each node (DC) has a short internal name used by Mininet
        # this is caused by Mininet's naming limitations for switches etc.
        self.name = "dc%d" % Datacenter.DC_COUNTER
        Datacenter.DC_COUNTER += 1
        # use this for user defined names that can be longer than self.name
        self.label = label
        # dict to store arbitrary metadata (e.g. latitude and longitude)
        # (default to None instead of a mutable default argument)
        self.metadata = metadata if metadata is not None else {}
        self.switch = None  # first prototype assumes one "bigswitch" per DC
        self.containers = {}  # keep track of running containers

    def __repr__(self):
        return self.label

    def _get_next_dc_dpid(self):
        global DCDPID_BASE
        DCDPID_BASE += 1
        return DCDPID_BASE

    def create(self):
        """
        Each data center is represented by a single switch to which
        compute resources can be connected at run time.

        TODO: This will be changed in the future to support multiple networks
        per data center
        """
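        # Mininet expects the dpid as a plain hex digit string, so the "0x"
        # prefix added by hex() is stripped here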
        self.switch = self.net.addSwitch(
            "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:])
        logging.debug("created data center switch: %s" % str(self.switch))

    def start(self):
        pass

    def startCompute(self, name, image=None, command=None, network=None):
        """
        Create a new container as compute resource and connect it to this
        data center.
        :param name: name (string)
        :param image: image name (string)
        :param command: command (string)
        :param network: single network dict or list of network dicts,
            e.g. [{"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"}]
        :return: the newly created container (compute instance)
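
        Example (illustrative sketch; "dc" refers to this data center):

            dc.startCompute("vnf1")  # defaults: image "ubuntu", one default network
            dc.startCompute("vnf2", image="ubuntu",
                            network=[{"ip": "10.0.0.1/24"}, {"ip": "11.0.0.1/24"}])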
        """
        assert name is not None
        # no duplications
        if name in [c.name for c in self.net.getAllContainers()]:
            raise Exception("Container with name %s already exists." % name)
        # set default parameter
        if image is None:
            image = "ubuntu"
        if network is None:
            network = {}  # {"ip": "10.0.0.254/8"}
        if isinstance(network, dict):
            network = [network]  # if we have only one network, put it in a list
        if isinstance(network, list):
            if len(network) < 1:
                network.append({})

        # create the container
        d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command)
        # connect all given networks
        for nw in network:
            # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
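            # params1 (e.g. {"ip": "10.0.0.1/24"}) configures the
            # container-side interface (node1) of the new link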
            self.net.addLink(d, self.switch, params1=nw, cls=Link)
        # do bookkeeping
        self.containers[name] = d
        d.datacenter = self
        return d  # we might use UUIDs for naming later on

    def stopCompute(self, name):
        """
        Stop and remove a container from this data center.
        """
        assert name is not None
        if name not in self.containers:
            raise Exception("Container with name %s not found." % name)
        self.net.removeLink(
            link=None, node1=self.containers[name], node2=self.switch)
        self.net.removeDocker("%s" % (name))
        del self.containers[name]
        return True

    def listCompute(self):
        """
        Return a list of all running containers assigned to this
        data center.
        """
        return list(self.containers.values())

    def getStatus(self):
        """
        Return a dict with status information about this DC.
        """
        return {
            "label": self.label,
            "internalname": self.name,
            "switch": self.switch.name,
            "n_running_containers": len(self.containers),
            "metadata": self.metadata
        }