Enabled TCLinks (e.g. delay property) between data centers.
[osm/vim-emu.git] / src/emuvim/dcemulator/node.py
1 """
2 Distributed Cloud Emulator (dcemulator)
3 (c) 2015 by Manuel Peuster <manuel.peuster@upb.de>
4 """
5 from mininet.node import Docker
6 from mininet.link import Link
7 import logging
8
9
10 DCDPID_BASE = 1000 # start of switch dpid's used for data center switches
11
12
13 class EmulatorCompute(Docker):
14 """
15 Emulator specific compute node class.
16 Inherits from Dockernet's Docker host class.
17 Represents a single container connected to a (logical)
18 data center.
19 We can add emulator specific helper functions to it.
20 """
21
22 def __init__(
23 self, name, dimage, **kwargs):
24 logging.debug("Create EmulatorCompute instance: %s" % name)
25 self.datacenter = None # pointer to current DC
26
27 # call original Docker.__init__
28 Docker.__init__(self, name, dimage, **kwargs)
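    # Note: EmulatorCompute instances are normally created indirectly via
    # Datacenter.startCompute() below, which adds the container through
    # self.net.addDocker() and then fills in the 'datacenter' pointer set here.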

    def getNetworkStatus(self):
        """
        Helper method to retrieve information about the virtual networks
        this compute instance is connected to.
        """
        # format: list of tuples (name, IP, MAC, isUp, status)
        return [(str(i), i.IP(), i.MAC(), i.isUp(), i.status())
                for i in self.intfList()]
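    # Example return value of getNetworkStatus() for a container with one
    # interface (illustrative values only):
    #   [("vnf1-eth0", "10.0.0.1", "00:00:00:00:00:01", True, "OK")]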

    def getStatus(self):
        """
        Helper method to retrieve information about this compute instance.
        """
        status = {}
        status["name"] = self.name
        status["network"] = self.getNetworkStatus()
        status["image"] = self.dimage
        status["cpu_quota"] = self.cpu_quota
        status["cpu_period"] = self.cpu_period
        status["cpu_shares"] = self.cpu_shares
        status["cpuset"] = self.cpuset
        status["mem_limit"] = self.mem_limit
        status["memswap_limit"] = self.memswap_limit
        status["state"] = self.dcli.inspect_container(self.dc)["State"]
        status["id"] = self.dcli.inspect_container(self.dc)["Id"]
        status["datacenter"] = (None if self.datacenter is None
                                else self.datacenter.label)
        return status
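    # Usage sketch (illustrative; assumes docker-py's inspect_container() format):
    #   c = dc.startCompute("vnf1")
    #   c.getStatus()["state"]["Running"]   # -> True while the container runs
    #   c.getStatus()["datacenter"]         # -> user-defined label of the DC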


class Datacenter(object):
    """
    Represents a logical data center to which compute resources
    (Docker containers) can be added at runtime.

    Will also implement resource bookkeeping in later versions.
    """

    DC_COUNTER = 1

    def __init__(self, label, metadata={}):
        self.net = None  # DCNetwork to which we belong
        # each node (DC) has a short internal name used by Mininet
        # this is caused by Mininet's naming limitations for switches etc.
        self.name = "dc%d" % Datacenter.DC_COUNTER
        Datacenter.DC_COUNTER += 1
        # use this for user-defined names that can be longer than self.name
        self.label = label
        # dict to store arbitrary metadata (e.g. latitude and longitude)
        self.metadata = metadata
        self.switch = None  # first prototype assumes one "bigswitch" per DC
        self.containers = {}  # keep track of running containers

    def _get_next_dc_dpid(self):
        global DCDPID_BASE
        DCDPID_BASE += 1
        return DCDPID_BASE

    def create(self):
        """
        Each data center is represented by a single switch to which
        compute resources can be connected at run time.

        TODO: This will be changed in the future to support multiple networks
        per data center
        """
        self.switch = self.net.addSwitch(
            "%s.s1" % self.name, dpid=hex(self._get_next_dc_dpid())[2:])
        logging.debug("created data center switch: %s" % str(self.switch))
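        # Illustrative: the first data center switch receives dpid 1001,
        # which is passed to Mininet as the hex string "3e9" (hex(1001)[2:]).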

    def start(self):
        pass

    def startCompute(self, name, image=None, command=None, network=None):
        """
        Create a new container as compute resource and connect it to this
        data center.
        :param name: name (string)
        :param image: image name (string)
        :param command: command (string)
        :param network: list of networks, e.g. [{"ip": "10.0.0.254/8"}, {"ip": "11.0.0.254/24"}]
        :return: the created container object
        """
        assert name is not None
        # no duplications
        if name in [c.name for c in self.net.getAllContainers()]:
            raise Exception("Container with name %s already exists." % name)
        # set default parameters
        if image is None:
            image = "ubuntu"
        if network is None:
            network = {}  # {"ip": "10.0.0.254/8"}
        if isinstance(network, dict):
            network = [network]  # if we have only one network, put it in a list
        if isinstance(network, list):
            if len(network) < 1:
                network.append({})

        # create the container
        d = self.net.addDocker("%s" % (name), dimage=image, dcmd=command)
        # connect all given networks
        for nw in network:
            # TODO we cannot use TCLink here (see: https://github.com/mpeuster/dockernet/issues/3)
            self.net.addLink(d, self.switch, params1=nw, cls=Link)
        # do bookkeeping
        self.containers[name] = d
        d.datacenter = self
        return d  # we might use UUIDs for naming later on
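    # Usage sketch (illustrative values):
    #   dc.startCompute("vnf1")  # default image, one unconfigured interface
    #   dc.startCompute("vnf2", image="ubuntu", command="bash",
    #                   network=[{"ip": "10.0.0.1/24"}, {"ip": "11.0.0.1/24"}])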

    def stopCompute(self, name):
        """
        Stop and remove a container from this data center.
        """
        assert name is not None
        if name not in self.containers:
            raise Exception("Container with name %s not found." % name)
        self.net.removeLink(
            link=None, node1=self.containers[name], node2=self.switch)
        self.net.removeDocker("%s" % (name))
        del self.containers[name]
        return True
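    # Illustrative counterpart to startCompute():
    #   dc.stopCompute("vnf1")  # removes the link to the DC switch and the container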

    def listCompute(self):
        """
        Return a list of all running containers assigned to this
        data center.
        """
        return list(self.containers.values())

    def getStatus(self):
        """
        Return a dict with status information about this DC.
        """
        return {
            "label": self.label,
            "internalname": self.name,
            "switch": self.switch.name,
            "n_running_containers": len(self.containers),
            "metadata": self.metadata
        }
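

# End-to-end usage sketch (illustrative; assumes a DCNetwork class, e.g. from
# emuvim.dcemulator.net, providing addDatacenter() and a TC-enabled addLink()
# for the inter-data-center links, e.g. with a delay property):
#
#   from emuvim.dcemulator.net import DCNetwork
#
#   net = DCNetwork()
#   dc1 = net.addDatacenter("dc1")
#   dc2 = net.addDatacenter("dc2")
#   net.addLink(dc1, dc2, delay="10ms")  # TCLink properties between data centers
#   net.start()
#   vnf = dc1.startCompute("vnf1", network=[{"ip": "10.0.0.1/24"}])
#   print(vnf.getStatus()["datacenter"])  # -> "dc1"
#   net.stop()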