"""
This module implements a simple REST API that behaves like SONATA's gatekeeper.

It is only used to support the development of SONATA's SDK tools and to
demonstrate the year 1 version of the emulator until the integration with
WP4's orchestrator is done.
"""
import hashlib
import logging
import os
import uuid
import zipfile
from collections import defaultdict

import yaml
from docker import Client as DockerClient
from flask import Flask, request
import flask_restful as fr
# Module-wide logger for the dummy gatekeeper; verbose by default.
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
# Flask's request logger (werkzeug) is noisy; only show warnings and above.
werkzeug_log = logging.getLogger("werkzeug")
werkzeug_log.setLevel(logging.WARNING)
# Base folder used by the dummy gatekeeper to persist its state on disk.
GK_STORAGE = "/tmp/son-dummy-gk/"
# Where uploaded *.son packages are stored.
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
# Where the contents of unpacked packages (our catalog) are stored.
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
# NOTE(review): this flag is referenced by _pull_predefined_dockerimages() but
# its definition was lost in this source; default to False so images present
# locally are not re-pulled (matches the comment above) -- confirm.
FORCE_PULL = False
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps track of uploaded service packages and of the
    data centers known to the emulator.
    """

    def __init__(self):
        # maps service_uuid -> Service object
        self.services = dict()
        # maps name -> data center object; filled in by start_rest_api()
        # (referenced later as GK.dcs, so it must exist)
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid: UUID under which the package is stored
        :param service: Service object wrapping the uploaded package
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        # NOTE(review): the onboarding call itself was lost in this source;
        # reconstructed from the comment above -- confirm.
        service.onboard()

    def get_next_vnf_name(self):
        """
        Return a new short, unique VNF name ("vnf1", "vnf2", ...).
        """
        # fix: the counter was never incremented, so every VNF would have
        # received the same name
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self, service_uuid, package_file_hash, package_file_path):
        # NOTE(review): the constructor header was lost in this source; the
        # signature is reconstructed from the assignments below and from the
        # call site Service(service_uuid, file_hash, upload_path) -- confirm.
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        # folder the package contents get extracted to
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        # descriptors read from the package during onboarding
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        # vnf name -> path of the Dockerfile inside the package
        self.local_docker_files = dict()
        # vnf name -> URL of a pre-built Docker image
        self.remote_docker_image_urls = dict()
        # instance_uuid -> instance record (a bit like a NSR)
        self.instances = dict()
        # vnf name (from VNFD) -> short emulator name ("vnfX")
        self.vnfname2num = dict()
85 Do all steps to prepare this service to be instantiated
88 # 1. extract the contents of the package and store them in our catalog
89 self
._unpack
_service
_package
()
90 # 2. read in all descriptor files
91 self
._load
_package
_descriptor
()
94 # 3. prepare container images (e.g. download or build Dockerfile)
96 self
._load
_docker
_files
()
97 self
._build
_images
_from
_dockerfiles
()
99 self
._load
_docker
_urls
()
100 self
._pull
_predefined
_dockerimages
()
101 LOG
.info("On-boarded service: %r" % self
.manifest
.get("package_name"))
103 def start_service(self
):
105 This methods creates and starts a new service instance.
106 It computes placements, iterates over all VNFDs, and starts
107 each VNFD as a Docker container in the data center selected
108 by the placement algorithm.
111 LOG
.info("Starting service %r" % self
.uuid
)
113 # 1. each service instance gets a new uuid to identify it
114 instance_uuid
= str(uuid
.uuid4())
115 # build a instances dict (a bit like a NSR :))
116 self
.instances
[instance_uuid
] = dict()
117 self
.instances
[instance_uuid
]["vnf_instances"] = list()
119 # 2. compute placement of this service instance (adds DC names to VNFDs)
120 if not GK_STANDALONE_MODE
:
121 self
._calculate
_placement
(FirstDcPlacement
)
122 # iterate over all vnfds that we have to start
123 for vnfd
in self
.vnfds
.itervalues():
125 if not GK_STANDALONE_MODE
:
126 vnfi
= self
._start
_vnfd
(vnfd
)
127 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
129 # 3. Configure the chaining of the network functions (currently only E-Line links supported)
130 nfid2name
= defaultdict(lambda :"NotExistingNode",
131 reduce(lambda x
,y
: dict(x
, **y
),
132 map(lambda d
:{d
["vnf_id"]:d
["vnf_name"]},
133 self
.nsd
["network_functions"])))
135 vlinks
= self
.nsd
["virtual_links"]
136 fwd_links
= self
.nsd
["forwarding_graphs"][0]["constituent_virtual_links"]
137 eline_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-Line")]
139 cookie
= 1 # not clear why this is needed - to check with Steven
140 for link
in eline_fwd_links
:
141 src_node
, src_port
= link
["connection_points_reference"][0].split(":")
142 dst_node
, dst_port
= link
["connection_points_reference"][1].split(":")
144 srcname
= nfid2name
[src_node
]
145 dstname
= nfid2name
[dst_node
]
146 LOG
.debug("src name: "+srcname
+" dst name: "+dstname
)
148 if (srcname
in self
.vnfds
) and (dstname
in self
.vnfds
) :
149 network
= self
.vnfds
[srcname
].get("dc").net
# there should be a cleaner way to find the DCNetwork
150 src_vnf
= self
.vnfname2num
[srcname
]
151 dst_vnf
= self
.vnfname2num
[dstname
]
152 ret
= network
.setChain(src_vnf
, dst_vnf
, vnf_src_interface
=src_port
, vnf_dst_interface
=dst_port
, bidirectional
= True, cmd
="add-flow", cookie
= cookie
)
155 # 4. run the emulator specific entrypoint scripts in the VNFIs of this service instance
156 self
._trigger
_emulator
_start
_scripts
_in
_vnfis
(self
.instances
[instance_uuid
]["vnf_instances"])
158 LOG
.info("Service started. Instance id: %r" % instance_uuid
)
161 def _start_vnfd(self
, vnfd
):
163 Start a single VNFD of this service
164 :param vnfd: vnfd descriptor dict
167 # iterate over all deployment units within each VNFDs
168 for u
in vnfd
.get("virtual_deployment_units"):
169 # 1. get the name of the docker image to start and the assigned DC
170 vnf_name
= vnfd
.get("name")
171 if vnf_name
not in self
.remote_docker_image_urls
:
172 raise Exception("No image name for %r found. Abort." % vnf_name
)
173 docker_name
= self
.remote_docker_image_urls
.get(vnf_name
)
174 target_dc
= vnfd
.get("dc")
175 # 2. perform some checks to ensure we can start the container
176 assert(docker_name
is not None)
177 assert(target_dc
is not None)
178 if not self
._check
_docker
_image
_exists
(docker_name
):
179 raise Exception("Docker image %r not found. Abort." % docker_name
)
180 # 3. do the dc.startCompute(name="foobar") call to run the container
181 # TODO consider flavors, and other annotations
182 intfs
= vnfd
.get("connection_points")
183 self
.vnfname2num
[vnf_name
] = GK
.get_next_vnf_name()
184 LOG
.info("VNF "+vnf_name
+" mapped to "+self
.vnfname2num
[vnf_name
]+" on dc "+str(vnfd
.get("dc")))
185 vnfi
= target_dc
.startCompute(self
.vnfname2num
[vnf_name
], network
=intfs
, image
=docker_name
, flavor_name
="small")
188 def _trigger_emulator_start_scripts_in_vnfis(self
, vnfi_list
):
189 for vnfi
in vnfi_list
:
190 config
= vnfi
.dcinfo
.get("Config", dict())
191 env
= config
.get("Env", list())
193 if "SON_EMU_CMD=" in env_var
:
194 cmd
= str(env_var
.split("=")[1])
195 LOG
.info("Executing entrypoint script in %r: %r" % (vnfi
.name
, cmd
))
198 def _unpack_service_package(self
):
200 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
202 LOG
.info("Unzipping: %r" % self
.package_file_path
)
203 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
204 z
.extractall(self
.package_content_path
)
207 def _load_package_descriptor(self
):
209 Load the main package descriptor YAML and keep it as dict.
212 self
.manifest
= load_yaml(
214 self
.package_content_path
, "META-INF/MANIFEST.MF"))
218 Load the entry NSD YAML and keep it as dict.
221 if "entry_service_template" in self
.manifest
:
222 nsd_path
= os
.path
.join(
223 self
.package_content_path
,
224 make_relative_path(self
.manifest
.get("entry_service_template")))
225 self
.nsd
= load_yaml(nsd_path
)
226 LOG
.debug("Loaded NSD: %r" % self
.nsd
.get("name"))
228 def _load_vnfd(self
):
230 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
233 if "package_content" in self
.manifest
:
234 for pc
in self
.manifest
.get("package_content"):
235 if pc
.get("content-type") == "application/sonata.function_descriptor":
236 vnfd_path
= os
.path
.join(
237 self
.package_content_path
,
238 make_relative_path(pc
.get("name")))
239 vnfd
= load_yaml(vnfd_path
)
240 self
.vnfds
[vnfd
.get("name")] = vnfd
241 LOG
.debug("Loaded VNFD: %r" % vnfd
.get("name"))
243 def _load_docker_files(self
):
245 Get all paths to Dockerfiles from VNFDs and store them in dict.
248 for k
, v
in self
.vnfds
.iteritems():
249 for vu
in v
.get("virtual_deployment_units"):
250 if vu
.get("vm_image_format") == "docker":
251 vm_image
= vu
.get("vm_image")
252 docker_path
= os
.path
.join(
253 self
.package_content_path
,
254 make_relative_path(vm_image
))
255 self
.local_docker_files
[k
] = docker_path
256 LOG
.debug("Found Dockerfile (%r): %r" % (k
, docker_path
))
258 def _load_docker_urls(self
):
260 Get all URLs to pre-build docker images in some repo.
263 for k
, v
in self
.vnfds
.iteritems():
264 for vu
in v
.get("virtual_deployment_units"):
265 if vu
.get("vm_image_format") == "docker":
266 url
= vu
.get("vm_image")
268 url
= url
.replace("http://", "")
269 self
.remote_docker_image_urls
[k
] = url
270 LOG
.debug("Found Docker image URL (%r): %r" % (k
, self
.remote_docker_image_urls
[k
]))
272 def _build_images_from_dockerfiles(self
):
274 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
276 if GK_STANDALONE_MODE
:
277 return # do not build anything in standalone mode
279 LOG
.info("Building %d Docker images (this may take several minutes) ..." % len(self
.local_docker_files
))
280 for k
, v
in self
.local_docker_files
.iteritems():
281 for line
in dc
.build(path
=v
.replace("Dockerfile", ""), tag
=k
, rm
=False, nocache
=False):
282 LOG
.debug("DOCKER BUILD: %s" % line
)
283 LOG
.info("Docker image created: %s" % k
)
285 def _pull_predefined_dockerimages(self
):
287 If the package contains URLs to pre-build Docker images, we download them with this method.
290 for url
in self
.remote_docker_image_urls
.itervalues():
291 if not FORCE_PULL
: # only pull if not present (speedup for development)
292 if len(dc
.images(name
=url
)) > 0:
293 LOG
.debug("Image %r present. Skipping pull." % url
)
295 LOG
.info("Pulling image: %r" % url
)
297 insecure_registry
=True)
299 def _check_docker_image_exists(self
, image_name
):
301 Query the docker service and check if the given image exists
302 :param image_name: name of the docker image
305 return len(DockerClient().images(image_name
)) > 0
307 def _calculate_placement(self
, algorithm
):
309 Do placement by adding the a field "dc" to
310 each VNFD that points to one of our
311 data center objects known to the gatekeeper.
313 assert(len(self
.vnfds
) > 0)
314 assert(len(GK
.dcs
) > 0)
315 # instantiate algorithm an place
317 p
.place(self
.nsd
, self
.vnfds
, GK
.dcs
)
318 LOG
.info("Using placement algorithm: %r" % p
.__class
__.__name
__)
319 # lets print the placement result
320 for name
, vnfd
in self
.vnfds
.iteritems():
321 LOG
.info("Placed VNF %r on DC %r" % (name
, str(vnfd
.get("dc"))))
# ----------------------------------------------------------------------------
# Some (simple) placement algorithms
# ----------------------------------------------------------------------------

class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """

    def place(self, nsd, vnfds, dcs):
        # pin every VNFD to the first data center in the dict
        first_dc = list(dcs.itervalues())[0]
        for name, vnfd in vnfds.iteritems():
            vnfd["dc"] = first_dc
# ----------------------------------------------------------------------------
# Resource definitions and API endpoints
# ----------------------------------------------------------------------------

class Packages(fr.Resource):
    """
    REST endpoint for uploading *.son service packages and listing them.
    """

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: JSON dict with service_uuid, size, sha1 and error fields
        """
        # NOTE(review): the def line, the try line and the else line of this
        # method were lost in the source; reconstructed from the surviving
        # except clause and the unconditional 500 return -- confirm.
        try:
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            file_hash = hashlib.sha1(str(son_file)).hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception as ex:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: JSON dict with service_uuid_list
        """
        return {"service_uuid_list": list(GK.services.iterkeys())}
class Instantiations(fr.Resource):
    """
    REST endpoint for starting service instances and listing them.
    """

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: JSON dict with service_instance_uuid
        """
        # NOTE(review): the def line of this method was lost in the source;
        # reconstructed as the POST handler of this resource -- confirm.
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: JSON dict with service_instance_list
        """
        return {"service_instance_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
# create a single, global GK object
# fix: 'GK' is referenced throughout this module but was never created
GK = Gatekeeper()

# setup the Flask app and bind the REST resources to their routes
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
# fix: the flask_restful Api wrapper was never created ('api' was undefined)
api = fr.Api(app)
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
def start_rest_api(host, port, datacenters=None):
    """
    Attach the emulator's data centers to the gatekeeper and run the REST API.
    :param host: interface to bind the Flask server to
    :param port: TCP port to listen on
    :param datacenters: dict of data center objects known to the emulator
    """
    # fix: avoid a shared mutable default argument (was datacenters=dict())
    # NOTE(review): the assignment to GK.dcs and the app.run call were lost in
    # the source; reconstructed from the parameters and surviving keyword
    # arguments -- confirm.
    GK.dcs = datacenters if datacenters is not None else dict()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """
    Create directory `name` (including parents) if it does not exist yet.
    :param name: directory path
    """
    if not os.path.exists(name):
        # fix: the creation call was missing; without it this function did
        # nothing and uploads to a fresh UPLOAD_FOLDER would fail
        os.makedirs(name)
def load_yaml(path):
    """
    Load a YAML file and return its contents as a dict.
    Returns an empty dict if the file cannot be parsed.
    :param path: path of the YAML file
    """
    # NOTE(review): the def line and the try/return lines were lost in the
    # source; reconstructed from the surviving except clause and the call
    # sites (all of which use the return value) -- confirm.
    with open(path, "r") as f:
        try:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input; consider yaml.safe_load for descriptor files.
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r
def make_relative_path(path):
    """
    Strip a "file://" scheme and a leading "/" so the result can be joined
    onto the package content folder with os.path.join.
    :param path: path string taken from a descriptor
    :return: relative path string
    """
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    # fix: the return statement was missing; every caller passes the result
    # to os.path.join, so the stripped path must be returned
    return path
if __name__ == '__main__':
    # Lets allow to run the API in standalone mode.
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)