2 Copyright (c) 2015 SONATA-NFV and Paderborn University
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
9 http://www.apache.org/licenses/LICENSE-2.0
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written permission.
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
import hashlib
import logging
import os
import uuid
import zipfile
from collections import defaultdict
from functools import reduce  # py3 compat: reduce is no longer a builtin

import yaml
from docker import Client as DockerClient
from flask import Flask, request
import flask_restful as fr
# Module-wide logger; werkzeug (Flask's HTTP layer) is quieted to WARNING
# so request noise does not drown our own DEBUG output.
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# On-disk layout used by the gatekeeper.
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
# (referenced by Service._pull_predefined_dockerimages; False preserves the
# "only pull if not present" speedup for development)
FORCE_PULL = False
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps all uploaded service packages and hands out
    short, unique VNF names (Mininet restricts name length).
    """

    def __init__(self):
        # maps service_uuid -> Service object
        self.services = dict()
        # attached data centers (dc_name -> DC object); filled by start_rest_api()
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid: UUID the package was stored under
        :param service: Service object built from the uploaded package
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()

    def get_next_vnf_name(self):
        """
        Return a fresh short VNF name ("vnf1", "vnf2", ...).

        Fix: the counter must be incremented, otherwise every call
        returns the same name and container names collide.
        """
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self, service_uuid, package_file_hash, package_file_path):
        """
        :param service_uuid: UUID assigned to the uploaded package
        :param package_file_hash: sha1 of the uploaded *.son file
        :param package_file_path: path of the stored *.son file on disk
        """
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        # descriptor state is filled in by the _load_* methods during onboard();
        # initialize here so attribute access never raises before onboarding
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        # maps VNFD name -> short emulator name (see Gatekeeper.get_next_vnf_name)
        self.vnfname2num = dict()
112 Do all steps to prepare this service to be instantiated
115 # 1. extract the contents of the package and store them in our catalog
116 self
._unpack
_service
_package
()
117 # 2. read in all descriptor files
118 self
._load
_package
_descriptor
()
121 # 3. prepare container images (e.g. download or build Dockerfile)
123 self
._load
_docker
_files
()
124 self
._build
_images
_from
_dockerfiles
()
126 self
._load
_docker
_urls
()
127 self
._pull
_predefined
_dockerimages
()
128 LOG
.info("On-boarded service: %r" % self
.manifest
.get("package_name"))
130 def start_service(self
):
132 This methods creates and starts a new service instance.
133 It computes placements, iterates over all VNFDs, and starts
134 each VNFD as a Docker container in the data center selected
135 by the placement algorithm.
138 LOG
.info("Starting service %r" % self
.uuid
)
140 # 1. each service instance gets a new uuid to identify it
141 instance_uuid
= str(uuid
.uuid4())
142 # build a instances dict (a bit like a NSR :))
143 self
.instances
[instance_uuid
] = dict()
144 self
.instances
[instance_uuid
]["vnf_instances"] = list()
146 # 2. compute placement of this service instance (adds DC names to VNFDs)
147 if not GK_STANDALONE_MODE
:
148 self
._calculate
_placement
(FirstDcPlacement
)
149 # iterate over all vnfds that we have to start
150 for vnfd
in self
.vnfds
.itervalues():
152 if not GK_STANDALONE_MODE
:
153 vnfi
= self
._start
_vnfd
(vnfd
)
154 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
156 # 3. Configure the chaining of the network functions (currently only E-Line links supported)
157 nfid2name
= defaultdict(lambda :"NotExistingNode",
158 reduce(lambda x
,y
: dict(x
, **y
),
159 map(lambda d
:{d
["vnf_id"]:d
["vnf_name"]},
160 self
.nsd
["network_functions"])))
162 vlinks
= self
.nsd
["virtual_links"]
163 fwd_links
= self
.nsd
["forwarding_graphs"][0]["constituent_virtual_links"]
164 eline_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-Line")]
166 cookie
= 1 # not clear why this is needed - to check with Steven
167 for link
in eline_fwd_links
:
168 src_node
, src_port
= link
["connection_points_reference"][0].split(":")
169 dst_node
, dst_port
= link
["connection_points_reference"][1].split(":")
171 srcname
= nfid2name
[src_node
]
172 dstname
= nfid2name
[dst_node
]
173 LOG
.debug("src name: "+srcname
+" dst name: "+dstname
)
175 if (srcname
in self
.vnfds
) and (dstname
in self
.vnfds
) :
176 network
= self
.vnfds
[srcname
].get("dc").net
# there should be a cleaner way to find the DCNetwork
177 src_vnf
= self
.vnfname2num
[srcname
]
178 dst_vnf
= self
.vnfname2num
[dstname
]
179 ret
= network
.setChain(src_vnf
, dst_vnf
, vnf_src_interface
=src_port
, vnf_dst_interface
=dst_port
, bidirectional
= True, cmd
="add-flow", cookie
= cookie
)
182 # 4. run the emulator specific entrypoint scripts in the VNFIs of this service instance
183 self
._trigger
_emulator
_start
_scripts
_in
_vnfis
(self
.instances
[instance_uuid
]["vnf_instances"])
185 LOG
.info("Service started. Instance id: %r" % instance_uuid
)
188 def _start_vnfd(self
, vnfd
):
190 Start a single VNFD of this service
191 :param vnfd: vnfd descriptor dict
194 # iterate over all deployment units within each VNFDs
195 for u
in vnfd
.get("virtual_deployment_units"):
196 # 1. get the name of the docker image to start and the assigned DC
197 vnf_name
= vnfd
.get("name")
198 if vnf_name
not in self
.remote_docker_image_urls
:
199 raise Exception("No image name for %r found. Abort." % vnf_name
)
200 docker_name
= self
.remote_docker_image_urls
.get(vnf_name
)
201 target_dc
= vnfd
.get("dc")
202 # 2. perform some checks to ensure we can start the container
203 assert(docker_name
is not None)
204 assert(target_dc
is not None)
205 if not self
._check
_docker
_image
_exists
(docker_name
):
206 raise Exception("Docker image %r not found. Abort." % docker_name
)
207 # 3. do the dc.startCompute(name="foobar") call to run the container
208 # TODO consider flavors, and other annotations
209 intfs
= vnfd
.get("connection_points")
210 self
.vnfname2num
[vnf_name
] = GK
.get_next_vnf_name()
211 LOG
.info("VNF "+vnf_name
+" mapped to "+self
.vnfname2num
[vnf_name
]+" on dc "+str(vnfd
.get("dc")))
212 vnfi
= target_dc
.startCompute(self
.vnfname2num
[vnf_name
], network
=intfs
, image
=docker_name
, flavor_name
="small")
215 def _trigger_emulator_start_scripts_in_vnfis(self
, vnfi_list
):
216 for vnfi
in vnfi_list
:
217 config
= vnfi
.dcinfo
.get("Config", dict())
218 env
= config
.get("Env", list())
220 if "SON_EMU_CMD=" in env_var
:
221 cmd
= str(env_var
.split("=")[1])
222 LOG
.info("Executing entrypoint script in %r: %r" % (vnfi
.name
, cmd
))
225 def _unpack_service_package(self
):
227 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
229 LOG
.info("Unzipping: %r" % self
.package_file_path
)
230 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
231 z
.extractall(self
.package_content_path
)
234 def _load_package_descriptor(self
):
236 Load the main package descriptor YAML and keep it as dict.
239 self
.manifest
= load_yaml(
241 self
.package_content_path
, "META-INF/MANIFEST.MF"))
245 Load the entry NSD YAML and keep it as dict.
248 if "entry_service_template" in self
.manifest
:
249 nsd_path
= os
.path
.join(
250 self
.package_content_path
,
251 make_relative_path(self
.manifest
.get("entry_service_template")))
252 self
.nsd
= load_yaml(nsd_path
)
253 LOG
.debug("Loaded NSD: %r" % self
.nsd
.get("name"))
255 def _load_vnfd(self
):
257 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
260 if "package_content" in self
.manifest
:
261 for pc
in self
.manifest
.get("package_content"):
262 if pc
.get("content-type") == "application/sonata.function_descriptor":
263 vnfd_path
= os
.path
.join(
264 self
.package_content_path
,
265 make_relative_path(pc
.get("name")))
266 vnfd
= load_yaml(vnfd_path
)
267 self
.vnfds
[vnfd
.get("name")] = vnfd
268 LOG
.debug("Loaded VNFD: %r" % vnfd
.get("name"))
270 def _load_docker_files(self
):
272 Get all paths to Dockerfiles from VNFDs and store them in dict.
275 for k
, v
in self
.vnfds
.iteritems():
276 for vu
in v
.get("virtual_deployment_units"):
277 if vu
.get("vm_image_format") == "docker":
278 vm_image
= vu
.get("vm_image")
279 docker_path
= os
.path
.join(
280 self
.package_content_path
,
281 make_relative_path(vm_image
))
282 self
.local_docker_files
[k
] = docker_path
283 LOG
.debug("Found Dockerfile (%r): %r" % (k
, docker_path
))
285 def _load_docker_urls(self
):
287 Get all URLs to pre-build docker images in some repo.
290 for k
, v
in self
.vnfds
.iteritems():
291 for vu
in v
.get("virtual_deployment_units"):
292 if vu
.get("vm_image_format") == "docker":
293 url
= vu
.get("vm_image")
295 url
= url
.replace("http://", "")
296 self
.remote_docker_image_urls
[k
] = url
297 LOG
.debug("Found Docker image URL (%r): %r" % (k
, self
.remote_docker_image_urls
[k
]))
299 def _build_images_from_dockerfiles(self
):
301 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
303 if GK_STANDALONE_MODE
:
304 return # do not build anything in standalone mode
306 LOG
.info("Building %d Docker images (this may take several minutes) ..." % len(self
.local_docker_files
))
307 for k
, v
in self
.local_docker_files
.iteritems():
308 for line
in dc
.build(path
=v
.replace("Dockerfile", ""), tag
=k
, rm
=False, nocache
=False):
309 LOG
.debug("DOCKER BUILD: %s" % line
)
310 LOG
.info("Docker image created: %s" % k
)
312 def _pull_predefined_dockerimages(self
):
314 If the package contains URLs to pre-build Docker images, we download them with this method.
317 for url
in self
.remote_docker_image_urls
.itervalues():
318 if not FORCE_PULL
: # only pull if not present (speedup for development)
319 if len(dc
.images(name
=url
)) > 0:
320 LOG
.debug("Image %r present. Skipping pull." % url
)
322 LOG
.info("Pulling image: %r" % url
)
324 insecure_registry
=True)
326 def _check_docker_image_exists(self
, image_name
):
328 Query the docker service and check if the given image exists
329 :param image_name: name of the docker image
332 return len(DockerClient().images(image_name
)) > 0
334 def _calculate_placement(self
, algorithm
):
336 Do placement by adding the a field "dc" to
337 each VNFD that points to one of our
338 data center objects known to the gatekeeper.
340 assert(len(self
.vnfds
) > 0)
341 assert(len(GK
.dcs
) > 0)
342 # instantiate algorithm an place
344 p
.place(self
.nsd
, self
.vnfds
, GK
.dcs
)
345 LOG
.info("Using placement algorithm: %r" % p
.__class
__.__name
__)
346 # lets print the placement result
347 for name
, vnfd
in self
.vnfds
.iteritems():
348 LOG
.info("Placed VNF %r on DC %r" % (name
, str(vnfd
.get("dc"))))
352 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign the first data center to every VNFD (mutates vnfds in place
        by adding a "dc" field).
        :param nsd: network service descriptor (unused by this trivial strategy)
        :param vnfds: dict mapping vnf name -> vnfd dict
        :param dcs: dict mapping dc name -> data center object
        """
        # .values()/.items() instead of itervalues/iteritems: Python 2/3
        # compatible; hoist the invariant lookup out of the loop.
        first_dc = list(dcs.values())[0]
        for name, vnfd in vnfds.items():
            vnfd["dc"] = first_dc
366 Resource definitions and API endpoints
class Packages(fr.Resource):
    """REST resource handling *.son package upload and listing."""

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: dict with service UUID, file size and sha1 (JSON), or an
                 error dict with HTTP 500
        """
        try:
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # Fix: hash the actual file contents — sha1(str(son_file)) only
            # hashed the repr of the FileStorage object, not the package.
            file_hash = hashlib.sha1(son_file.read()).hexdigest()
            son_file.seek(0)  # rewind so save() below writes the full file
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception as ex:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: json list
        """
        # .keys() instead of .iterkeys(): Python 2/3 compatible
        return {"service_uuid_list": list(GK.services.keys())}
class Instantiations(fr.Resource):
    """REST resource to start service instances and list running ones."""

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: dict with service_instance_uuid (JSON) or 404
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service in the list
            # (.keys() instead of .iterkeys(): Python 2/3 compatible)
            service_uuid = list(GK.services.keys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: json list
        """
        return {"service_instance_list": [
            list(s.instances.keys()) for s in GK.services.values()]}
# create a single, global GK object
# (referenced by the REST resources and by Service._calculate_placement)
GK = Gatekeeper()

# setup the Flask application and its REST API wrapper
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# register the API endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
def start_rest_api(host, port, datacenters=None):
    """
    Attach the known data centers to the gatekeeper and serve the REST API.
    :param host: interface to bind to (e.g. "0.0.0.0")
    :param port: TCP port of the HTTP API
    :param datacenters: dict of dc_name -> data center object (default: empty)
    """
    # Fix: `datacenters=dict()` was a shared mutable default argument;
    # use None as sentinel and create a fresh dict per call.
    GK.dcs = dict() if datacenters is None else datacenters
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """
    Create the directory (including parents) if it does not exist yet.
    Idempotent: does nothing when the path already exists.
    :param name: directory path
    """
    if not os.path.exists(name):
        # Fix: the directory was checked but never created.
        os.makedirs(name)
def load_yaml(path):
    """
    Read the YAML file at *path* and return its content as a dict.
    Returns an empty dict if the file cannot be parsed.
    :param path: path of the YAML file
    :return: parsed content (dict) or empty dict on parse error
    """
    with open(path, "r") as f:
        try:
            # safe_load instead of load: package contents are untrusted
            # uploads — yaml.load can instantiate arbitrary Python objects.
            r = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r
def make_relative_path(path):
    """
    Strip a "file://" scheme prefix and one leading "/" so the result can be
    joined onto the package content directory with os.path.join.
    :param path: path string taken from a descriptor file
    :return: relative path string
    """
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    # Fix: the transformed path must be returned — callers join it onto
    # the package content directory; without it they receive None.
    return path
if __name__ == '__main__':
    # Allow running the API in standalone mode (no emulator attached):
    # useful for SDK integration testing against the bare REST endpoints.
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)