2 Copyright (c) 2015 SONATA-NFV and Paderborn University
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
9 http://www.apache.org/licenses/LICENSE-2.0
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written permission.
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
import logging
import os
import uuid
import hashlib
import zipfile
import yaml
from docker import Client as DockerClient
from flask import Flask, request
import flask_restful as fr
from collections import defaultdict
# Module-wide logger; werkzeug (Flask's server) is silenced to WARNING to
# keep request noise out of the gatekeeper log.
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# On-disk working area for the dummy gatekeeper.
GK_STORAGE = "/tmp/son-dummy-gk/"
# Incoming *.son packages are stored here.
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
# Unpacked package contents (descriptors, Dockerfiles) are stored here.
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
FORCE_PULL = False  # NOTE(review): reconstructed — assignment line missing in source; referenced by _pull_predefined_dockerimages
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps a registry of uploaded service packages and
    hands out short, unique VNF names (Mininet limits container name length).
    """

    def __init__(self):
        # service_uuid -> Service object
        self.services = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid: UUID assigned to the uploaded package
        :param service: Service object wrapping the package
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()  # NOTE(review): reconstructed — onboarding call line missing in source; confirm

    def get_next_vnf_name(self):
        """Return a new, unique short VNF name (vnf1, vnf2, ...)."""
        # NOTE(review): increment line reconstructed — without it every call
        # would return the same name, defeating the counter's purpose.
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self, service_uuid, package_file_hash, package_file_path):
        # NOTE(review): __init__ signature reconstructed from the attribute
        # assignments below and the Service(...) call in Packages.post — confirm.
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        # unpacked package contents live under the catalog folder, keyed by uuid
        self.package_content_path = os.path.join(
            CATALOG_FOLDER, "services/%s" % self.uuid)
        # descriptors, filled in by onboard()
        self.manifest = None    # NOTE(review): reconstructed init — confirm
        self.nsd = None         # NOTE(review): reconstructed init — confirm
        self.vnfds = dict()     # NOTE(review): reconstructed init — confirm
        # vnf_name -> path of local Dockerfile found in the package
        self.local_docker_files = dict()
        # vnf_name -> URL of a pre-built Docker image
        self.remote_docker_image_urls = dict()
        # instance_uuid -> per-instance record (NSR-like dict)
        self.instances = dict()
        # vnf_name -> docker container name
        self.vnf_name2docker_name = dict()
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)
115 Do all steps to prepare this service to be instantiated
118 # 1. extract the contents of the package and store them in our catalog
119 self
._unpack
_service
_package
()
120 # 2. read in all descriptor files
121 self
._load
_package
_descriptor
()
124 # 3. prepare container images (e.g. download or build Dockerfile)
126 self
._load
_docker
_files
()
127 self
._build
_images
_from
_dockerfiles
()
129 self
._load
_docker
_urls
()
130 self
._pull
_predefined
_dockerimages
()
131 LOG
.info("On-boarded service: %r" % self
.manifest
.get("package_name"))
133 def start_service(self
):
135 This methods creates and starts a new service instance.
136 It computes placements, iterates over all VNFDs, and starts
137 each VNFD as a Docker container in the data center selected
138 by the placement algorithm.
141 LOG
.info("Starting service %r" % self
.uuid
)
143 # 1. each service instance gets a new uuid to identify it
144 instance_uuid
= str(uuid
.uuid4())
145 # build a instances dict (a bit like a NSR :))
146 self
.instances
[instance_uuid
] = dict()
147 self
.instances
[instance_uuid
]["vnf_instances"] = list()
149 # 2. compute placement of this service instance (adds DC names to VNFDs)
150 if not GK_STANDALONE_MODE
:
151 self
._calculate
_placement
(FirstDcPlacement
)
152 # iterate over all vnfds that we have to start
153 for vnfd
in self
.vnfds
.itervalues():
155 if not GK_STANDALONE_MODE
:
156 vnfi
= self
._start
_vnfd
(vnfd
)
157 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
159 # 3. Configure the chaining of the network functions (currently only E-Line links supported)
160 vnf_id2vnf_name
= defaultdict(lambda: "NotExistingNode",
161 reduce(lambda x
, y
: dict(x
, **y
),
162 map(lambda d
: {d
["vnf_id"]: d
["vnf_name"]},
163 self
.nsd
["network_functions"])))
165 vlinks
= self
.nsd
["virtual_links"]
166 fwd_links
= self
.nsd
["forwarding_graphs"][0]["constituent_virtual_links"]
167 eline_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-Line")]
169 cookie
= 1 # not clear why this is needed - to check with Steven
170 for link
in eline_fwd_links
:
171 src_id
, src_if_name
= link
["connection_points_reference"][0].split(":")
172 dst_id
, dst_if_name
= link
["connection_points_reference"][1].split(":")
174 src_name
= vnf_id2vnf_name
[src_id
]
175 dst_name
= vnf_id2vnf_name
[dst_id
]
178 "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
179 src_name
, src_id
, src_if_name
, dst_name
, dst_id
, dst_if_name
))
181 if (src_name
in self
.vnfds
) and (dst_name
in self
.vnfds
):
182 network
= self
.vnfds
[src_name
].get("dc").net
# there should be a cleaner way to find the DCNetwork
183 src_docker_name
= self
.vnf_name2docker_name
[src_name
]
184 dst_docker_name
= self
.vnf_name2docker_name
[dst_name
]
185 LOG
.debug(src_docker_name
)
186 ret
= network
.setChain(
187 src_docker_name
, dst_docker_name
,
188 vnf_src_interface
=src_if_name
, vnf_dst_interface
=dst_if_name
,
189 bidirectional
=True, cmd
="add-flow", cookie
=cookie
)
192 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
193 src_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, src_name
)
194 if src_vnfi
is not None:
195 self
._vnf
_reconfigure
_network
(src_vnfi
, src_if_name
, self
.eline_subnets_src
.pop(0))
196 dst_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, dst_name
)
197 if dst_vnfi
is not None:
198 self
._vnf
_reconfigure
_network
(dst_vnfi
, dst_if_name
, self
.eline_subnets_dst
.pop(0))
200 # 4. run the emulator specific entrypoint scripts in the VNFIs of this service instance
201 self
._trigger
_emulator
_start
_scripts
_in
_vnfis
(self
.instances
[instance_uuid
]["vnf_instances"])
203 LOG
.info("Service started. Instance id: %r" % instance_uuid
)
206 def _start_vnfd(self
, vnfd
):
208 Start a single VNFD of this service
209 :param vnfd: vnfd descriptor dict
212 # iterate over all deployment units within each VNFDs
213 for u
in vnfd
.get("virtual_deployment_units"):
214 # 1. get the name of the docker image to start and the assigned DC
215 vnf_name
= vnfd
.get("name")
216 if vnf_name
not in self
.remote_docker_image_urls
:
217 raise Exception("No image name for %r found. Abort." % vnf_name
)
218 docker_name
= self
.remote_docker_image_urls
.get(vnf_name
)
219 target_dc
= vnfd
.get("dc")
220 # 2. perform some checks to ensure we can start the container
221 assert(docker_name
is not None)
222 assert(target_dc
is not None)
223 if not self
._check
_docker
_image
_exists
(docker_name
):
224 raise Exception("Docker image %r not found. Abort." % docker_name
)
225 # 3. do the dc.startCompute(name="foobar") call to run the container
226 # TODO consider flavors, and other annotations
227 intfs
= vnfd
.get("connection_points")
228 # mgmt connection points can be skipped, this is considered to be the connection the default docker0 bridge
229 intfs
= [intf
for intf
in intfs
if 'mgmt' not in intf
['id']]
231 # use the vnf_id in the nsd as docker name
232 # so deployed containers can be easily mapped back to the nsd
233 vnf_name2id
= defaultdict(lambda: "NotExistingNode",
234 reduce(lambda x
, y
: dict(x
, **y
),
235 map(lambda d
: {d
["vnf_name"]: d
["vnf_id"]},
236 self
.nsd
["network_functions"])))
237 self
.vnf_name2docker_name
[vnf_name
] = vnf_name2id
[vnf_name
]
238 # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()
240 LOG
.info("Starting %r as %r in DC %r" % (vnf_name
, self
.vnf_name2docker_name
[vnf_name
], vnfd
.get("dc")))
241 LOG
.debug("Interfaces for %r: %r" % (vnf_name
, intfs
))
242 vnfi
= target_dc
.startCompute(self
.vnf_name2docker_name
[vnf_name
], network
=intfs
, image
=docker_name
, flavor_name
="small")
245 def _get_vnf_instance(self
, instance_uuid
, name
):
247 Returns the Docker object for the given VNF name (or Docker name).
248 :param instance_uuid: UUID of the service instance to search in.
249 :param name: VNF name or Docker name. We are fuzzy here.
253 if name
in self
.vnf_name2docker_name
:
254 dn
= self
.vnf_name2docker_name
[name
]
255 for vnfi
in self
.instances
[instance_uuid
]["vnf_instances"]:
258 LOG
.warning("No container with name: %r found.")
262 def _vnf_reconfigure_network(vnfi
, if_name
, net_str
):
264 Reconfigure the network configuration of a specific interface
265 of a running container.
266 :param vnfi: container instacne
267 :param if_name: interface name
268 :param net_str: network configuration string, e.g., 1.2.3.4/24
271 intf
= vnfi
.intf(intf
=if_name
)
274 LOG
.debug("Reconfigured network of %s:%s to %r" % (vnfi
.name
, if_name
, net_str
))
276 LOG
.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi
.name
, if_name
))
279 def _trigger_emulator_start_scripts_in_vnfis(self
, vnfi_list
):
280 for vnfi
in vnfi_list
:
281 config
= vnfi
.dcinfo
.get("Config", dict())
282 env
= config
.get("Env", list())
284 if "SON_EMU_CMD=" in env_var
:
285 cmd
= str(env_var
.split("=")[1])
286 LOG
.info("Executing entrypoint script in %r: %r" % (vnfi
.name
, cmd
))
289 def _unpack_service_package(self
):
291 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
293 LOG
.info("Unzipping: %r" % self
.package_file_path
)
294 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
295 z
.extractall(self
.package_content_path
)
298 def _load_package_descriptor(self
):
300 Load the main package descriptor YAML and keep it as dict.
303 self
.manifest
= load_yaml(
305 self
.package_content_path
, "META-INF/MANIFEST.MF"))
309 Load the entry NSD YAML and keep it as dict.
312 if "entry_service_template" in self
.manifest
:
313 nsd_path
= os
.path
.join(
314 self
.package_content_path
,
315 make_relative_path(self
.manifest
.get("entry_service_template")))
316 self
.nsd
= load_yaml(nsd_path
)
317 LOG
.debug("Loaded NSD: %r" % self
.nsd
.get("name"))
319 def _load_vnfd(self
):
321 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
324 if "package_content" in self
.manifest
:
325 for pc
in self
.manifest
.get("package_content"):
326 if pc
.get("content-type") == "application/sonata.function_descriptor":
327 vnfd_path
= os
.path
.join(
328 self
.package_content_path
,
329 make_relative_path(pc
.get("name")))
330 vnfd
= load_yaml(vnfd_path
)
331 self
.vnfds
[vnfd
.get("name")] = vnfd
332 LOG
.debug("Loaded VNFD: %r" % vnfd
.get("name"))
334 def _load_docker_files(self
):
336 Get all paths to Dockerfiles from VNFDs and store them in dict.
339 for k
, v
in self
.vnfds
.iteritems():
340 for vu
in v
.get("virtual_deployment_units"):
341 if vu
.get("vm_image_format") == "docker":
342 vm_image
= vu
.get("vm_image")
343 docker_path
= os
.path
.join(
344 self
.package_content_path
,
345 make_relative_path(vm_image
))
346 self
.local_docker_files
[k
] = docker_path
347 LOG
.debug("Found Dockerfile (%r): %r" % (k
, docker_path
))
349 def _load_docker_urls(self
):
351 Get all URLs to pre-build docker images in some repo.
354 for k
, v
in self
.vnfds
.iteritems():
355 for vu
in v
.get("virtual_deployment_units"):
356 if vu
.get("vm_image_format") == "docker":
357 url
= vu
.get("vm_image")
359 url
= url
.replace("http://", "")
360 self
.remote_docker_image_urls
[k
] = url
361 LOG
.debug("Found Docker image URL (%r): %r" % (k
, self
.remote_docker_image_urls
[k
]))
363 def _build_images_from_dockerfiles(self
):
365 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
367 if GK_STANDALONE_MODE
:
368 return # do not build anything in standalone mode
370 LOG
.info("Building %d Docker images (this may take several minutes) ..." % len(self
.local_docker_files
))
371 for k
, v
in self
.local_docker_files
.iteritems():
372 for line
in dc
.build(path
=v
.replace("Dockerfile", ""), tag
=k
, rm
=False, nocache
=False):
373 LOG
.debug("DOCKER BUILD: %s" % line
)
374 LOG
.info("Docker image created: %s" % k
)
376 def _pull_predefined_dockerimages(self
):
378 If the package contains URLs to pre-build Docker images, we download them with this method.
381 for url
in self
.remote_docker_image_urls
.itervalues():
382 if not FORCE_PULL
: # only pull if not present (speedup for development)
383 if len(dc
.images(name
=url
)) > 0:
384 LOG
.debug("Image %r present. Skipping pull." % url
)
386 LOG
.info("Pulling image: %r" % url
)
388 insecure_registry
=True)
390 def _check_docker_image_exists(self
, image_name
):
392 Query the docker service and check if the given image exists
393 :param image_name: name of the docker image
396 return len(DockerClient().images(image_name
)) > 0
398 def _calculate_placement(self
, algorithm
):
400 Do placement by adding the a field "dc" to
401 each VNFD that points to one of our
402 data center objects known to the gatekeeper.
404 assert(len(self
.vnfds
) > 0)
405 assert(len(GK
.dcs
) > 0)
406 # instantiate algorithm an place
408 p
.place(self
.nsd
, self
.vnfds
, GK
.dcs
)
409 LOG
.info("Using placement algorithm: %r" % p
.__class
__.__name
__)
410 # lets print the placement result
411 for name
, vnfd
in self
.vnfds
.iteritems():
412 LOG
.info("Placed VNF %r on DC %r" % (name
, str(vnfd
.get("dc"))))
416 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """

    def place(self, nsd, vnfds, dcs):
        """
        Assign the first data center in `dcs` to every VNFD.
        :param nsd: network service descriptor (unused by this strategy)
        :param vnfds: dict of vnf_name -> VNFD dict, mutated in place
        :param dcs: dict of data center objects
        """
        # .items()/.values() instead of iteritems/itervalues: Py2/3 compatible
        for name, vnfd in vnfds.items():
            vnfd["dc"] = list(dcs.values())[0]
430 Resource definitions and API endpoints
class Packages(fr.Resource):
    """
    REST resource for *.son service package upload and listing.
    """

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: JSON dict with service_uuid, size, sha1 and error fields
        """
        try:  # NOTE(review): try line reconstructed — except clause is present in source
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # NOTE(review): hashes str(son_file) (the object's repr), not the
            # file contents — kept byte-identical; confirm intent upstream.
            file_hash = hashlib.sha1(str(son_file)).hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: JSON dict with a service_uuid_list field
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.keys())}  # .keys(): Py2/3 compatible (was iterkeys)
class Instantiations(fr.Resource):
    """
    REST resource for starting and listing service instances.
    """

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: JSON dict with the new service_instance_uuid, or 404
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service in the list
            service_uuid = list(GK.services.keys())[0]  # .keys(): Py2/3 compatible (was iterkeys)

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: JSON dict with a service_instantiations_list field
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.keys()) for s in GK.services.values()]}  # Py2/3 compatible iteration
# create a single, global GK object
GK = Gatekeeper()  # NOTE(review): assignment reconstructed — creation line missing in source; GK is referenced throughout

# setup Flask application and REST API
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)  # NOTE(review): reconstructed — `api` is used below but its creation line is missing; confirm
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
def start_rest_api(host, port, datacenters=None):
    """
    Run the gatekeeper's Flask server.
    :param host: bind address
    :param port: bind port
    :param datacenters: dict of data centers made available to the GK
    """
    # None-sentinel replaces the mutable default `dict()` (shared across calls)
    GK.dcs = dict() if datacenters is None else datacenters
    # start the Flask server (not the best performance but ok for our use case)
    # NOTE(review): app.run call reconstructed — only the use_reloader keyword
    # line survives in source; confirm debug flag upstream.
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """Create directory `name` (and parents) if it does not exist yet."""
    if not os.path.exists(name):
        os.makedirs(name)  # NOTE(review): creation line reconstructed — missing in source
def load_yaml(path):
    # NOTE(review): def line and try/result handling reconstructed — missing in
    # source; inferred from the call sites (returns a dict parsed from YAML).
    """
    Parse the YAML file at `path` and return its contents as a dict.
    Returns an empty dict on parse errors (logged, not raised).
    """
    with open(path, "r") as f:
        try:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input — consider yaml.safe_load; kept for compatibility.
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r
def make_relative_path(path):
    """
    Strip a leading "file://" scheme and/or a single leading "/" so the
    result can be joined onto the package content directory.
    :param path: path string from a descriptor file
    :return: relative path string
    """
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    return path  # NOTE(review): return reconstructed — missing in source
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    :param n: number of subnets to generate
    :param start: first octet value (subnets use i.0.0.ip/subnet_size)
    :param subnet_size: prefix length
    :param ip: host part of the address within each subnet
    :return: list of "i.0.0.ip/size" strings
    """
    r = list()  # NOTE(review): accumulator init reconstructed — missing in source
    for i in range(start, start + n):
        r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
    return r  # NOTE(review): return reconstructed — missing in source
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)