2 Copyright (c) 2015 SONATA-NFV and Paderborn University
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
9 http://www.apache.org/licenses/LICENSE-2.0
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
# stdlib
import hashlib
import logging
import os
import threading
import uuid
import zipfile
from collections import defaultdict

# third-party
import pkg_resources
import yaml
from docker import Client as DockerClient
from flask import Flask, request
import flask_restful as fr
# module-wide logger; werkzeug (Flask's HTTP layer) is silenced to WARNING
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# on-disk layout: uploaded *.son packages and the extracted catalog
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
FORCE_PULL = False  # NOTE(review): reconstructed — flag is read by _pull_predefined_dockerimages; default assumed False, confirm

# Automatically deploy SAPs (endpoints) of the service as new containers
DEPLOY_SAP = False  # NOTE(review): reconstructed from the comment above — confirm default value
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps track of uploaded service packages and of the
    data centers (DCs) that service instances can be placed on.
    """

    def __init__(self):
        self.services = dict()  # service_uuid -> Service object
        self.dcs = dict()  # NOTE(review): reconstructed — populated by start_rest_api, read by _calculate_placement via GK.dcs
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid: UUID used as key in self.services
        :param service: Service object for the uploaded package
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()  # NOTE(review): reconstructed call, implied by the comment above — confirm

    def get_next_vnf_name(self):
        """Return a new short unique VNF name (vnf1, vnf2, ...)."""
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
92 class Service(object):
94 This class represents a NS uploaded as a *.son package to the
96 Can have multiple running instances of this service.
103 self
.uuid
= service_uuid
104 self
.package_file_hash
= package_file_hash
105 self
.package_file_path
= package_file_path
106 self
.package_content_path
= os
.path
.join(CATALOG_FOLDER
, "services/%s" % self
.uuid
)
110 self
.local_docker_files
= dict()
111 self
.remote_docker_image_urls
= dict()
112 self
.instances
= dict()
113 self
.vnf_name2docker_name
= dict()
114 self
.sap_identifiers
= set()
115 # lets generate a set of subnet configurations used for e-line chaining setup
116 self
.eline_subnets_src
= generate_subnet_strings(50, start
=200, subnet_size
=24, ip
=1)
117 self
.eline_subnets_dst
= generate_subnet_strings(50, start
=200, subnet_size
=24, ip
=2)
122 Do all steps to prepare this service to be instantiated
125 # 1. extract the contents of the package and store them in our catalog
126 self
._unpack
_service
_package
()
127 # 2. read in all descriptor files
128 self
._load
_package
_descriptor
()
133 # 3. prepare container images (e.g. download or build Dockerfile)
135 self
._load
_docker
_files
()
136 self
._build
_images
_from
_dockerfiles
()
138 self
._load
_docker
_urls
()
139 self
._pull
_predefined
_dockerimages
()
140 LOG
.info("On-boarded service: %r" % self
.manifest
.get("name"))
142 def start_service(self
):
144 This methods creates and starts a new service instance.
145 It computes placements, iterates over all VNFDs, and starts
146 each VNFD as a Docker container in the data center selected
147 by the placement algorithm.
150 LOG
.info("Starting service %r" % self
.uuid
)
152 # 1. each service instance gets a new uuid to identify it
153 instance_uuid
= str(uuid
.uuid4())
154 # build a instances dict (a bit like a NSR :))
155 self
.instances
[instance_uuid
] = dict()
156 self
.instances
[instance_uuid
]["vnf_instances"] = list()
158 # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
159 vnf_id2vnf_name
= defaultdict(lambda: "NotExistingNode",
160 reduce(lambda x
, y
: dict(x
, **y
),
161 map(lambda d
: {d
["vnf_id"]: d
["vnf_name"]},
162 self
.nsd
["network_functions"])))
164 # 3. compute placement of this service instance (adds DC names to VNFDs)
165 if not GK_STANDALONE_MODE
:
166 self
._calculate
_placement
(FirstDcPlacement
)
167 # iterate over all vnfds that we have to start
168 for vnfd
in self
.vnfds
.itervalues():
170 if not GK_STANDALONE_MODE
:
171 vnfi
= self
._start
_vnfd
(vnfd
)
172 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
174 vlinks
= self
.nsd
["virtual_links"]
175 fwd_links
= self
.nsd
["forwarding_graphs"][0]["constituent_virtual_links"]
176 eline_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-Line")]
177 elan_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-LAN")]
179 # 4a. deploy E-Line links
180 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
181 # eg. different services get a unique cookie for their flowrules
183 for link
in eline_fwd_links
:
184 src_id
, src_if_name
= link
["connection_points_reference"][0].split(":")
185 dst_id
, dst_if_name
= link
["connection_points_reference"][1].split(":")
187 # check if there is a SAP in the link
188 if src_id
in self
.sap_identifiers
:
189 src_docker_name
= "{0}_{1}".format(src_id
, src_if_name
)
190 src_id
= src_docker_name
192 src_docker_name
= src_id
194 if dst_id
in self
.sap_identifiers
:
195 dst_docker_name
= "{0}_{1}".format(dst_id
, dst_if_name
)
196 dst_id
= dst_docker_name
198 dst_docker_name
= dst_id
200 src_name
= vnf_id2vnf_name
[src_id
]
201 dst_name
= vnf_id2vnf_name
[dst_id
]
204 "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
205 src_name
, src_id
, src_if_name
, dst_name
, dst_id
, dst_if_name
))
207 if (src_name
in self
.vnfds
) and (dst_name
in self
.vnfds
):
208 network
= self
.vnfds
[src_name
].get("dc").net
# there should be a cleaner way to find the DCNetwork
209 LOG
.debug(src_docker_name
)
210 ret
= network
.setChain(
211 src_docker_name
, dst_docker_name
,
212 vnf_src_interface
=src_if_name
, vnf_dst_interface
=dst_if_name
,
213 bidirectional
=True, cmd
="add-flow", cookie
=cookie
, priority
=10)
215 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
216 src_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, src_name
)
217 if src_vnfi
is not None:
218 self
._vnf
_reconfigure
_network
(src_vnfi
, src_if_name
, self
.eline_subnets_src
.pop(0))
219 dst_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, dst_name
)
220 if dst_vnfi
is not None:
221 self
._vnf
_reconfigure
_network
(dst_vnfi
, dst_if_name
, self
.eline_subnets_dst
.pop(0))
223 # 4b. deploy E-LAN links
225 for link
in elan_fwd_links
:
226 # generate lan ip address
228 for intf
in link
["connection_points_reference"]:
229 ip_address
= generate_lan_string("10.0", base
, subnet_size
=24, ip
=ip
)
230 vnf_id
, intf_name
= intf
.split(":")
231 if vnf_id
in self
.sap_identifiers
:
232 src_docker_name
= "{0}_{1}".format(vnf_id
, intf_name
)
233 vnf_id
= src_docker_name
234 vnf_name
= vnf_id2vnf_name
[vnf_id
]
236 "Setting up E-LAN link. %s(%s:%s) -> %s" % (
237 vnf_name
, vnf_id
, intf_name
, ip_address
))
239 if vnf_name
in self
.vnfds
:
240 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
241 # E-LAN relies on the learning switch capability of the infrastructure switch in dockernet,
242 # so no explicit chaining is necessary
243 vnfi
= self
._get
_vnf
_instance
(instance_uuid
, vnf_name
)
245 self
._vnf
_reconfigure
_network
(vnfi
, intf_name
, ip_address
)
246 # increase for the next ip address on this E-LAN
248 # increase the base ip address for the next E-LAN
251 # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
252 self
._trigger
_emulator
_start
_scripts
_in
_vnfis
(self
.instances
[instance_uuid
]["vnf_instances"])
254 LOG
.info("Service started. Instance id: %r" % instance_uuid
)
257 def _start_vnfd(self
, vnfd
):
259 Start a single VNFD of this service
260 :param vnfd: vnfd descriptor dict
263 # iterate over all deployment units within each VNFDs
264 for u
in vnfd
.get("virtual_deployment_units"):
265 # 1. get the name of the docker image to start and the assigned DC
266 vnf_name
= vnfd
.get("name")
267 if vnf_name
not in self
.remote_docker_image_urls
:
268 raise Exception("No image name for %r found. Abort." % vnf_name
)
269 docker_name
= self
.remote_docker_image_urls
.get(vnf_name
)
270 target_dc
= vnfd
.get("dc")
271 # 2. perform some checks to ensure we can start the container
272 assert(docker_name
is not None)
273 assert(target_dc
is not None)
274 if not self
._check
_docker
_image
_exists
(docker_name
):
275 raise Exception("Docker image %r not found. Abort." % docker_name
)
276 # 3. do the dc.startCompute(name="foobar") call to run the container
277 # TODO consider flavors, and other annotations
278 intfs
= vnfd
.get("connection_points")
280 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
281 # use the vnf_id in the nsd as docker name
282 # so deployed containers can be easily mapped back to the nsd
283 vnf_name2id
= defaultdict(lambda: "NotExistingNode",
284 reduce(lambda x
, y
: dict(x
, **y
),
285 map(lambda d
: {d
["vnf_name"]: d
["vnf_id"]},
286 self
.nsd
["network_functions"])))
287 self
.vnf_name2docker_name
[vnf_name
] = vnf_name2id
[vnf_name
]
288 # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()
290 LOG
.info("Starting %r as %r in DC %r" % (vnf_name
, self
.vnf_name2docker_name
[vnf_name
], vnfd
.get("dc")))
291 LOG
.debug("Interfaces for %r: %r" % (vnf_name
, intfs
))
292 vnfi
= target_dc
.startCompute(self
.vnf_name2docker_name
[vnf_name
], network
=intfs
, image
=docker_name
, flavor_name
="small")
295 def _get_vnf_instance(self
, instance_uuid
, name
):
297 Returns the Docker object for the given VNF name (or Docker name).
298 :param instance_uuid: UUID of the service instance to search in.
299 :param name: VNF name or Docker name. We are fuzzy here.
303 if name
in self
.vnf_name2docker_name
:
304 dn
= self
.vnf_name2docker_name
[name
]
305 for vnfi
in self
.instances
[instance_uuid
]["vnf_instances"]:
308 LOG
.warning("No container with name: %r found.")
312 def _vnf_reconfigure_network(vnfi
, if_name
, net_str
):
314 Reconfigure the network configuration of a specific interface
315 of a running container.
316 :param vnfi: container instacne
317 :param if_name: interface name
318 :param net_str: network configuration string, e.g., 1.2.3.4/24
321 intf
= vnfi
.intf(intf
=if_name
)
324 LOG
.debug("Reconfigured network of %s:%s to %r" % (vnfi
.name
, if_name
, net_str
))
326 LOG
.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi
.name
, if_name
))
329 def _trigger_emulator_start_scripts_in_vnfis(self
, vnfi_list
):
330 for vnfi
in vnfi_list
:
331 config
= vnfi
.dcinfo
.get("Config", dict())
332 env
= config
.get("Env", list())
334 if "SON_EMU_CMD=" in env_var
:
335 cmd
= str(env_var
.split("=")[1])
336 LOG
.info("Executing entry point script in %r: %r" % (vnfi
.name
, cmd
))
337 # execute command in new thread to ensure that GK is not blocked by VNF
338 t
= threading
.Thread(target
=vnfi
.cmdPrint
, args
=(cmd
,))
342 def _unpack_service_package(self
):
344 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
346 LOG
.info("Unzipping: %r" % self
.package_file_path
)
347 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
348 z
.extractall(self
.package_content_path
)
351 def _load_package_descriptor(self
):
353 Load the main package descriptor YAML and keep it as dict.
356 self
.manifest
= load_yaml(
358 self
.package_content_path
, "META-INF/MANIFEST.MF"))
362 Load the entry NSD YAML and keep it as dict.
365 if "entry_service_template" in self
.manifest
:
366 nsd_path
= os
.path
.join(
367 self
.package_content_path
,
368 make_relative_path(self
.manifest
.get("entry_service_template")))
369 self
.nsd
= load_yaml(nsd_path
)
370 LOG
.debug("Loaded NSD: %r" % self
.nsd
.get("name"))
372 def _load_vnfd(self
):
374 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
377 if "package_content" in self
.manifest
:
378 for pc
in self
.manifest
.get("package_content"):
379 if pc
.get("content-type") == "application/sonata.function_descriptor":
380 vnfd_path
= os
.path
.join(
381 self
.package_content_path
,
382 make_relative_path(pc
.get("name")))
383 vnfd
= load_yaml(vnfd_path
)
384 self
.vnfds
[vnfd
.get("name")] = vnfd
385 LOG
.debug("Loaded VNFD: %r" % vnfd
.get("name"))
387 def _load_saps(self
):
388 # Each Service Access Point (connection_point) in the nsd is getting its own container
389 SAPs
= [p
["id"] for p
in self
.nsd
["connection_points"] if p
["type"] == "interface"]
391 # endpoints needed in this service
392 sap_vnf_id
, sap_vnf_interface
= sap
.split(':')
393 # set of the connection_point ids found in the nsd (in the examples this is 'ns')
394 self
.sap_identifiers
.add(sap_vnf_id
)
396 sap_docker_name
= sap
.replace(':', '_')
398 # add SAP to self.vnfds
399 sapfile
= pkg_resources
.resource_filename(__name__
, "sap_vnfd.yml")
400 sap_vnfd
= load_yaml(sapfile
)
401 sap_vnfd
["connection_points"][0]["id"] = sap_vnf_interface
402 sap_vnfd
["name"] = sap_docker_name
403 self
.vnfds
[sap_docker_name
] = sap_vnfd
404 # add SAP vnf to list in the NSD so it is deployed later on
405 # each SAP get a unique VNFD and vnf_id in the NSD
406 self
.nsd
["network_functions"].append({"vnf_id": sap_docker_name
, "vnf_name": sap_docker_name
})
407 LOG
.debug("Loaded SAP: %r" % sap_vnfd
.get("name"))
409 def _load_docker_files(self
):
411 Get all paths to Dockerfiles from VNFDs and store them in dict.
414 for k
, v
in self
.vnfds
.iteritems():
415 for vu
in v
.get("virtual_deployment_units"):
416 if vu
.get("vm_image_format") == "docker":
417 vm_image
= vu
.get("vm_image")
418 docker_path
= os
.path
.join(
419 self
.package_content_path
,
420 make_relative_path(vm_image
))
421 self
.local_docker_files
[k
] = docker_path
422 LOG
.debug("Found Dockerfile (%r): %r" % (k
, docker_path
))
424 def _load_docker_urls(self
):
426 Get all URLs to pre-build docker images in some repo.
429 for k
, v
in self
.vnfds
.iteritems():
430 for vu
in v
.get("virtual_deployment_units"):
431 if vu
.get("vm_image_format") == "docker":
432 url
= vu
.get("vm_image")
434 url
= url
.replace("http://", "")
435 self
.remote_docker_image_urls
[k
] = url
436 LOG
.debug("Found Docker image URL (%r): %r" % (k
, self
.remote_docker_image_urls
[k
]))
438 def _build_images_from_dockerfiles(self
):
440 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
442 if GK_STANDALONE_MODE
:
443 return # do not build anything in standalone mode
445 LOG
.info("Building %d Docker images (this may take several minutes) ..." % len(self
.local_docker_files
))
446 for k
, v
in self
.local_docker_files
.iteritems():
447 for line
in dc
.build(path
=v
.replace("Dockerfile", ""), tag
=k
, rm
=False, nocache
=False):
448 LOG
.debug("DOCKER BUILD: %s" % line
)
449 LOG
.info("Docker image created: %s" % k
)
451 def _pull_predefined_dockerimages(self
):
453 If the package contains URLs to pre-build Docker images, we download them with this method.
456 for url
in self
.remote_docker_image_urls
.itervalues():
457 if not FORCE_PULL
: # only pull if not present (speedup for development)
458 if len(dc
.images(name
=url
)) > 0:
459 LOG
.debug("Image %r present. Skipping pull." % url
)
461 LOG
.info("Pulling image: %r" % url
)
463 insecure_registry
=True)
465 def _check_docker_image_exists(self
, image_name
):
467 Query the docker service and check if the given image exists
468 :param image_name: name of the docker image
471 return len(DockerClient().images(image_name
)) > 0
473 def _calculate_placement(self
, algorithm
):
475 Do placement by adding the a field "dc" to
476 each VNFD that points to one of our
477 data center objects known to the gatekeeper.
479 assert(len(self
.vnfds
) > 0)
480 assert(len(GK
.dcs
) > 0)
481 # instantiate algorithm an place
483 p
.place(self
.nsd
, self
.vnfds
, GK
.dcs
)
484 LOG
.info("Using placement algorithm: %r" % p
.__class
__.__name
__)
485 # lets print the placement result
486 for name
, vnfd
in self
.vnfds
.iteritems():
487 LOG
.info("Placed VNF %r on DC %r" % (name
, str(vnfd
.get("dc"))))
491 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        # assign the first known DC to every VNFD (name key is not needed here)
        for vnfd in vnfds.itervalues():
            vnfd["dc"] = list(dcs.itervalues())[0]
505 Resource definitions and API endpoints
class Packages(fr.Resource):
    """REST endpoint to upload *.son service packages and list them."""

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: dict with service_uuid and upload meta data
        """
        # NOTE(review): the def/try/else skeleton was lost in this chunk and is reconstructed
        try:
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # NOTE(review): this hashes the repr of the FileStorage object, not the file
            # contents — the returned sha1 is not a real content hash; left unchanged
            # because clients may already rely on it.
            file_hash = hashlib.sha1(str(son_file)).hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception as ex:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict with list of UUIDs
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}
class Instantiations(fr.Resource):
    """REST endpoint to start service instances and list running ones."""

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: dict with service_instance_uuid
        """
        # NOTE(review): the def lines of this class were lost in this chunk and are reconstructed
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict with list of instance UUID lists
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
# create a single, global GK object
GK = Gatekeeper()  # NOTE(review): reconstructed — GK is referenced throughout this module

# setup the Flask app and the REST API
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)  # NOTE(review): reconstructed — `api` is used below but its definition was lost
# register REST endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
def start_rest_api(host, port, datacenters=None):
    """
    Start the dummy gatekeeper REST API (blocking).
    :param host: interface to bind to
    :param port: TCP port to listen on
    :param datacenters: dict of DC name -> DC object made available to the GK
    """
    # avoid the original mutable default argument; None behaves like the old dict() default
    GK.dcs = datacenters if datacenters is not None else dict()  # NOTE(review): assignment reconstructed — confirm
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """Create directory `name` (including parents) if it does not exist yet."""
    if not os.path.exists(name):
        # note: small TOCTOU race if the directory is created concurrently
        os.makedirs(name)
def load_yaml(path):
    """
    Load a YAML file and return its contents as dict.
    Returns an empty dict (and logs) on parse errors.
    :param path: path of the YAML file
    """
    # NOTE(review): def line and try/return skeleton were lost in this chunk and are reconstructed
    with open(path, "r") as f:
        try:
            # NOTE(review): yaml.load is unsafe on untrusted input — consider yaml.safe_load
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r
def make_relative_path(path):
    """
    Strip a leading 'file://' scheme and a leading '/' so the result can be
    joined onto the package content directory.
    :param path: path string from a descriptor
    :return: relative path string
    """
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    # restored return — the visible fragment was missing it
    return path
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Produces '<prefix>.<base>.<ip>/<subnet_size>'.
    """
    r = "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
    # restored return — the visible fragment was missing it
    return r
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    Produces n strings of the form '<i>.0.0.<ip>/<subnet_size>' for
    i in [start, start + n).
    """
    r = list()  # restored initializer — the visible fragment was missing it
    for i in range(start, start + n):
        r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
    # restored return — the visible fragment was missing it
    return r
if __name__ == '__main__':
    # Lets allow to run the API in standalone mode (no emulator attached).
    # (was a no-op string statement in the original; converted to a comment)
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)