2 Copyright (c) 2015 SONATA-NFV and Paderborn University
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
9 http://www.apache.org/licenses/LICENSE-2.0
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
20 permission.
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
42 from docker
import Client
as DockerClient
43 from flask
import Flask
, request
44 import flask_restful
as fr
45 from collections
import defaultdict
# Module-wide logger; werkzeug (Flask's built-in HTTP server) is raised to
# WARNING so per-request noise stays out of the gatekeeper log.
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# On-disk locations for uploaded packages and the unpacked catalog.
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
# (restored: this flag is read by Service._pull_predefined_dockerimages but its
# assignment was missing)
FORCE_PULL = False

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable! Don't change its default value.
# NOTE(review): the variable this comment documents is not visible in this
# chunk — confirm against the full file before relying on it.
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps track of uploaded service packages and of the
    emulator data centers (DCs) services can be deployed to.
    """

    def __init__(self):
        self.services = dict()  # service_uuid -> Service
        # DC name -> emulator data center object; filled by start_rest_api()
        # and read by Service._calculate_placement (GK.dcs)
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid: UUID under which the package is stored
        :param service: Service object built from the uploaded *.son file
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()

    def get_next_vnf_name(self):
        """Return a fresh, unique short VNF name like 'vnf1', 'vnf2', ..."""
        # increment first so repeated calls never hand out the same name
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self,
                 service_uuid,
                 package_file_hash,
                 package_file_path):
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        # descriptors: filled by the _load_* methods during onboarding
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        self.vnf_name2docker_name = dict()
        self.sap_identifiers = set()
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)

    def onboard(self):
        """
        Do all steps to prepare this service to be instantiated
        :return:
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
        self._load_nsd()
        self._load_vnfd()
        self._load_saps()
        # 3. prepare container images (e.g. download or build Dockerfile)
        if BUILD_DOCKERFILE:
            self._load_docker_files()
            self._build_images_from_dockerfiles()
        else:
            self._load_docker_urls()
            self._pull_predefined_dockerimages()
        LOG.info("On-boarded service: %r" % self.manifest.get("name"))

    def start_service(self):
        """
        This methods creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return: uuid of the new service instance
        """
        LOG.info("Starting service %r" % self.uuid)

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build a instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["vnf_instances"] = list()

        # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
        vnf_id2vnf_name = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_id"]: d["vnf_name"]},
                                                 self.nsd["network_functions"])))

        # 3. compute placement of this service instance (adds DC names to VNFDs)
        if not GK_STANDALONE_MODE:
            #self._calculate_placement(FirstDcPlacement)
            self._calculate_placement(RoundRobinDcPlacement)
        # iterate over all vnfds that we have to start
        for vnfd in self.vnfds.itervalues():
            vnfi = None
            if not GK_STANDALONE_MODE:
                vnfi = self._start_vnfd(vnfd)
            self.instances[instance_uuid]["vnf_instances"].append(vnfi)

        vlinks = self.nsd["virtual_links"]
        fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
        eline_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-Line")]
        elan_fwd_links = [l for l in vlinks if (l["id"] in fwd_links) and (l["connectivity_type"] == "E-LAN")]

        # 4a. deploy E-Line links
        # cookie is used as identifier for the flowrules installed by the dummygatekeeper
        # eg. different services get a unique cookie for their flowrules
        # NOTE(review): the cookie initialisation was not visible in this
        # chunk; a constant start value is assumed — confirm against full file.
        cookie = 1
        for link in eline_fwd_links:
            src_id, src_if_name = link["connection_points_reference"][0].split(":")
            dst_id, dst_if_name = link["connection_points_reference"][1].split(":")

            # check if there is a SAP in the link
            if src_id in self.sap_identifiers:
                src_docker_name = "{0}_{1}".format(src_id, src_if_name)
                src_id = src_docker_name
            else:
                src_docker_name = src_id

            if dst_id in self.sap_identifiers:
                dst_docker_name = "{0}_{1}".format(dst_id, dst_if_name)
                dst_id = dst_docker_name
            else:
                dst_docker_name = dst_id

            src_name = vnf_id2vnf_name[src_id]
            dst_name = vnf_id2vnf_name[dst_id]

            LOG.debug(
                "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
                    src_name, src_id, src_if_name, dst_name, dst_id, dst_if_name))

            if (src_name in self.vnfds) and (dst_name in self.vnfds):
                network = self.vnfds[src_name].get("dc").net  # there should be a cleaner way to find the DCNetwork
                LOG.debug(src_docker_name)
                ret = network.setChain(
                    src_docker_name, dst_docker_name,
                    vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                    bidirectional=True, cmd="add-flow", cookie=cookie, priority=10)

                # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
                src_vnfi = self._get_vnf_instance(instance_uuid, src_name)
                if src_vnfi is not None:
                    self._vnf_reconfigure_network(src_vnfi, src_if_name, self.eline_subnets_src.pop(0))
                dst_vnfi = self._get_vnf_instance(instance_uuid, dst_name)
                if dst_vnfi is not None:
                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, self.eline_subnets_dst.pop(0))

        # 4b. deploy E-LAN links
        base = 10
        for link in elan_fwd_links:
            # generate lan ip address
            ip = 1
            for intf in link["connection_points_reference"]:
                ip_address = generate_lan_string("10.0", base, subnet_size=24, ip=ip)
                vnf_id, intf_name = intf.split(":")
                if vnf_id in self.sap_identifiers:
                    src_docker_name = "{0}_{1}".format(vnf_id, intf_name)
                    vnf_id = src_docker_name
                vnf_name = vnf_id2vnf_name[vnf_id]
                LOG.debug(
                    "Setting up E-LAN link. %s(%s:%s) -> %s" % (
                        vnf_name, vnf_id, intf_name, ip_address))

                if vnf_name in self.vnfds:
                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
                    vnfi = self._get_vnf_instance(instance_uuid, vnf_name)
                    if vnfi is not None:
                        self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                # increase for the next ip address on this E-LAN
                ip += 1
            # increase the base ip address for the next E-LAN
            base += 1

        # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
        self._trigger_emulator_start_scripts_in_vnfis(self.instances[instance_uuid]["vnf_instances"])

        LOG.info("Service started. Instance id: %r" % instance_uuid)
        return instance_uuid

    def _start_vnfd(self, vnfd):
        """
        Start a single VNFD of this service
        :param vnfd: vnfd descriptor dict
        :return: the started VNF instance (emulator container object)
        """
        # iterate over all deployment units within each VNFDs
        for u in vnfd.get("virtual_deployment_units"):
            # 1. get the name of the docker image to start and the assigned DC
            vnf_name = vnfd.get("name")
            if vnf_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_name)
            docker_name = self.remote_docker_image_urls.get(vnf_name)
            target_dc = vnfd.get("dc")
            # 2. perform some checks to ensure we can start the container
            assert(docker_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_name):
                raise Exception("Docker image %r not found. Abort." % docker_name)
            # 3. do the dc.startCompute(name="foobar") call to run the container
            # TODO consider flavors, and other annotations
            intfs = vnfd.get("connection_points")

            # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
            # use the vnf_id in the nsd as docker name
            # so deployed containers can be easily mapped back to the nsd
            vnf_name2id = defaultdict(lambda: "NotExistingNode",
                                      reduce(lambda x, y: dict(x, **y),
                                             map(lambda d: {d["vnf_name"]: d["vnf_id"]},
                                                 self.nsd["network_functions"])))
            self.vnf_name2docker_name[vnf_name] = vnf_name2id[vnf_name]
            # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()

            LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
            LOG.debug("Interfaces for %r: %r" % (vnf_name, intfs))
            vnfi = target_dc.startCompute(self.vnf_name2docker_name[vnf_name], network=intfs, image=docker_name, flavor_name="small")
            # restored: the caller (start_service) stores this return value
            return vnfi

    def _get_vnf_instance(self, instance_uuid, name):
        """
        Returns the Docker object for the given VNF name (or Docker name).
        :param instance_uuid: UUID of the service instance to search in.
        :param name: VNF name or Docker name. We are fuzzy here.
        :return: matching VNF instance, or None if not found
        """
        dn = name
        if name in self.vnf_name2docker_name:
            dn = self.vnf_name2docker_name[name]
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnfi.name == dn:
                return vnfi
        # fixed: the %r placeholder previously had no argument
        LOG.warning("No container with name: %r found." % dn)
        return None

    @staticmethod
    def _vnf_reconfigure_network(vnfi, if_name, net_str):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instance
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :return:
        """
        intf = vnfi.intf(intf=if_name)
        if intf is not None:
            intf.setIP(net_str)
            LOG.debug("Reconfigured network of %s:%s to %r" % (vnfi.name, if_name, net_str))
        else:
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi.name, if_name))

    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        """Run the SON_EMU_CMD entry point script (if any) inside each VNFI."""
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            for env_var in env:
                if "SON_EMU_CMD=" in env_var:
                    # NOTE(review): split("=")[1] truncates commands that
                    # themselves contain '=' — consider split("=", 1)[1]
                    cmd = str(env_var.split("=")[1])
                    LOG.info("Executing entry point script in %r: %r" % (vnfi.name, cmd))
                    # execute command in new thread to ensure that GK is not blocked by VNF
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    t.daemon = True
                    t.start()

    def _unpack_service_package(self):
        """
        unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
        """
        LOG.info("Unzipping: %r" % self.package_file_path)
        with zipfile.ZipFile(self.package_file_path, "r") as z:
            z.extractall(self.package_content_path)

    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        :return:
        """
        self.manifest = load_yaml(
            os.path.join(
                self.package_content_path, "META-INF/MANIFEST.MF"))

    def _load_nsd(self):
        """
        Load the entry NSD YAML and keep it as dict.
        :return:
        """
        if "entry_service_template" in self.manifest:
            nsd_path = os.path.join(
                self.package_content_path,
                make_relative_path(self.manifest.get("entry_service_template")))
            self.nsd = load_yaml(nsd_path)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))

    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        :return:
        """
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                if pc.get("content-type") == "application/sonata.function_descriptor":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("name")))
                    vnfd = load_yaml(vnfd_path)
                    self.vnfds[vnfd.get("name")] = vnfd
                    LOG.debug("Loaded VNFD: %r" % vnfd.get("name"))

    def _load_saps(self):
        """Create a SAP VNFD/container entry for each NSD connection point of type 'interface'."""
        # Each Service Access Point (connection_point) in the nsd is getting its own container
        SAPs = [p["id"] for p in self.nsd["connection_points"] if p["type"] == "interface"]
        for sap in SAPs:
            # endpoints needed in this service
            sap_vnf_id, sap_vnf_interface = sap.split(':')
            # set of the connection_point ids found in the nsd (in the examples this is 'ns')
            self.sap_identifiers.add(sap_vnf_id)

            sap_docker_name = sap.replace(':', '_')

            # add SAP to self.vnfds
            sapfile = pkg_resources.resource_filename(__name__, "sap_vnfd.yml")
            sap_vnfd = load_yaml(sapfile)
            sap_vnfd["connection_points"][0]["id"] = sap_vnf_interface
            sap_vnfd["name"] = sap_docker_name
            self.vnfds[sap_docker_name] = sap_vnfd
            # add SAP vnf to list in the NSD so it is deployed later on
            # each SAP get a unique VNFD and vnf_id in the NSD
            self.nsd["network_functions"].append({"vnf_id": sap_docker_name, "vnf_name": sap_docker_name})
            LOG.debug("Loaded SAP: %r" % sap_vnfd.get("name"))

    def _load_docker_files(self):
        """
        Get all paths to Dockerfiles from VNFDs and store them in dict.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    vm_image = vu.get("vm_image")
                    docker_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(vm_image))
                    self.local_docker_files[k] = docker_path
                    LOG.debug("Found Dockerfile (%r): %r" % (k, docker_path))

    def _load_docker_urls(self):
        """
        Get all URLs to pre-build docker images in some repo.
        :return:
        """
        for k, v in self.vnfds.iteritems():
            for vu in v.get("virtual_deployment_units"):
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    # strip the scheme so the string can be used as docker image name
                    url = url.replace("http://", "")
                    self.remote_docker_image_urls[k] = url
                    LOG.debug("Found Docker image URL (%r): %r" % (k, self.remote_docker_image_urls[k]))

    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        # restored: `dc` (the Docker client) was undefined in this method
        dc = DockerClient()
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(self.local_docker_files))
        for k, v in self.local_docker_files.iteritems():
            for line in dc.build(path=v.replace("Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)

    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-build Docker images, we download them with this method.
        """
        # restored: `dc` (the Docker client) was undefined in this method
        dc = DockerClient()
        for url in self.remote_docker_image_urls.itervalues():
            if not FORCE_PULL:  # only pull if not present (speedup for development)
                if len(dc.images(name=url)) > 0:
                    LOG.debug("Image %r present. Skipping pull." % url)
                    continue
            LOG.info("Pulling image: %r" % url)
            dc.pull(url,
                    insecure_registry=True)

    def _check_docker_image_exists(self, image_name):
        """
        Query the docker service and check if the given image exists
        :param image_name: name of the docker image
        :return: True iff at least one matching image is known to Docker
        """
        return len(DockerClient().images(image_name)) > 0

    def _calculate_placement(self, algorithm):
        """
        Do placement by adding the a field "dc" to
        each VNFD that points to one of our
        data center objects known to the gatekeeper.
        :param algorithm: placement class to instantiate (e.g. RoundRobinDcPlacement)
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        # instantiate algorithm an place
        p = algorithm()
        p.place(self.nsd, self.vnfds, GK.dcs)
        LOG.info("Using placement algorithm: %r" % p.__class__.__name__)
        # lets print the placement result
        for name, vnfd in self.vnfds.iteritems():
            LOG.info("Placed VNF %r on DC %r" % (name, str(vnfd.get("dc"))))
493 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        # .values()/.items() replace the Python-2-only iter* variants; the
        # behavior is identical here and keeps the code Python-3 compatible.
        first_dc = list(dcs.values())[0]
        for vnfd in vnfds.values():
            vnfd["dc"] = first_dc
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    def place(self, nsd, vnfds, dcs):
        dcs_list = list(dcs.values())
        # enumerate supplies the DC cursor `c`, which was previously used
        # without ever being initialized (NameError on first iteration)
        for c, vnfd in enumerate(vnfds.values()):
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
522 Resource definitions and API endpoints
class Packages(fr.Resource):

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: JSON dict with service_uuid, size, sha1 and error fields
        """
        try:
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # NOTE(review): this hashes str(son_file) (the FileStorage repr),
            # not the file's content — confirm whether that is intended.
            file_hash = hashlib.sha1(str(son_file)).hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception as ex:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: JSON dict with key "service_uuid_list"
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}
class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: JSON dict with key "service_instance_uuid" (or 404)
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: JSON dict with key "service_instantiations_list"
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
# create a single, global GK object
GK = Gatekeeper()
# setup the Flask application and its REST API
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
# restored: the Api object was missing although add_resource is called on it
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
def start_rest_api(host, port, datacenters=None):
    """
    Run the gatekeeper REST API (blocking call).
    :param host: interface to bind the Flask server to
    :param port: TCP port to listen on
    :param datacenters: dict of emulator data centers made known to the GK
    """
    # avoid a shared mutable default argument; callers see unchanged behavior
    GK.dcs = dict() if datacenters is None else datacenters
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """Create the directory (including parents) if it does not exist yet."""
    if not os.path.exists(name):
        os.makedirs(name)
def load_yaml(path):
    """
    Read a YAML file and return its content as dict.
    :param path: path of the YAML file to load
    :return: parsed content (empty dict on parse error)
    """
    with open(path, "r") as f:
        try:
            # NOTE(review): yaml.load without an explicit safe Loader can
            # execute arbitrary tags; packages are external input — consider
            # yaml.safe_load here.
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
        return r
def make_relative_path(path):
    """
    Strip a leading "file://" scheme and/or a single leading "/" so the
    result can be joined onto the package content directory.
    :param path: possibly absolute path or file:// URL
    :return: relative path string (restored missing return)
    """
    if path.startswith("file://"):
        path = path.replace("file://", "", 1)
    if path.startswith("/"):
        path = path.replace("/", "", 1)
    return path
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    :return: e.g. "10.0.10.1/24" (restored missing return)
    """
    r = "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
    return r
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings.
    :return: list of n subnet strings (restored missing accumulator/return)
    """
    r = list()
    for i in range(start, start + n):
        r.append("%d.0.0.%d/%d" % (i, ip, subnet_size))
    return r
if __name__ == '__main__':
    # Lets allow to run the API in standalone mode.
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)