Copyright (c) 2015 SONATA-NFV and Paderborn University

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.

This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).

This module implements a simple REST API that behaves like SONATA's gatekeeper.

It is only used to support the development of SONATA's SDK tools and to demonstrate
the year 1 version of the emulator until the integration with WP4's orchestrator is done.
41 from docker
import Client
as DockerClient
42 from flask
import Flask
, request
43 import flask_restful
as fr
44 from collections
import defaultdict
# module-wide logger for the dummy gatekeeper
LOG = logging.getLogger("sonata-dummy-gatekeeper")
LOG.setLevel(logging.DEBUG)
# silence Flask's request logging (werkzeug) below WARNING
logging.getLogger("werkzeug").setLevel(logging.WARNING)

# base folder used to store uploaded packages and extracted catalogs
GK_STORAGE = "/tmp/son-dummy-gk/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
# Automatically deploy SAPs (endpoints) of the service as new containers
# NOTE(review): the assignments belonging to the two comments above
# (e.g. FORCE_PULL, used by _pull_predefined_dockerimages) are outside
# this excerpt — confirm against the full file.
class Gatekeeper(object):
    """
    Registry for uploaded service packages and the data centers
    the dummy gatekeeper can deploy to.
    """

    def __init__(self):
        # known service packages, indexed by service uuid
        self.services = dict()
        # data centers available for deployment (populated by start_rest_api)
        self.dcs = dict()
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid: uuid the package is registered under
        :param service: Service object wrapping the uploaded *.son package
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        service.onboard()

    def get_next_vnf_name(self):
        """Return a fresh short VNF name like 'vnf1', 'vnf2', ..."""
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    dummy gatekeeper.
    Can have multiple running instances of this service.
    """

    def __init__(self, service_uuid, package_file_hash, package_file_path):
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        self.package_content_path = os.path.join(CATALOG_FOLDER, "services/%s" % self.uuid)
        # descriptors, filled during onboarding
        # NOTE(review): these three initializations are reconstructed from
        # later use (_load_package_descriptor/_load_nsd/_load_vnfd) — confirm
        self.manifest = None
        self.nsd = None
        self.vnfds = dict()
        self.local_docker_files = dict()
        self.remote_docker_image_urls = dict()
        self.instances = dict()
        self.vnf_name2docker_name = dict()
        self.sap_identifiers = set()
        # lets generate a set of subnet configurations used for e-line chaining setup
        self.eline_subnets_src = generate_subnet_strings(50, start=200, subnet_size=24, ip=1)
        self.eline_subnets_dst = generate_subnet_strings(50, start=200, subnet_size=24, ip=2)
121 Do all steps to prepare this service to be instantiated
124 # 1. extract the contents of the package and store them in our catalog
125 self
._unpack
_service
_package
()
126 # 2. read in all descriptor files
127 self
._load
_package
_descriptor
()
132 # 3. prepare container images (e.g. download or build Dockerfile)
134 self
._load
_docker
_files
()
135 self
._build
_images
_from
_dockerfiles
()
137 self
._load
_docker
_urls
()
138 self
._pull
_predefined
_dockerimages
()
139 LOG
.info("On-boarded service: %r" % self
.manifest
.get("name"))
141 def start_service(self
):
143 This methods creates and starts a new service instance.
144 It computes placements, iterates over all VNFDs, and starts
145 each VNFD as a Docker container in the data center selected
146 by the placement algorithm.
149 LOG
.info("Starting service %r" % self
.uuid
)
151 # 1. each service instance gets a new uuid to identify it
152 instance_uuid
= str(uuid
.uuid4())
153 # build a instances dict (a bit like a NSR :))
154 self
.instances
[instance_uuid
] = dict()
155 self
.instances
[instance_uuid
]["vnf_instances"] = list()
157 # 2. Configure the chaining of the network functions (currently only E-Line and E-LAN links supported)
158 vnf_id2vnf_name
= defaultdict(lambda: "NotExistingNode",
159 reduce(lambda x
, y
: dict(x
, **y
),
160 map(lambda d
: {d
["vnf_id"]: d
["vnf_name"]},
161 self
.nsd
["network_functions"])))
163 # 3. compute placement of this service instance (adds DC names to VNFDs)
164 if not GK_STANDALONE_MODE
:
165 self
._calculate
_placement
(FirstDcPlacement
)
166 # iterate over all vnfds that we have to start
167 for vnfd
in self
.vnfds
.itervalues():
169 if not GK_STANDALONE_MODE
:
170 vnfi
= self
._start
_vnfd
(vnfd
)
171 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
173 vlinks
= self
.nsd
["virtual_links"]
174 fwd_links
= self
.nsd
["forwarding_graphs"][0]["constituent_virtual_links"]
175 eline_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-Line")]
176 elan_fwd_links
= [l
for l
in vlinks
if (l
["id"] in fwd_links
) and (l
["connectivity_type"] == "E-LAN")]
178 # 4a. deploy E-Line links
179 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
180 # eg. different services get a unique cookie for their flowrules
182 for link
in eline_fwd_links
:
183 src_id
, src_if_name
= link
["connection_points_reference"][0].split(":")
184 dst_id
, dst_if_name
= link
["connection_points_reference"][1].split(":")
186 # check if there is a SAP in the link
187 if src_id
in self
.sap_identifiers
:
188 src_docker_name
= "{0}_{1}".format(src_id
, src_if_name
)
189 src_id
= src_docker_name
191 src_docker_name
= src_id
193 if dst_id
in self
.sap_identifiers
:
194 dst_docker_name
= "{0}_{1}".format(dst_id
, dst_if_name
)
195 dst_id
= dst_docker_name
197 dst_docker_name
= dst_id
199 src_name
= vnf_id2vnf_name
[src_id
]
200 dst_name
= vnf_id2vnf_name
[dst_id
]
203 "Setting up E-Line link. %s(%s:%s) -> %s(%s:%s)" % (
204 src_name
, src_id
, src_if_name
, dst_name
, dst_id
, dst_if_name
))
206 if (src_name
in self
.vnfds
) and (dst_name
in self
.vnfds
):
207 network
= self
.vnfds
[src_name
].get("dc").net
# there should be a cleaner way to find the DCNetwork
208 LOG
.debug(src_docker_name
)
209 ret
= network
.setChain(
210 src_docker_name
, dst_docker_name
,
211 vnf_src_interface
=src_if_name
, vnf_dst_interface
=dst_if_name
,
212 bidirectional
=True, cmd
="add-flow", cookie
=cookie
, priority
=10)
214 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
215 src_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, src_name
)
216 if src_vnfi
is not None:
217 self
._vnf
_reconfigure
_network
(src_vnfi
, src_if_name
, self
.eline_subnets_src
.pop(0))
218 dst_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, dst_name
)
219 if dst_vnfi
is not None:
220 self
._vnf
_reconfigure
_network
(dst_vnfi
, dst_if_name
, self
.eline_subnets_dst
.pop(0))
222 # 4b. deploy E-LAN links
224 for link
in elan_fwd_links
:
225 # generate lan ip address
227 for intf
in link
["connection_points_reference"]:
228 ip_address
= generate_lan_string("10.0", base
, subnet_size
=24, ip
=ip
)
229 vnf_id
, intf_name
= intf
.split(":")
230 if vnf_id
in self
.sap_identifiers
:
231 src_docker_name
= "{0}_{1}".format(vnf_id
, intf_name
)
232 vnf_id
= src_docker_name
233 vnf_name
= vnf_id2vnf_name
[vnf_id
]
235 "Setting up E-LAN link. %s(%s:%s) -> %s" % (
236 vnf_name
, vnf_id
, intf_name
, ip_address
))
238 if vnf_name
in self
.vnfds
:
239 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
240 # E-LAN relies on the learning switch capability of the infrastructure switch in dockernet,
241 # so no explicit chaining is necessary
242 vnfi
= self
._get
_vnf
_instance
(instance_uuid
, vnf_name
)
244 self
._vnf
_reconfigure
_network
(vnfi
, intf_name
, ip_address
)
245 # increase for the next ip address on this E-LAN
247 # increase the base ip address for the next E-LAN
250 # 5. run the emulator specific entrypoint scripts in the VNFIs of this service instance
251 self
._trigger
_emulator
_start
_scripts
_in
_vnfis
(self
.instances
[instance_uuid
]["vnf_instances"])
253 LOG
.info("Service started. Instance id: %r" % instance_uuid
)
256 def _start_vnfd(self
, vnfd
):
258 Start a single VNFD of this service
259 :param vnfd: vnfd descriptor dict
262 # iterate over all deployment units within each VNFDs
263 for u
in vnfd
.get("virtual_deployment_units"):
264 # 1. get the name of the docker image to start and the assigned DC
265 vnf_name
= vnfd
.get("name")
266 if vnf_name
not in self
.remote_docker_image_urls
:
267 raise Exception("No image name for %r found. Abort." % vnf_name
)
268 docker_name
= self
.remote_docker_image_urls
.get(vnf_name
)
269 target_dc
= vnfd
.get("dc")
270 # 2. perform some checks to ensure we can start the container
271 assert(docker_name
is not None)
272 assert(target_dc
is not None)
273 if not self
._check
_docker
_image
_exists
(docker_name
):
274 raise Exception("Docker image %r not found. Abort." % docker_name
)
275 # 3. do the dc.startCompute(name="foobar") call to run the container
276 # TODO consider flavors, and other annotations
277 intfs
= vnfd
.get("connection_points")
279 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
280 # use the vnf_id in the nsd as docker name
281 # so deployed containers can be easily mapped back to the nsd
282 vnf_name2id
= defaultdict(lambda: "NotExistingNode",
283 reduce(lambda x
, y
: dict(x
, **y
),
284 map(lambda d
: {d
["vnf_name"]: d
["vnf_id"]},
285 self
.nsd
["network_functions"])))
286 self
.vnf_name2docker_name
[vnf_name
] = vnf_name2id
[vnf_name
]
287 # self.vnf_name2docker_name[vnf_name] = GK.get_next_vnf_name()
289 LOG
.info("Starting %r as %r in DC %r" % (vnf_name
, self
.vnf_name2docker_name
[vnf_name
], vnfd
.get("dc")))
290 LOG
.debug("Interfaces for %r: %r" % (vnf_name
, intfs
))
291 vnfi
= target_dc
.startCompute(self
.vnf_name2docker_name
[vnf_name
], network
=intfs
, image
=docker_name
, flavor_name
="small")
294 def _get_vnf_instance(self
, instance_uuid
, name
):
296 Returns the Docker object for the given VNF name (or Docker name).
297 :param instance_uuid: UUID of the service instance to search in.
298 :param name: VNF name or Docker name. We are fuzzy here.
302 if name
in self
.vnf_name2docker_name
:
303 dn
= self
.vnf_name2docker_name
[name
]
304 for vnfi
in self
.instances
[instance_uuid
]["vnf_instances"]:
307 LOG
.warning("No container with name: %r found.")
311 def _vnf_reconfigure_network(vnfi
, if_name
, net_str
):
313 Reconfigure the network configuration of a specific interface
314 of a running container.
315 :param vnfi: container instacne
316 :param if_name: interface name
317 :param net_str: network configuration string, e.g., 1.2.3.4/24
320 intf
= vnfi
.intf(intf
=if_name
)
323 LOG
.debug("Reconfigured network of %s:%s to %r" % (vnfi
.name
, if_name
, net_str
))
325 LOG
.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi
.name
, if_name
))
328 def _trigger_emulator_start_scripts_in_vnfis(self
, vnfi_list
):
329 for vnfi
in vnfi_list
:
330 config
= vnfi
.dcinfo
.get("Config", dict())
331 env
= config
.get("Env", list())
333 if "SON_EMU_CMD=" in env_var
:
334 cmd
= str(env_var
.split("=")[1])
335 LOG
.info("Executing entrypoint script in %r: %r" % (vnfi
.name
, cmd
))
338 def _unpack_service_package(self
):
340 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
342 LOG
.info("Unzipping: %r" % self
.package_file_path
)
343 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
344 z
.extractall(self
.package_content_path
)
347 def _load_package_descriptor(self
):
349 Load the main package descriptor YAML and keep it as dict.
352 self
.manifest
= load_yaml(
354 self
.package_content_path
, "META-INF/MANIFEST.MF"))
358 Load the entry NSD YAML and keep it as dict.
361 if "entry_service_template" in self
.manifest
:
362 nsd_path
= os
.path
.join(
363 self
.package_content_path
,
364 make_relative_path(self
.manifest
.get("entry_service_template")))
365 self
.nsd
= load_yaml(nsd_path
)
366 LOG
.debug("Loaded NSD: %r" % self
.nsd
.get("name"))
368 def _load_vnfd(self
):
370 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
373 if "package_content" in self
.manifest
:
374 for pc
in self
.manifest
.get("package_content"):
375 if pc
.get("content-type") == "application/sonata.function_descriptor":
376 vnfd_path
= os
.path
.join(
377 self
.package_content_path
,
378 make_relative_path(pc
.get("name")))
379 vnfd
= load_yaml(vnfd_path
)
380 self
.vnfds
[vnfd
.get("name")] = vnfd
381 LOG
.debug("Loaded VNFD: %r" % vnfd
.get("name"))
383 def _load_saps(self
):
384 # Each Service Access Point (connection_point) in the nsd is getting its own container
385 SAPs
= [p
["id"] for p
in self
.nsd
["connection_points"] if p
["type"] == "interface"]
387 # endpoints needed in this service
388 sap_vnf_id
, sap_vnf_interface
= sap
.split(':')
389 # set of the connection_point ids found in the nsd (in the examples this is 'ns')
390 self
.sap_identifiers
.add(sap_vnf_id
)
392 sap_docker_name
= sap
.replace(':', '_')
394 # add SAP to self.vnfds
395 sapfile
= pkg_resources
.resource_filename(__name__
, "sap_vnfd.yml")
396 sap_vnfd
= load_yaml(sapfile
)
397 sap_vnfd
["connection_points"][0]["id"] = sap_vnf_interface
398 sap_vnfd
["name"] = sap_docker_name
399 self
.vnfds
[sap_docker_name
] = sap_vnfd
400 # add SAP vnf to list in the NSD so it is deployed later on
401 # each SAP get a unique VNFD and vnf_id in the NSD
402 self
.nsd
["network_functions"].append({"vnf_id": sap_docker_name
, "vnf_name": sap_docker_name
})
403 LOG
.debug("Loaded SAP: %r" % sap_vnfd
.get("name"))
405 def _load_docker_files(self
):
407 Get all paths to Dockerfiles from VNFDs and store them in dict.
410 for k
, v
in self
.vnfds
.iteritems():
411 for vu
in v
.get("virtual_deployment_units"):
412 if vu
.get("vm_image_format") == "docker":
413 vm_image
= vu
.get("vm_image")
414 docker_path
= os
.path
.join(
415 self
.package_content_path
,
416 make_relative_path(vm_image
))
417 self
.local_docker_files
[k
] = docker_path
418 LOG
.debug("Found Dockerfile (%r): %r" % (k
, docker_path
))
420 def _load_docker_urls(self
):
422 Get all URLs to pre-build docker images in some repo.
425 for k
, v
in self
.vnfds
.iteritems():
426 for vu
in v
.get("virtual_deployment_units"):
427 if vu
.get("vm_image_format") == "docker":
428 url
= vu
.get("vm_image")
430 url
= url
.replace("http://", "")
431 self
.remote_docker_image_urls
[k
] = url
432 LOG
.debug("Found Docker image URL (%r): %r" % (k
, self
.remote_docker_image_urls
[k
]))
434 def _build_images_from_dockerfiles(self
):
436 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
438 if GK_STANDALONE_MODE
:
439 return # do not build anything in standalone mode
441 LOG
.info("Building %d Docker images (this may take several minutes) ..." % len(self
.local_docker_files
))
442 for k
, v
in self
.local_docker_files
.iteritems():
443 for line
in dc
.build(path
=v
.replace("Dockerfile", ""), tag
=k
, rm
=False, nocache
=False):
444 LOG
.debug("DOCKER BUILD: %s" % line
)
445 LOG
.info("Docker image created: %s" % k
)
447 def _pull_predefined_dockerimages(self
):
449 If the package contains URLs to pre-build Docker images, we download them with this method.
452 for url
in self
.remote_docker_image_urls
.itervalues():
453 if not FORCE_PULL
: # only pull if not present (speedup for development)
454 if len(dc
.images(name
=url
)) > 0:
455 LOG
.debug("Image %r present. Skipping pull." % url
)
457 LOG
.info("Pulling image: %r" % url
)
459 insecure_registry
=True)
461 def _check_docker_image_exists(self
, image_name
):
463 Query the docker service and check if the given image exists
464 :param image_name: name of the docker image
467 return len(DockerClient().images(image_name
)) > 0
469 def _calculate_placement(self
, algorithm
):
471 Do placement by adding the a field "dc" to
472 each VNFD that points to one of our
473 data center objects known to the gatekeeper.
475 assert(len(self
.vnfds
) > 0)
476 assert(len(GK
.dcs
) > 0)
477 # instantiate algorithm an place
479 p
.place(self
.nsd
, self
.vnfds
, GK
.dcs
)
480 LOG
.info("Using placement algorithm: %r" % p
.__class
__.__name
__)
481 # lets print the placement result
482 for name
, vnfd
in self
.vnfds
.iteritems():
483 LOG
.info("Placed VNF %r on DC %r" % (name
, str(vnfd
.get("dc"))))
487 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """
    def place(self, nsd, vnfds, dcs):
        """
        Assign the first data center to every VNFD.
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict of VNF name -> VNFD dict (mutated in place)
        :param dcs: dict of known data centers
        """
        # the chosen DC is loop-invariant: look it up once, and iterate
        # values directly since the VNF names are not used
        first_dc = list(dcs.itervalues())[0]
        for vnfd in vnfds.itervalues():
            vnfd["dc"] = first_dc
501 Resource definitions and API endpoints
class Packages(fr.Resource):

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: dict with service_uuid, size and sha1 of the package (or an error), plus HTTP code
        """
        try:
            # lets search for the package in the request
            if "package" in request.files:
                son_file = request.files["package"]
            # elif "file" in request.files:
            #     son_file = request.files["file"]
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            son_file.save(upload_path)
            size = os.path.getsize(upload_path)
            # bug fix: hash the stored package bytes; the previous
            # hashlib.sha1(str(son_file)) hashed the FileStorage object's
            # repr instead of the actual package content
            h = hashlib.sha1()
            with open(upload_path, "rb") as f:
                for chunk in iter(lambda: f.read(65536), b""):
                    h.update(chunk)
            file_hash = h.hexdigest()
            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)
            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}
        except Exception:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: json list
        """
        LOG.info("GET /packages")
        return {"service_uuid_list": list(GK.services.iterkeys())}
class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: dict with service_instance_uuid, or 404
        """
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service in the list
            service_uuid = list(GK.services.iterkeys())[0]

        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: json list
        """
        LOG.info("GET /instantiations")
        return {"service_instantiations_list": [
            list(s.instances.iterkeys()) for s in GK.services.itervalues()]}
# create a single, global GK object
GK = Gatekeeper()
# setup Flask
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# register the REST endpoints
api.add_resource(Packages, '/packages')
api.add_resource(Instantiations, '/instantiations')
def start_rest_api(host, port, datacenters=None):
    """
    Start the gatekeeper REST API (blocking).
    :param host: interface to bind to
    :param port: TCP port to listen on
    :param datacenters: dict of data centers the GK may deploy to
    """
    # bug fix: avoid a mutable default argument; None is the sentinel
    GK.dcs = dict() if datacenters is None else datacenters
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """Create directory *name* (including parents) unless it already exists."""
    if not os.path.exists(name):
        os.makedirs(name)
def load_yaml(path):
    """
    Read the YAML file at *path* and return its content as a dict
    (an empty dict on parse errors).
    """
    with open(path, "r") as f:
        try:
            # NOTE(review): yaml.load without an explicit SafeLoader can execute
            # arbitrary tags; packages are untrusted input — consider yaml.safe_load
            r = yaml.load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error")
            r = dict()
    return r
def make_relative_path(path):
    """Strip a leading 'file://' scheme and a leading '/' so *path* can be joined onto the package root."""
    if path.startswith("file://"):
        path = path[len("file://"):]
    if path.startswith("/"):
        path = path[1:]
    return path
def generate_lan_string(prefix, base, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings,
    e.g. generate_lan_string("10.0", 10, ip=1) -> "10.0.10.1/24".
    """
    return "%s.%d.%d/%d" % (prefix, base, ip, subnet_size)
def generate_subnet_strings(n, start=1, subnet_size=24, ip=0):
    """
    Helper to generate different network configuration strings:
    *n* subnets of the form "<i>.0.0.<ip>/<subnet_size>" for i in [start, start+n).
    """
    return ["%d.0.0.%d/%d" % (i, ip, subnet_size)
            for i in range(start, start + n)]
if __name__ == '__main__':
    # Lets allow to run the API in standalone mode.
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)