2 Copyright (c) 2015 SONATA-NFV and Paderborn University
5 Licensed under the Apache License, Version 2.0 (the "License");
6 you may not use this file except in compliance with the License.
7 You may obtain a copy of the License at
9 http://www.apache.org/licenses/LICENSE-2.0
11 Unless required by applicable law or agreed to in writing, software
12 distributed under the License is distributed on an "AS IS" BASIS,
13 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 See the License for the specific language governing permissions and
15 limitations under the License.
17 Neither the name of the SONATA-NFV [, ANY ADDITIONAL AFFILIATION]
18 nor the names of its contributors may be used to endorse or promote
19 products derived from this software without specific prior written
22 This work has been performed in the framework of the SONATA project,
23 funded by the European Commission under Grant number 671517 through
24 the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 acknowledge the contributions of their colleagues of the SONATA
26 partner consortium (www.sonata-nfv.eu).
29 This module implements a simple REST API that behaves like SONATA's gatekeeper.
31 It is only used to support the development of SONATA's SDK tools and to demonstrate
32 the year 1 version of the emulator until the integration with WP4's orchestrator is done.
42 from docker
import DockerClient
, APIClient
43 from flask
import Flask
, request
44 import flask_restful
as fr
45 from collections
import defaultdict
47 from subprocess
import Popen
48 from random
import randint
53 LOG
= logging
.getLogger("sonata-dummy-gatekeeper")
54 LOG
.setLevel(logging
.DEBUG
)
55 logging
.getLogger("werkzeug").setLevel(logging
.WARNING
)
57 GK_STORAGE
= "/tmp/son-dummy-gk/"
58 UPLOAD_FOLDER
= os
.path
.join(GK_STORAGE
, "uploads/")
59 CATALOG_FOLDER
= os
.path
.join(GK_STORAGE
, "catalog/")
61 # Enable Dockerfile build functionality
62 BUILD_DOCKERFILE
= False
64 # flag to indicate that we run without the emulator (only the bare API for integration testing)
65 GK_STANDALONE_MODE
= False
67 # should a new version of an image be pulled even if its available
70 # Automatically deploy SAPs (endpoints) of the service as new containers
71 # Attention: This is not a configuration switch but a global variable! Don't change its default value.
74 # flag to indicate if we use bidirectional forwarding rules in the automatic chaining process
75 BIDIRECTIONAL_CHAIN
= False
77 # override the management interfaces in the descriptors with default docker0 interfaces in the containers
78 USE_DOCKER_MGMT
= False
80 # automatically deploy uploaded packages (no need to execute son-access deploy --latest separately)
83 # and also automatically terminate any other running services
def generate_subnets(prefix, base, subnet_size=50, mask=24):
    """
    Generate a list of consecutive private subnets used to address the
    emulated interfaces (SAPs, E-LANs, E-Lines).

    :param prefix: first two octets of the address range, e.g. "10.10"
    :param base: value of the third octet to start from
    :param subnet_size: number of consecutive subnets to generate
    :param mask: prefix length of each generated subnet
    :return: list of ipaddress.ip_network objects
    """
    subnets = list()
    for net in range(base, base + subnet_size):
        # u"" literal is unicode on Python 2 and plain str on Python 3,
        # so ipaddress.ip_network() accepts it on both (no unicode() call needed)
        subnet = u"{0}.{1}.0/{2}".format(prefix, net, mask)
        subnets.append(ipaddress.ip_network(subnet))
    return subnets
93 # private subnet definitions for the generated interfaces
95 SAP_SUBNETS
= generate_subnets('10.10', 0, subnet_size
=50, mask
=30)
97 ELAN_SUBNETS
= generate_subnets('10.20', 0, subnet_size
=50, mask
=24)
99 ELINE_SUBNETS
= generate_subnets('10.30', 0, subnet_size
=50, mask
=30)
101 # path to the VNFD for the SAP VNF that is deployed as internal SAP point
class Gatekeeper(object):
    """
    Dummy gatekeeper: keeps track of uploaded service packages and the
    emulator topology objects the REST API operates on.
    """

    def __init__(self):
        self.services = dict()  # service_uuid -> Service object
        # NOTE(review): dcs/net are read elsewhere as GK.dcs / GK.net and are
        # expected to be populated by the emulator integration — confirm defaults
        self.dcs = dict()
        self.net = None
        self.vnf_counter = 0  # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Create SONATA dummy gatekeeper.")

    def register_service_package(self, service_uuid, service):
        """
        Register a new service package.

        :param service_uuid: UUID generated for the uploaded package
        :param service: Service object wrapping the package
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        # NOTE(review): onboarding call reconstructed from the surrounding
        # comment — confirm against the original source
        service.onboard()

    def get_next_vnf_name(self):
        """
        Return the next short VNF name, e.g. "vnf1", "vnf2", ...
        (short names are needed because of a Mininet name-length limitation).
        """
        self.vnf_counter += 1
        return "vnf%d" % self.vnf_counter
128 class Service(object):
130 This class represents a NS uploaded as a *.son package to the
132 Can have multiple running instances of this service.
139 self
.uuid
= service_uuid
140 self
.package_file_hash
= package_file_hash
141 self
.package_file_path
= package_file_path
142 self
.package_content_path
= os
.path
.join(CATALOG_FOLDER
, "services/%s" % self
.uuid
)
147 self
.saps_ext
= list()
148 self
.saps_int
= list()
149 self
.local_docker_files
= dict()
150 self
.remote_docker_image_urls
= dict()
151 self
.instances
= dict()
152 #self.vnf_name2docker_name = dict()
153 # dict to find the vnf_name for any vnf id
154 self
.vnf_id2vnf_name
= dict()
158 Do all steps to prepare this service to be instantiated
161 # 1. extract the contents of the package and store them in our catalog
162 self
._unpack
_service
_package
()
163 # 2. read in all descriptor files
164 self
._load
_package
_descriptor
()
169 # 3. prepare container images (e.g. download or build Dockerfile)
171 self
._load
_docker
_files
()
172 self
._build
_images
_from
_dockerfiles
()
174 self
._load
_docker
_urls
()
175 self
._pull
_predefined
_dockerimages
()
176 LOG
.info("On-boarded service: %r" % self
.manifest
.get("name"))
178 def start_service(self
):
180 This methods creates and starts a new service instance.
181 It computes placements, iterates over all VNFDs, and starts
182 each VNFD as a Docker container in the data center selected
183 by the placement algorithm.
186 LOG
.info("Starting service %r" % self
.uuid
)
188 # 1. each service instance gets a new uuid to identify it
189 instance_uuid
= str(uuid
.uuid4())
190 # build a instances dict (a bit like a NSR :))
191 self
.instances
[instance_uuid
] = dict()
192 self
.instances
[instance_uuid
]["vnf_instances"] = list()
194 # 2. compute placement of this service instance (adds DC names to VNFDs)
195 if not GK_STANDALONE_MODE
:
196 #self._calculate_placement(FirstDcPlacement)
197 self
._calculate
_placement
(RoundRobinDcPlacementWithSAPs
)
198 # 3. start all vnfds that we have in the service (except SAPs)
199 for vnf_id
in self
.vnf_id2vnf_name
:
200 vnf_name
= self
.vnf_id2vnf_name
[vnf_id
]
201 vnfd
= self
.vnfds
[vnf_name
]
203 if not GK_STANDALONE_MODE
:
204 vnfi
= self
._start
_vnfd
(vnfd
, vnf_id
)
205 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
207 # 4. start all SAPs in the service
208 for sap
in self
.saps
:
209 self
._start
_sap
(self
.saps
[sap
], instance_uuid
)
211 # 5. Deploy E-Line and E_LAN links
212 if "virtual_links" in self
.nsd
:
213 vlinks
= self
.nsd
["virtual_links"]
214 # constituent virtual links are not checked
215 #fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
216 eline_fwd_links
= [l
for l
in vlinks
if (l
["connectivity_type"] == "E-Line")]
217 elan_fwd_links
= [l
for l
in vlinks
if (l
["connectivity_type"] == "E-LAN")]
219 GK
.net
.deployed_elines
.extend(eline_fwd_links
)
220 GK
.net
.deployed_elans
.extend(elan_fwd_links
)
222 # 5a. deploy E-Line links
223 self
._connect
_elines
(eline_fwd_links
, instance_uuid
)
225 # 5b. deploy E-LAN links
226 self
._connect
_elans
(elan_fwd_links
, instance_uuid
)
228 # 6. run the emulator specific entrypoint scripts in the VNFIs of this service instance
229 self
._trigger
_emulator
_start
_scripts
_in
_vnfis
(self
.instances
[instance_uuid
]["vnf_instances"])
231 LOG
.info("Service started. Instance id: %r" % instance_uuid
)
234 def stop_service(self
, instance_uuid
):
236 This method stops a running service instance.
237 It iterates over all VNF instances, stopping them each
238 and removing them from their data center.
240 :param instance_uuid: the uuid of the service instance to be stopped
242 LOG
.info("Stopping service %r" % self
.uuid
)
243 # get relevant information
244 # instance_uuid = str(self.uuid.uuid4())
245 vnf_instances
= self
.instances
[instance_uuid
]["vnf_instances"]
247 for v
in vnf_instances
:
250 for sap_name
in self
.saps_ext
:
251 ext_sap
= self
.saps
[sap_name
]
252 target_dc
= ext_sap
.get("dc")
253 target_dc
.removeExternalSAP(sap_name
, ext_sap
['net'])
254 LOG
.info("Stopping the SAP instance: %r in DC %r" % (sap_name
, target_dc
))
256 if not GK_STANDALONE_MODE
:
258 # self._remove_placement(RoundRobinPlacement)
261 # last step: remove the instance from the list of all instances
262 del self
.instances
[instance_uuid
]
264 def _start_vnfd(self
, vnfd
, vnf_id
):
266 Start a single VNFD of this service
267 :param vnfd: vnfd descriptor dict
268 :param vnf-id: vnfd descriptor dict
271 # the vnf_name refers to the container to be deployed
272 vnf_name
= vnfd
.get("name")
275 # iterate over all deployment units within each VNFDs
276 for u
in vnfd
.get("virtual_deployment_units"):
277 # 1. get the name of the docker image to start and the assigned DC
278 if vnf_name
not in self
.remote_docker_image_urls
:
279 raise Exception("No image name for %r found. Abort." % vnf_name
)
280 docker_name
= self
.remote_docker_image_urls
.get(vnf_name
)
281 target_dc
= vnfd
.get("dc")
282 # 2. perform some checks to ensure we can start the container
283 assert(docker_name
is not None)
284 assert(target_dc
is not None)
285 if not self
._check
_docker
_image
_exists
(docker_name
):
286 raise Exception("Docker image %r not found. Abort." % docker_name
)
288 # 3. get the resource limits
289 res_req
= u
.get("resource_requirements")
290 cpu_list
= res_req
.get("cpu").get("cores")
291 if not cpu_list
or len(cpu_list
)==0:
293 cpu_bw
= res_req
.get("cpu").get("cpu_bw")
296 mem_num
= str(res_req
.get("memory").get("size"))
299 mem_unit
= str(res_req
.get("memory").get("size_unit"))
302 mem_limit
= float(mem_num
)
304 mem_limit
=mem_limit
*1024*1024*1024
306 mem_limit
=mem_limit
*1024*1024
308 mem_limit
=mem_limit
*1024
309 mem_lim
= int(mem_limit
)
310 cpu_period
, cpu_quota
= self
._calculate
_cpu
_cfs
_values
(float(cpu_bw
))
313 # vnf_name2id = defaultdict(lambda: "NotExistingNode",
314 # reduce(lambda x, y: dict(x, **y),
315 # map(lambda d: {d["vnf_name"]: d["vnf_id"]},
316 # self.nsd["network_functions"])))
318 # vnf_id2name = defaultdict(lambda: "NotExistingNode",
319 # reduce(lambda x, y: dict(x, **y),
320 # map(lambda d: {d["vnf_id"]: d["vnf_name"]},
321 # self.nsd["network_functions"])))
323 # check if we need to deploy the management ports (defined as type:management both on in the vnfd and nsd)
324 intfs
= vnfd
.get("connection_points", [])
327 #vnf_id = vnf_name2id[vnf_name]
328 mgmt_intfs
= [vnf_id
+ ':' + intf
['id'] for intf
in intfs
if intf
.get('type') == 'management']
329 # check if any of these management interfaces are used in a management-type network in the nsd
330 for nsd_intf_name
in mgmt_intfs
:
331 vlinks
= [ l
["connection_points_reference"] for l
in self
.nsd
.get("virtual_links", [])]
333 if nsd_intf_name
in link
and self
.check_mgmt_interface(link
):
334 # this is indeed a management interface and can be skipped
335 vnf_id
, vnf_interface
, vnf_sap_docker_name
= parse_interface(nsd_intf_name
)
336 found_interfaces
= [intf
for intf
in intfs
if intf
.get('id') == vnf_interface
]
337 intfs
.remove(found_interfaces
[0])
338 mgmt_intf_names
.append(vnf_interface
)
340 # 4. generate the volume paths for the docker container
342 # a volume to extract log files
343 docker_log_path
= "/tmp/results/%s/%s"%(self
.uuid
,vnf_name
)
344 LOG
.debug("LOG path for vnf %s is %s."%(vnf_name
,docker_log_path
))
345 if not os
.path
.exists(docker_log_path
):
346 LOG
.debug("Creating folder %s"%docker
_log
_path
)
347 os
.makedirs(docker_log_path
)
349 volumes
.append(docker_log_path
+":/mnt/share/")
352 # 5. do the dc.startCompute(name="foobar") call to run the container
353 # TODO consider flavors, and other annotations
354 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
355 # use the vnf_id in the nsd as docker name
356 # so deployed containers can be easily mapped back to the nsd
358 #self.vnf_name2docker_name[vnf_name] = vnf_id
360 #LOG.info("Starting %r as %r in DC %r" % (vnf_name, self.vnf_name2docker_name[vnf_name], vnfd.get("dc")))
361 LOG
.info("Starting %r as %r in DC %r" % (vnf_name
, vnf_id
, vnfd
.get("dc")))
362 LOG
.debug("Interfaces for %r: %r" % (vnf_id
, intfs
))
363 vnfi
= target_dc
.startCompute(
369 cpu_period
=cpu_period
,
374 # rename the docker0 interfaces (eth0) to the management port name defined in the VNFD
376 for intf_name
in mgmt_intf_names
:
377 self
._vnf
_reconfigure
_network
(vnfi
, 'eth0', new_name
=intf_name
)
381 def _stop_vnfi(self
, vnfi
):
385 :param vnfi: vnf instance to be stopped
387 # Find the correct datacenter
388 status
= vnfi
.getStatus()
392 LOG
.info("Stopping the vnf instance contained in %r in DC %r" % (status
["name"], dc
))
393 dc
.stopCompute(status
["name"])
395 def _get_vnf_instance(self
, instance_uuid
, vnf_id
):
397 Returns the Docker object for the given VNF id (or Docker name).
398 :param instance_uuid: UUID of the service instance to search in.
399 :param name: VNF name or Docker name. We are fuzzy here.
403 #if vnf_id in self.vnf_name2docker_name:
404 # dn = self.vnf_name2docker_name[name]
405 for vnfi
in self
.instances
[instance_uuid
]["vnf_instances"]:
408 LOG
.warning("No container with name: {0} found.".format(dn
))
412 def _vnf_reconfigure_network(vnfi
, if_name
, net_str
=None, new_name
=None):
414 Reconfigure the network configuration of a specific interface
415 of a running container.
416 :param vnfi: container instance
417 :param if_name: interface name
418 :param net_str: network configuration string, e.g., 1.2.3.4/24
422 # assign new ip address
423 if net_str
is not None:
424 intf
= vnfi
.intf(intf
=if_name
)
427 LOG
.debug("Reconfigured network of %s:%s to %r" % (vnfi
.name
, if_name
, net_str
))
429 LOG
.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (vnfi
.name
, if_name
))
431 if new_name
is not None:
432 vnfi
.cmd('ip link set', if_name
, 'down')
433 vnfi
.cmd('ip link set', if_name
, 'name', new_name
)
434 vnfi
.cmd('ip link set', new_name
, 'up')
435 LOG
.debug("Reconfigured interface name of %s:%s to %s" % (vnfi
.name
, if_name
, new_name
))
439 def _trigger_emulator_start_scripts_in_vnfis(self
, vnfi_list
):
440 for vnfi
in vnfi_list
:
441 config
= vnfi
.dcinfo
.get("Config", dict())
442 env
= config
.get("Env", list())
444 var
, cmd
= map(str.strip
, map(str, env_var
.split('=', 1)))
445 LOG
.debug("%r = %r" % (var
, cmd
))
446 if var
=="SON_EMU_CMD":
447 LOG
.info("Executing entry point script in %r: %r" % (vnfi
.name
, cmd
))
448 # execute command in new thread to ensure that GK is not blocked by VNF
449 t
= threading
.Thread(target
=vnfi
.cmdPrint
, args
=(cmd
,))
453 def _unpack_service_package(self
):
455 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
457 LOG
.info("Unzipping: %r" % self
.package_file_path
)
458 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
459 z
.extractall(self
.package_content_path
)
462 def _load_package_descriptor(self
):
464 Load the main package descriptor YAML and keep it as dict.
467 self
.manifest
= load_yaml(
469 self
.package_content_path
, "META-INF/MANIFEST.MF"))
473 Load the entry NSD YAML and keep it as dict.
476 if "entry_service_template" in self
.manifest
:
477 nsd_path
= os
.path
.join(
478 self
.package_content_path
,
479 make_relative_path(self
.manifest
.get("entry_service_template")))
480 self
.nsd
= load_yaml(nsd_path
)
481 GK
.net
.deployed_nsds
.append(self
.nsd
)
482 # create dict to find the vnf_name for any vnf id
483 self
.vnf_id2vnf_name
= defaultdict(lambda: "NotExistingNode",
484 reduce(lambda x
, y
: dict(x
, **y
),
485 map(lambda d
: {d
["vnf_id"]: d
["vnf_name"]},
486 self
.nsd
["network_functions"])))
488 LOG
.debug("Loaded NSD: %r" % self
.nsd
.get("name"))
490 def _load_vnfd(self
):
492 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
495 if "package_content" in self
.manifest
:
496 for pc
in self
.manifest
.get("package_content"):
497 if pc
.get("content-type") == "application/sonata.function_descriptor":
498 vnfd_path
= os
.path
.join(
499 self
.package_content_path
,
500 make_relative_path(pc
.get("name")))
501 vnfd
= load_yaml(vnfd_path
)
502 self
.vnfds
[vnfd
.get("name")] = vnfd
503 LOG
.debug("Loaded VNFD: %r" % vnfd
.get("id"))
505 def _load_saps(self
):
506 # create list of all SAPs
507 # check if we need to deploy management ports
509 SAPs
= [p
for p
in self
.nsd
["connection_points"] if 'management' not in p
.get('type')]
511 SAPs
= [p
for p
in self
.nsd
["connection_points"]]
514 # endpoint needed in this service
515 sap_id
, sap_interface
, sap_docker_name
= parse_interface(sap
['id'])
516 # make sure SAP has type set (default internal)
517 sap
["type"] = sap
.get("type", 'internal')
519 # Each Service Access Point (connection_point) in the nsd is an IP address on the host
520 if sap
["type"] == "external":
521 # add to vnfds to calculate placement later on
522 sap_net
= SAP_SUBNETS
.pop(0)
523 self
.saps
[sap_docker_name
] = {"name": sap_docker_name
, "type": "external", "net": sap_net
}
524 # add SAP vnf to list in the NSD so it is deployed later on
525 # each SAP get a unique VNFD and vnf_id in the NSD and custom type (only defined in the dummygatekeeper)
526 self
.nsd
["network_functions"].append(
527 {"vnf_id": sap_docker_name
, "vnf_name": sap_docker_name
, "vnf_type": "sap_ext"})
529 # Each Service Access Point (connection_point) in the nsd is getting its own container (default)
530 elif sap
["type"] == "internal" or sap
["type"] == "management":
531 # add SAP to self.vnfds
533 sapfile
= pkg_resources
.resource_filename(__name__
, "sap_vnfd.yml")
536 sap_vnfd
= load_yaml(sapfile
)
537 sap_vnfd
["connection_points"][0]["id"] = sap_interface
538 sap_vnfd
["name"] = sap_docker_name
539 sap_vnfd
["type"] = "internal"
540 # add to vnfds to calculate placement later on and deploy
541 self
.saps
[sap_docker_name
] = sap_vnfd
542 # add SAP vnf to list in the NSD so it is deployed later on
543 # each SAP get a unique VNFD and vnf_id in the NSD
544 self
.nsd
["network_functions"].append(
545 {"vnf_id": sap_docker_name
, "vnf_name": sap_docker_name
, "vnf_type": "sap_int"})
547 LOG
.debug("Loaded SAP: name: {0}, type: {1}".format(sap_docker_name
, sap
['type']))
550 self
.saps_ext
= [self
.saps
[sap
]['name'] for sap
in self
.saps
if self
.saps
[sap
]["type"] == "external"]
551 self
.saps_int
= [self
.saps
[sap
]['name'] for sap
in self
.saps
if self
.saps
[sap
]["type"] == "internal"]
553 def _start_sap(self
, sap
, instance_uuid
):
557 LOG
.info('start SAP: {0} ,type: {1}'.format(sap
['name'],sap
['type']))
558 if sap
["type"] == "internal":
560 if not GK_STANDALONE_MODE
:
561 vnfi
= self
._start
_vnfd
(sap
)
562 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
564 elif sap
["type"] == "external":
565 target_dc
= sap
.get("dc")
566 # add interface to dc switch
567 target_dc
.attachExternalSAP(sap
['name'], sap
['net'])
569 def _connect_elines(self
, eline_fwd_links
, instance_uuid
):
571 Connect all E-LINE links in the NSD
572 :param eline_fwd_links: list of E-LINE links in the NSD
573 :param: instance_uuid of the service
576 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
577 # eg. different services get a unique cookie for their flowrules
579 for link
in eline_fwd_links
:
580 # check if we need to deploy this link when its a management link:
582 if self
.check_mgmt_interface(link
["connection_points_reference"]):
585 src_id
, src_if_name
, src_sap_id
= parse_interface(link
["connection_points_reference"][0])
586 dst_id
, dst_if_name
, dst_sap_id
= parse_interface(link
["connection_points_reference"][1])
589 # check if there is a SAP in the link and chain everything together
590 if src_sap_id
in self
.saps
and dst_sap_id
in self
.saps
:
591 LOG
.info('2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id
, dst_sap_id
))
594 elif src_sap_id
in self
.saps_ext
:
596 # set intf name to None so the chaining function will choose the first one
598 #src_name = self.vnf_id2vnf_name[src_id]
599 #dst_name = self.vnf_id2vnf_name[dst_id]
600 dst_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, dst_id
)
601 if dst_vnfi
is not None:
602 # choose first ip address in sap subnet
603 sap_net
= self
.saps
[src_sap_id
]['net']
604 sap_ip
= "{0}/{1}".format(str(sap_net
[2]), sap_net
.prefixlen
)
605 self
._vnf
_reconfigure
_network
(dst_vnfi
, dst_if_name
, sap_ip
)
608 elif dst_sap_id
in self
.saps_ext
:
610 # set intf name to None so the chaining function will choose the first one
612 #src_name = self.vnf_id2vnf_name[src_id]
613 #dst_name = self.vnf_id2vnf_name[dst_id]
614 src_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, src_id
)
615 if src_vnfi
is not None:
616 sap_net
= self
.saps
[dst_sap_id
]['net']
617 sap_ip
= "{0}/{1}".format(str(sap_net
[2]), sap_net
.prefixlen
)
618 self
._vnf
_reconfigure
_network
(src_vnfi
, src_if_name
, sap_ip
)
621 # Link between 2 VNFs
623 # make sure we use the correct sap vnf name
624 if src_sap_id
in self
.saps_int
:
626 if dst_sap_id
in self
.saps_int
:
628 #src_name = self.vnf_id2vnf_name[src_id]
629 #dst_name = self.vnf_id2vnf_name[dst_id]
630 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-Link
631 src_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, src_id
)
632 dst_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, dst_id
)
633 if src_vnfi
is not None and dst_vnfi
is not None:
634 eline_net
= ELINE_SUBNETS
.pop(0)
635 ip1
= "{0}/{1}".format(str(eline_net
[1]), eline_net
.prefixlen
)
636 ip2
= "{0}/{1}".format(str(eline_net
[2]), eline_net
.prefixlen
)
637 self
._vnf
_reconfigure
_network
(src_vnfi
, src_if_name
, ip1
)
638 self
._vnf
_reconfigure
_network
(dst_vnfi
, dst_if_name
, ip2
)
643 ret
= GK
.net
.setChain(
645 vnf_src_interface
=src_if_name
, vnf_dst_interface
=dst_if_name
,
646 bidirectional
=BIDIRECTIONAL_CHAIN
, cmd
="add-flow", cookie
=cookie
, priority
=10)
648 "Setting up E-Line link. (%s:%s) -> (%s:%s)" % (
649 src_id
, src_if_name
, dst_id
, dst_if_name
))
652 def _connect_elans(self
, elan_fwd_links
, instance_uuid
):
654 Connect all E-LAN links in the NSD
655 :param elan_fwd_links: list of E-LAN links in the NSD
656 :param: instance_uuid of the service
659 for link
in elan_fwd_links
:
660 # check if we need to deploy this link when its a management link:
662 if self
.check_mgmt_interface(link
["connection_points_reference"]):
666 # check if an external SAP is in the E-LAN (then a subnet is already defined)
667 intfs_elan
= [intf
for intf
in link
["connection_points_reference"]]
668 lan_sap
= self
.check_ext_saps(intfs_elan
)
670 lan_net
= self
.saps
[lan_sap
]['net']
671 lan_hosts
= list(lan_net
.hosts())
672 sap_ip
= str(lan_hosts
.pop(0))
674 lan_net
= ELAN_SUBNETS
.pop(0)
675 lan_hosts
= list(lan_net
.hosts())
677 # generate lan ip address for all interfaces except external SAPs
678 for intf
in link
["connection_points_reference"]:
680 # skip external SAPs, they already have an ip
681 vnf_id
, vnf_interface
, vnf_sap_docker_name
= parse_interface(intf
)
682 if vnf_sap_docker_name
in self
.saps_ext
:
683 elan_vnf_list
.append({'name': vnf_sap_docker_name
, 'interface': vnf_interface
})
686 ip_address
= "{0}/{1}".format(str(lan_hosts
.pop(0)), lan_net
.prefixlen
)
687 vnf_id
, intf_name
, vnf_sap_id
= parse_interface(intf
)
689 # make sure we use the correct sap vnf name
690 src_docker_name
= vnf_id
691 if vnf_sap_id
in self
.saps_int
:
692 src_docker_name
= vnf_sap_id
695 #vnf_name = self.vnf_id2vnf_name[vnf_id]
697 "Setting up E-LAN interface. %s(%s:%s) -> %s" % (
698 vnf_id
, intf_name
, ip_address
))
700 if vnf_id
in self
.vnfds
:
701 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
702 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
703 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
704 vnfi
= self
._get
_vnf
_instance
(instance_uuid
, vnf_id
)
706 self
._vnf
_reconfigure
_network
(vnfi
, intf_name
, ip_address
)
707 # add this vnf and interface to the E-LAN for tagging
708 elan_vnf_list
.append({'name': src_docker_name
, 'interface': intf_name
})
710 # install the VLAN tags for this E-LAN
711 GK
.net
.setLAN(elan_vnf_list
)
714 def _load_docker_files(self
):
716 Get all paths to Dockerfiles from VNFDs and store them in dict.
719 for k
, v
in self
.vnfds
.iteritems():
720 for vu
in v
.get("virtual_deployment_units"):
721 if vu
.get("vm_image_format") == "docker":
722 vm_image
= vu
.get("vm_image")
723 docker_path
= os
.path
.join(
724 self
.package_content_path
,
725 make_relative_path(vm_image
))
726 self
.local_docker_files
[k
] = docker_path
727 LOG
.debug("Found Dockerfile (%r): %r" % (k
, docker_path
))
729 def _load_docker_urls(self
):
731 Get all URLs to pre-build docker images in some repo.
734 # also merge sap dicts, because internal saps also need a docker container
735 all_vnfs
= self
.vnfds
.copy()
736 all_vnfs
.update(self
.saps
)
738 for k
, v
in all_vnfs
.iteritems():
739 for vu
in v
.get("virtual_deployment_units", {}):
740 if vu
.get("vm_image_format") == "docker":
741 url
= vu
.get("vm_image")
743 url
= url
.replace("http://", "")
744 self
.remote_docker_image_urls
[k
] = url
745 LOG
.debug("Found Docker image URL (%r): %r" % (k
, self
.remote_docker_image_urls
[k
]))
747 def _build_images_from_dockerfiles(self
):
749 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
751 if GK_STANDALONE_MODE
:
752 return # do not build anything in standalone mode
754 LOG
.info("Building %d Docker images (this may take several minutes) ..." % len(self
.local_docker_files
))
755 for k
, v
in self
.local_docker_files
.iteritems():
756 for line
in dc
.build(path
=v
.replace("Dockerfile", ""), tag
=k
, rm
=False, nocache
=False):
757 LOG
.debug("DOCKER BUILD: %s" % line
)
758 LOG
.info("Docker image created: %s" % k
)
760 def _pull_predefined_dockerimages(self
):
762 If the package contains URLs to pre-build Docker images, we download them with this method.
765 for url
in self
.remote_docker_image_urls
.itervalues():
766 if not FORCE_PULL
: # only pull if not present (speedup for development)
767 if len(dc
.images
.list(name
=url
)) > 0:
768 LOG
.debug("Image %r present. Skipping pull." % url
)
770 LOG
.info("Pulling image: %r" % url
)
771 # this seems to fail with latest docker api version 2.0.2
772 # dc.images.pull(url,
773 # insecure_registry=True)
774 #using docker cli instead
784 def _check_docker_image_exists(self
, image_name
):
786 Query the docker service and check if the given image exists
787 :param image_name: name of the docker image
790 return len(DockerClient().images
.list(name
=image_name
)) > 0
792 def _calculate_placement(self
, algorithm
):
794 Do placement by adding the a field "dc" to
795 each VNFD that points to one of our
796 data center objects known to the gatekeeper.
798 assert(len(self
.vnfds
) > 0)
799 assert(len(GK
.dcs
) > 0)
800 # instantiate algorithm an place
802 p
.place(self
.nsd
, self
.vnfds
, self
.saps
, GK
.dcs
)
803 LOG
.info("Using placement algorithm: %r" % p
.__class
__.__name
__)
804 # lets print the placement result
805 for name
, vnfd
in self
.vnfds
.iteritems():
806 LOG
.info("Placed VNF %r on DC %r" % (name
, str(vnfd
.get("dc"))))
807 for sap
in self
.saps
:
808 sap_dict
= self
.saps
[sap
]
809 LOG
.info("Placed SAP %r on DC %r" % (sap
, str(sap_dict
.get("dc"))))
812 def _calculate_cpu_cfs_values(self
, cpu_time_percentage
):
814 Calculate cpu period and quota for CFS
815 :param cpu_time_percentage: percentage of overall CPU to be used
816 :return: cpu_period, cpu_quota
818 if cpu_time_percentage
is None:
820 if cpu_time_percentage
< 0:
822 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
823 # Attention minimum cpu_quota is 1ms (micro)
824 cpu_period
= 1000000 # lets consider a fixed period of 1000000 microseconds for now
825 LOG
.debug("cpu_period is %r, cpu_percentage is %r" % (cpu_period
, cpu_time_percentage
))
826 cpu_quota
= cpu_period
* cpu_time_percentage
# calculate the fraction of cpu time for this container
827 # ATTENTION >= 1000 to avoid a invalid argument system error ... no idea why
829 LOG
.debug("cpu_quota before correcting: %r" % cpu_quota
)
831 LOG
.warning("Increased CPU quota to avoid system error.")
832 LOG
.debug("Calculated: cpu_period=%f / cpu_quota=%f" % (cpu_period
, cpu_quota
))
833 return int(cpu_period
), int(cpu_quota
)
835 def check_ext_saps(self
, intf_list
):
836 # check if the list of interfacs contains an externl SAP
837 saps_ext
= [self
.saps
[sap
]['name'] for sap
in self
.saps
if self
.saps
[sap
]["type"] == "external"]
838 for intf_name
in intf_list
:
839 vnf_id
, vnf_interface
, vnf_sap_docker_name
= parse_interface(intf_name
)
840 if vnf_sap_docker_name
in saps_ext
:
841 return vnf_sap_docker_name
843 def check_mgmt_interface(self
, intf_list
):
844 SAPs_mgmt
= [p
.get('id') for p
in self
.nsd
["connection_points"] if 'management' in p
.get('type')]
845 for intf_name
in intf_list
:
846 if intf_name
in SAPs_mgmt
:
850 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """

    def place(self, nsd, vnfds, saps, dcs):
        """
        Assign the first known data center to every VNFD.

        :param nsd: network service descriptor (unused here)
        :param vnfds: dict of VNF name -> VNFD dict (gets a "dc" field)
        :param saps: dict of SAPs (unused here)
        :param dcs: dict of known data centers (must be non-empty)
        """
        # hoist the first-DC lookup out of the loop; .values()/.items()
        # work on both Python 2 and 3 (iteritems/itervalues are Py2-only)
        first_dc = list(dcs.values())[0]
        for name, vnfd in vnfds.items():
            vnfd["dc"] = first_dc
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """

    def place(self, nsd, vnfds, saps, dcs):
        """
        Assign data centers to the given VNFDs in round-robin order.

        :param nsd: network service descriptor (unused here)
        :param vnfds: dict of VNF name -> VNFD dict (gets a "dc" field)
        :param saps: dict of SAPs (unused here)
        :param dcs: dict of known data centers (must be non-empty)
        """
        # .values()/.items() work on Py2 and Py3; enumerate replaces the
        # manual round-robin counter
        dcs_list = list(dcs.values())
        for c, (name, vnfd) in enumerate(vnfds.items()):
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
class RoundRobinDcPlacementWithSAPs(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion,
    every SAP is instantiated on the same DC as the connected VNF.
    """

    def place(self, nsd, vnfds, saps, dcs):
        """
        Assign a DC to every VNFD (round robin) and co-locate each SAP
        with the VNF it is connected to (E-Line) or a random DC (E-LAN).

        :param nsd: network service descriptor (virtual_links are inspected)
        :param vnfds: dict of VNF name -> VNFD dict (gets a "dc" field)
        :param saps: dict of SAP name -> SAP dict (gets a "dc" field)
        :param dcs: dict of known data centers (must be non-empty)
        """
        # place the VNFs round robin over all available DCs
        # (.values()/.items() work on Py2 and Py3)
        dcs_list = list(dcs.values())
        for c, (name, vnfd) in enumerate(vnfds.items()):
            vnfd["dc"] = dcs_list[c % len(dcs_list)]

        # collect the E-Line and E-LAN links from the NSD
        vlinks = nsd.get("virtual_links", [])
        eline_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-Line")]
        elan_fwd_links = [l for l in vlinks if (l["connectivity_type"] == "E-LAN")]

        # SAPs on E-Line links are placed on the same DC as the VNF on the E-Line
        for link in eline_fwd_links:
            src_id, src_if_name, src_sap_id = parse_interface(link["connection_points_reference"][0])
            dst_id, dst_if_name, dst_sap_id = parse_interface(link["connection_points_reference"][1])

            # check if there is a SAP in the link
            if src_sap_id in saps:
                # get dc where connected vnf is mapped to
                dc = vnfds[dst_id]['dc']
                saps[src_sap_id]['dc'] = dc

            if dst_sap_id in saps:
                # get dc where connected vnf is mapped to
                dc = vnfds[src_id]['dc']
                saps[dst_sap_id]['dc'] = dc

        # SAPs on E-LANs are placed on a random DC
        dcs_list = list(dcs.values())
        dc_len = len(dcs_list)
        for link in elan_fwd_links:
            for intf in link["connection_points_reference"]:
                # find SAP interfaces
                intf_id, intf_name, intf_sap_id = parse_interface(intf)
                if intf_sap_id in saps:
                    dc = dcs_list[randint(0, dc_len - 1)]
                    saps[intf_sap_id]['dc'] = dc
930 Resource definitions and API endpoints
class Packages(fr.Resource):

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect a request with a *.son file and store it in UPLOAD_FOLDER.
        :return: (dict with service UUID, size, sha1 and error field, HTTP status)
        """
        try:
            LOG.info("POST /packages called")
            # lets search for the package in the request
            is_file_object = False  # make API more robust: file can be in data or in files field
            if "package" in request.files:
                son_file = request.files["package"]
                is_file_object = True
            elif len(request.data) > 0:
                son_file = request.data
            else:
                return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.son" % service_uuid)
            # store *.son file to disk
            if is_file_object:
                son_file.save(upload_path)
            else:
                with open(upload_path, 'wb') as f:
                    f.write(son_file)
            size = os.path.getsize(upload_path)
            # hash the actual package bytes on disk
            # (hashing str(son_file) would hash the object's repr, not its content)
            with open(upload_path, 'rb') as f:
                file_hash = hashlib.sha1(f.read()).hexdigest()

            # first stop and delete any other running services
            if AUTO_DELETE:
                service_list = copy.copy(GK.services)
                # NOTE: use a distinct loop variable; reusing `service_uuid` here
                # would clobber the UUID of the package we just generated above
                for old_service_uuid in service_list:
                    instances_list = copy.copy(GK.services[old_service_uuid].instances)
                    for instance_uuid in instances_list:
                        # valid service and instance UUID, stop service
                        GK.services.get(old_service_uuid).stop_service(instance_uuid)
                        LOG.info("service instance with uuid %r stopped." % instance_uuid)

            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)

            # automatically deploy the service
            if AUTO_DEPLOY:
                # ok, we have a service uuid, lets start the service
                reset_subnets()
                GK.services.get(service_uuid).start_service()

            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size, "sha1": file_hash, "error": None}, 201
        except Exception:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0, "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUIDs of uploaded service packages.
        :return: dict with the list of service UUIDs
        """
        LOG.info("GET /packages")
        # list(dict) works on Python 2 and 3 (iterkeys() is Python 2 only)
        return {"service_uuid_list": list(GK.services)}
class Instantiations(fr.Resource):

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: UUID
        """
        LOG.info("POST /instantiations (or /requests) called")
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")

        # lets be a bit fuzzy here to make testing easier
        if (service_uuid is None or service_uuid == "latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simply start the first service in the list
            service_uuid = list(GK.services)[0]
        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}, 201
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        # list(...) forms work on Python 2 and 3 (iterkeys/itervalues are Python 2 only)
        return {"service_instantiations_list": [
            list(s.instances) for s in GK.services.values()]}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")

        # lets be a bit fuzzy here to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the first service in the list
            service_uuid = list(GK.services)[0]
        # guard the dict access: service_uuid may still be None or unknown,
        # which would otherwise raise a KeyError on GK.services[service_uuid]
        if (instance_uuid is None and service_uuid in GK.services
                and len(GK.services[service_uuid].instances) > 0):
            instance_uuid = list(GK.services[service_uuid].instances)[0]

        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
            # valid service and instance UUID, stop service
            GK.services.get(service_uuid).stop_service(instance_uuid)
            return "service instance with uuid %r stopped." % instance_uuid, 200
        return "Service not found", 404
class Exit(fr.Resource):

    def put(self):
        """
        Stop the running Containernet instance regardless of data transmitted.
        """
        # every DC holds a reference to the same network; use the first one
        next(iter(GK.dcs.values())).net.stop()
def initialize_GK():
    """(Re-)create the single, global Gatekeeper object."""
    global GK
    GK = Gatekeeper()


# create a single, global GK object
GK = None
initialize_GK()
# setup Flask application and REST API
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages', '/api/v2/packages')
api.add_resource(Instantiations, '/instantiations', '/api/v2/instantiations', '/api/v2/requests')
api.add_resource(Exit, '/emulator/exit')
def start_rest_api(host, port, datacenters=None):
    """
    Start the dummy gatekeeper REST API.

    :param host: interface to bind the Flask server to, e.g. "0.0.0.0"
    :param port: TCP port for the Flask server
    :param datacenters: dict of datacenters this gatekeeper manages
    """
    # avoid a shared mutable default argument (datacenters=dict());
    # callers that omit the argument still get an empty dict as before
    GK.dcs = datacenters if datacenters is not None else dict()
    GK.net = get_dc_network()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """
    Create directory `name` (including parents) if it does not exist yet.

    :param name: path of the directory to create
    """
    if not os.path.exists(name):
        try:
            os.makedirs(name)
        except OSError:
            # another thread/process may have created it between the
            # exists() check and makedirs() (TOCTOU race); only re-raise
            # if the directory really could not be created
            if not os.path.isdir(name):
                raise
def load_yaml(path):
    """
    Load and parse a YAML file.

    :param path: path to the YAML file
    :return: parsed content (dict/list), or an empty dict on parse error
    """
    with open(path, "r") as f:
        try:
            # SECURITY: use safe_load — plain yaml.load can instantiate
            # arbitrary Python objects from untrusted package descriptors
            r = yaml.safe_load(f)
        except yaml.YAMLError:
            LOG.exception("YAML parse error")
            r = dict()
    return r
def make_relative_path(path):
    """
    Turn a path into a relative one: drop an optional 'file://' scheme
    prefix and a single leading '/'.

    :param path: path string, possibly absolute or file://-prefixed
    :return: relative path string
    """
    for prefix in ("file://", "/"):
        if path.startswith(prefix):
            path = path[len(prefix):]
    return path
def get_dc_network():
    """
    Retrieve the DCNetwork this dummy gatekeeper (GK) connects to.
    Assumes at least 1 datacenter is connected to this GK, and that all
    datacenters belong to the same DCNetwork.
    :return: the shared DCNetwork instance
    """
    assert (len(GK.dcs) > 0)
    # next(iter(...)) works on Python 2 and 3;
    # .values()[0] fails on Python 3 because dict views are not subscriptable
    return next(iter(GK.dcs.values())).net
def parse_interface(interface_name):
    """
    Convert an interface name from the NSD to the corresponding
    vnf_id, vnf_interface and SAP docker names.

    :param interface_name: e.g. "vnf1:eth0" or a plain SAP name "sap1"
    :return: tuple (vnf_id, vnf_interface, vnf_sap_docker_name)
    """
    if ':' in interface_name:
        # split only on the first ':' so a malformed name with extra colons
        # does not raise ValueError (extra colons stay in the interface part)
        vnf_id, vnf_interface = interface_name.split(':', 1)
        vnf_sap_docker_name = interface_name.replace(':', '_')
    else:
        vnf_id = interface_name
        vnf_interface = interface_name
        vnf_sap_docker_name = interface_name
    return vnf_id, vnf_interface, vnf_sap_docker_name
def reset_subnets():
    """Re-initialize the global subnet pools used for generated interfaces."""
    global SAP_SUBNETS, ELAN_SUBNETS, ELINE_SUBNETS
    # SAP interfaces: 10.10.x.x with /30 point-to-point subnets
    SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
    # E-LAN links: 10.20.x.x with /24 shared subnets
    ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
    # E-Line links: 10.30.x.x with /30 point-to-point subnets
    ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
if __name__ == '__main__':
    # Allow running the API in standalone mode (without the emulator).
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)