1 # Copyright (c) 2018 SONATA-NFV, 5GTANGO and Paderborn University
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
8 # http://www.apache.org/licenses/LICENSE-2.0
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
16 # Neither the name of the SONATA-NFV, 5GTANGO, Paderborn University
17 # nor the names of its contributors may be used to endorse or promote
18 # products derived from this software without specific prior written
21 # This work has been performed in the framework of the SONATA project,
22 # funded by the European Commission under Grant number 671517 through
23 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
24 # acknowledge the contributions of their colleagues of the SONATA
25 # partner consortium (www.sonata-nfv.eu).
27 # This work has also been performed in the framework of the 5GTANGO project,
28 # funded by the European Commission under Grant number 761493 through
29 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
30 # acknowledge the contributions of their colleagues of the 5GTANGO
31 # partner consortium (www.5gtango.eu).
import copy
import hashlib
import ipaddress
import logging
import os
import threading
import time
import uuid
import zipfile
from collections import defaultdict
from functools import reduce
from random import randint
from subprocess import Popen

import pkg_resources
from docker import DockerClient
from flask import Flask, request
import flask_restful as fr
# module-wide logger of the 5GTANGO light-weight life cycle manager (LLCM)
LOG = logging.getLogger("5gtango.llcm")
LOG.setLevel(logging.INFO)

# on-disk locations for uploaded packages and the extracted service catalog
GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for
# integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
# NOTE(review): the flag assignment belonging to this comment was lost in
# extraction — restore from upstream before relying on pull behavior.

# Automatically deploy SAPs (endpoints) of the service as new containers
# Attention: This is not a configuration switch but a global variable!
# Don't change its default value.
# NOTE(review): the corresponding global assignment was lost in extraction.

# flag to indicate if we use bidirectional forwarding rules in the
# automatic chaining process
BIDIRECTIONAL_CHAIN = False

# override the management interfaces in the descriptors with default
# docker0 interfaces in the containers
USE_DOCKER_MGMT = False

# automatically deploy uploaded packages (no need to execute son-access
# deploy --latest separately)
# and also automatically terminate any other running services
# NOTE(review): the auto-deploy / auto-delete flag assignments belonging to
# these comments were lost in extraction — restore from upstream.
def generate_subnets(prefix, base, subnet_size=50, mask=24):
    """
    Generate a list of ipaddress network objects in consecutive subnets.
    :param prefix: first two octets of the address, e.g. "10.10"
    :param base: third octet of the first generated subnet
    :param subnet_size: number of subnets to generate
    :param mask: prefix length of each generated subnet
    :return: list of ipaddress network objects
    """
    # the list initializer and the final return were lost in extraction;
    # restored — callers (e.g. SAP_SUBNETS.pop(0)) require a list result
    r = list()
    for net in range(base, base + subnet_size):
        # a u"" literal is unicode on Python 2 and plain str on Python 3, so
        # ipaddress.ip_network() accepts it on both interpreters; this
        # replaces the Python-2-only unicode() builtin used before
        subnet = u"{0}.{1}.0/{2}".format(prefix, net, mask)
        r.append(ipaddress.ip_network(subnet))
    return r
# private subnet definitions for the generated interfaces
SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)

# path to the VNFD for the SAP VNF that is deployed as internal SAP point
# NOTE(review): the SAP VNFD path assignment itself was lost in extraction —
# restore from upstream.

# Time in seconds to wait for vnf stop scripts to execute fully
VNF_STOP_WAIT_TIME = 5
class OnBoardingException(BaseException):
    """
    Raised when a service package cannot be on-boarded,
    e.g. when no NSD or no VNFDs are found in the package.
    """
    pass
119 class Gatekeeper(object):
122 self
.services
= dict()
125 # used to generate short names for VNFs (Mininet limitation)
127 LOG
.info("Initialized 5GTANGO LLCM module.")
129 def register_service_package(self
, service_uuid
, service
):
131 register new service package
133 :param service object
135 self
.services
[service_uuid
] = service
136 # lets perform all steps needed to onboard the service
139 def get_next_vnf_name(self
):
140 self
.vnf_counter
+= 1
141 return "vnf%d" % self
.vnf_counter
144 class Service(object):
146 This class represents a NS uploaded as a *.son package to the
148 Can have multiple running instances of this service.
155 self
.uuid
= service_uuid
156 self
.package_file_hash
= package_file_hash
157 self
.package_file_path
= package_file_path
158 self
.package_content_path
= os
.path
.join(
159 CATALOG_FOLDER
, "services/%s" % self
.uuid
)
164 self
.saps_ext
= list()
165 self
.saps_int
= list()
166 self
.local_docker_files
= dict()
167 self
.remote_docker_image_urls
= dict()
168 self
.instances
= dict()
169 # dict to find the vnf_name for any vnf id
170 self
.vnf_id2vnf_name
= dict()
174 Do all steps to prepare this service to be instantiated
177 # 1. extract the contents of the package and store them in our catalog
178 self
._unpack
_service
_package
()
179 # 2. read in all descriptor files
180 self
._load
_package
_descriptor
()
184 raise OnBoardingException("No NSD found.")
185 if len(self
.vnfds
) < 1:
186 raise OnBoardingException("No VNFDs found.")
189 # 3. prepare container images (e.g. download or build Dockerfile)
191 self
._load
_docker
_files
()
192 self
._build
_images
_from
_dockerfiles
()
194 self
._load
_docker
_urls
()
195 self
._pull
_predefined
_dockerimages
()
196 LOG
.info("On-boarded service: %r" % self
.manifest
.get("name"))
198 def start_service(self
):
200 This methods creates and starts a new service instance.
201 It computes placements, iterates over all VNFDs, and starts
202 each VNFD as a Docker container in the data center selected
203 by the placement algorithm.
206 LOG
.info("Starting service %r" % self
.uuid
)
208 # 1. each service instance gets a new uuid to identify it
209 instance_uuid
= str(uuid
.uuid4())
210 # build a instances dict (a bit like a NSR :))
211 self
.instances
[instance_uuid
] = dict()
212 self
.instances
[instance_uuid
]["vnf_instances"] = list()
214 # 2. compute placement of this service instance (adds DC names to
216 if not GK_STANDALONE_MODE
:
217 # self._calculate_placement(FirstDcPlacement)
218 self
._calculate
_placement
(RoundRobinDcPlacementWithSAPs
)
219 # 3. start all vnfds that we have in the service (except SAPs)
220 for vnf_id
in self
.vnfds
:
221 vnfd
= self
.vnfds
[vnf_id
]
223 if not GK_STANDALONE_MODE
:
224 vnfi
= self
._start
_vnfd
(vnfd
, vnf_id
)
225 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
227 # 4. start all SAPs in the service
228 for sap
in self
.saps
:
229 self
._start
_sap
(self
.saps
[sap
], instance_uuid
)
231 # 5. Deploy E-Line and E_LAN links
232 # Attention: Only done if ""forwarding_graphs" section in NSD exists,
233 # even if "forwarding_graphs" are not used directly.
234 if "virtual_links" in self
.nsd
and "forwarding_graphs" in self
.nsd
:
235 vlinks
= self
.nsd
["virtual_links"]
236 # constituent virtual links are not checked
237 # fwd_links = self.nsd["forwarding_graphs"][0]["constituent_virtual_links"]
238 eline_fwd_links
= [l
for l
in vlinks
if (
239 l
["connectivity_type"] == "E-Line")]
240 elan_fwd_links
= [l
for l
in vlinks
if (
241 l
["connectivity_type"] == "E-LAN")]
243 GK
.net
.deployed_elines
.extend(eline_fwd_links
)
244 GK
.net
.deployed_elans
.extend(elan_fwd_links
)
246 # 5a. deploy E-Line links
247 self
._connect
_elines
(eline_fwd_links
, instance_uuid
)
249 # 5b. deploy E-LAN links
250 self
._connect
_elans
(elan_fwd_links
, instance_uuid
)
252 # 6. run the emulator specific entrypoint scripts in the VNFIs of this
254 self
._trigger
_emulator
_start
_scripts
_in
_vnfis
(
255 self
.instances
[instance_uuid
]["vnf_instances"])
257 LOG
.info("Service started. Instance id: %r" % instance_uuid
)
260 def stop_service(self
, instance_uuid
):
262 This method stops a running service instance.
263 It iterates over all VNF instances, stopping them each
264 and removing them from their data center.
266 :param instance_uuid: the uuid of the service instance to be stopped
268 LOG
.info("Stopping service %r" % self
.uuid
)
269 # get relevant information
270 # instance_uuid = str(self.uuid.uuid4())
271 vnf_instances
= self
.instances
[instance_uuid
]["vnf_instances"]
273 # trigger stop skripts in vnf instances and wait a few seconds for
275 self
._trigger
_emulator
_stop
_scripts
_in
_vnfis
(vnf_instances
)
276 time
.sleep(VNF_STOP_WAIT_TIME
)
278 for v
in vnf_instances
:
281 for sap_name
in self
.saps_ext
:
282 ext_sap
= self
.saps
[sap_name
]
283 target_dc
= ext_sap
.get("dc")
284 target_dc
.removeExternalSAP(sap_name
)
285 LOG
.info("Stopping the SAP instance: %r in DC %r" %
286 (sap_name
, target_dc
))
288 if not GK_STANDALONE_MODE
:
290 # self._remove_placement(RoundRobinPlacement)
292 # last step: remove the instance from the list of all instances
293 del self
.instances
[instance_uuid
]
295 def _start_vnfd(self
, vnfd
, vnf_id
, **kwargs
):
297 Start a single VNFD of this service
298 :param vnfd: vnfd descriptor dict
299 :param vnf_id: unique id of this vnf in the nsd
302 # the vnf_name refers to the container image to be deployed
303 vnf_name
= vnfd
.get("name")
305 # iterate over all deployment units within each VNFDs
306 for u
in vnfd
.get("virtual_deployment_units"):
307 # 1. get the name of the docker image to start and the assigned DC
308 if vnf_id
not in self
.remote_docker_image_urls
:
309 raise Exception("No image name for %r found. Abort." % vnf_id
)
310 docker_name
= self
.remote_docker_image_urls
.get(vnf_id
)
311 target_dc
= vnfd
.get("dc")
312 # 2. perform some checks to ensure we can start the container
313 assert(docker_name
is not None)
314 assert(target_dc
is not None)
315 if not self
._check
_docker
_image
_exists
(docker_name
):
317 "Docker image %r not found. Abort." % docker_name
)
319 # 3. get the resource limits
320 res_req
= u
.get("resource_requirements")
321 cpu_list
= res_req
.get("cpu").get("cores")
323 cpu_list
= res_req
.get("cpu").get("vcpus")
326 cpu_bw
= res_req
.get("cpu").get("cpu_bw")
329 mem_num
= str(res_req
.get("memory").get("size"))
330 if len(mem_num
) == 0:
332 mem_unit
= str(res_req
.get("memory").get("size_unit"))
333 if str(mem_unit
) == 0:
335 mem_limit
= float(mem_num
)
337 mem_limit
= mem_limit
* 1024 * 1024 * 1024
338 elif mem_unit
== "MB":
339 mem_limit
= mem_limit
* 1024 * 1024
340 elif mem_unit
== "KB":
341 mem_limit
= mem_limit
* 1024
342 mem_lim
= int(mem_limit
)
343 cpu_period
, cpu_quota
= self
._calculate
_cpu
_cfs
_values
(
346 # check if we need to deploy the management ports (defined as
347 # type:management both on in the vnfd and nsd)
348 intfs
= vnfd
.get("connection_points", [])
349 # do some re-naming of fields to be compatible to containernet
352 i
["ip"] = i
.get("address")
356 mgmt_intfs
= [vnf_id
+ ':' + intf
['id']
357 for intf
in intfs
if intf
.get('type') == 'management']
358 # check if any of these management interfaces are used in a
359 # management-type network in the nsd
360 for nsd_intf_name
in mgmt_intfs
:
361 vlinks
= [l
["connection_points_reference"]
362 for l
in self
.nsd
.get("virtual_links", [])]
364 if nsd_intf_name
in link
and self
.check_mgmt_interface(
366 # this is indeed a management interface and can be
368 vnf_id
, vnf_interface
, vnf_sap_docker_name
= parse_interface(
371 intf
for intf
in intfs
if intf
.get('id') == vnf_interface
]
372 intfs
.remove(found_interfaces
[0])
373 mgmt_intf_names
.append(vnf_interface
)
375 # 4. generate the volume paths for the docker container
377 # a volume to extract log files
378 docker_log_path
= "/tmp/results/%s/%s" % (self
.uuid
, vnf_id
)
379 LOG
.debug("LOG path for vnf %s is %s." % (vnf_id
, docker_log_path
))
380 if not os
.path
.exists(docker_log_path
):
381 LOG
.debug("Creating folder %s" % docker_log_path
)
382 os
.makedirs(docker_log_path
)
384 volumes
.append(docker_log_path
+ ":/mnt/share/")
386 # 5. do the dc.startCompute(name="foobar") call to run the container
387 # TODO consider flavors, and other annotations
388 # TODO: get all vnf id's from the nsd for this vnfd and use those as dockername
389 # use the vnf_id in the nsd as docker name
390 # so deployed containers can be easily mapped back to the nsd
391 LOG
.info("Starting %r as %r in DC %r" %
392 (vnf_name
, vnf_id
, vnfd
.get("dc")))
393 LOG
.debug("Interfaces for %r: %r" % (vnf_id
, intfs
))
394 vnfi
= target_dc
.startCompute(
400 cpu_period
=cpu_period
,
404 type=kwargs
.get('type', 'docker'))
406 # rename the docker0 interfaces (eth0) to the management port name
407 # defined in the VNFD
409 for intf_name
in mgmt_intf_names
:
410 self
._vnf
_reconfigure
_network
(
411 vnfi
, 'eth0', new_name
=intf_name
)
415 def _stop_vnfi(self
, vnfi
):
419 :param vnfi: vnf instance to be stopped
421 # Find the correct datacenter
422 status
= vnfi
.getStatus()
426 LOG
.info("Stopping the vnf instance contained in %r in DC %r" %
427 (status
["name"], dc
))
428 dc
.stopCompute(status
["name"])
430 def _get_vnf_instance(self
, instance_uuid
, vnf_id
):
432 Returns the Docker object for the given VNF id (or Docker name).
433 :param instance_uuid: UUID of the service instance to search in.
434 :param name: VNF name or Docker name. We are fuzzy here.
438 for vnfi
in self
.instances
[instance_uuid
]["vnf_instances"]:
441 LOG
.warning("No container with name: {0} found.".format(dn
))
445 def _vnf_reconfigure_network(vnfi
, if_name
, net_str
=None, new_name
=None):
447 Reconfigure the network configuration of a specific interface
448 of a running container.
449 :param vnfi: container instance
450 :param if_name: interface name
451 :param net_str: network configuration string, e.g., 1.2.3.4/24
455 # assign new ip address
456 if net_str
is not None:
457 intf
= vnfi
.intf(intf
=if_name
)
460 LOG
.debug("Reconfigured network of %s:%s to %r" %
461 (vnfi
.name
, if_name
, net_str
))
463 LOG
.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
466 if new_name
is not None:
467 vnfi
.cmd('ip link set', if_name
, 'down')
468 vnfi
.cmd('ip link set', if_name
, 'name', new_name
)
469 vnfi
.cmd('ip link set', new_name
, 'up')
470 LOG
.debug("Reconfigured interface name of %s:%s to %s" %
471 (vnfi
.name
, if_name
, new_name
))
473 def _trigger_emulator_start_scripts_in_vnfis(self
, vnfi_list
):
474 for vnfi
in vnfi_list
:
475 config
= vnfi
.dcinfo
.get("Config", dict())
476 env
= config
.get("Env", list())
478 var
, cmd
= map(str.strip
, map(str, env_var
.split('=', 1)))
479 LOG
.debug("%r = %r" % (var
, cmd
))
480 if var
== "SON_EMU_CMD":
481 LOG
.info("Executing entry point script in %r: %r" %
483 # execute command in new thread to ensure that GK is not
485 t
= threading
.Thread(target
=vnfi
.cmdPrint
, args
=(cmd
,))
489 def _trigger_emulator_stop_scripts_in_vnfis(self
, vnfi_list
):
490 for vnfi
in vnfi_list
:
491 config
= vnfi
.dcinfo
.get("Config", dict())
492 env
= config
.get("Env", list())
494 var
, cmd
= map(str.strip
, map(str, env_var
.split('=', 1)))
495 if var
== "SON_EMU_CMD_STOP":
496 LOG
.info("Executing stop script in %r: %r" %
498 # execute command in new thread to ensure that GK is not
500 t
= threading
.Thread(target
=vnfi
.cmdPrint
, args
=(cmd
,))
504 def _unpack_service_package(self
):
506 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
508 LOG
.info("Unzipping: %r" % self
.package_file_path
)
509 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
510 z
.extractall(self
.package_content_path
)
512 def _load_package_descriptor(self
):
514 Load the main package descriptor YAML and keep it as dict.
517 self
.manifest
= load_yaml(
519 self
.package_content_path
, "TOSCA-Metadata/NAPD.yaml"))
523 Load the entry NSD YAML and keep it as dict.
526 if "package_content" in self
.manifest
:
528 for f
in self
.manifest
.get("package_content"):
529 if f
.get("content-type") == "application/vnd.5gtango.nsd":
530 nsd_path
= os
.path
.join(
531 self
.package_content_path
,
532 make_relative_path(f
.get("source")))
533 break # always use the first NSD for now
535 raise OnBoardingException("No NSD with type 'application/vnd.5gtango.nsd' found.")
536 self
.nsd
= load_yaml(nsd_path
)
537 GK
.net
.deployed_nsds
.append(self
.nsd
) # TODO this seems strange (remove?)
538 # create dict to find the vnf_name for any vnf id
539 self
.vnf_id2vnf_name
= defaultdict(lambda: "NotExistingNode",
540 reduce(lambda x
, y
: dict(x
, **y
),
541 map(lambda d
: {d
["vnf_id"]: d
["vnf_name"]},
542 self
.nsd
["network_functions"])))
543 LOG
.debug("Loaded NSD: %r" % self
.nsd
.get("name"))
545 raise OnBoardingException(
546 "No 'package_content' section in package manifest:\n{}"
547 .format(self
.manifest
))
549 def _load_vnfd(self
):
551 Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
555 # first make a list of all the vnfds in the package
557 if "package_content" in self
.manifest
:
558 for pc
in self
.manifest
.get("package_content"):
560 "content-type") == "application/vnd.5gtango.vnfd":
561 vnfd_path
= os
.path
.join(
562 self
.package_content_path
,
563 make_relative_path(pc
.get("source")))
564 vnfd
= load_yaml(vnfd_path
)
565 vnfd_set
[vnfd
.get("name")] = vnfd
566 if len(vnfd_set
) < 1:
567 raise OnBoardingException("No VNFDs found.")
568 # then link each vnf_id in the nsd to its vnfd
569 for vnf_id
in self
.vnf_id2vnf_name
:
570 vnf_name
= self
.vnf_id2vnf_name
[vnf_id
]
571 self
.vnfds
[vnf_id
] = vnfd_set
[vnf_name
]
572 LOG
.debug("Loaded VNFD: {0} id: {1}".format(vnf_name
, vnf_id
))
574 def _load_saps(self
):
575 # create list of all SAPs
576 # check if we need to deploy management ports
578 SAPs
= [p
for p
in self
.nsd
["connection_points"]
579 if 'management' not in p
.get('type')]
581 SAPs
= [p
for p
in self
.nsd
["connection_points"]]
584 # endpoint needed in this service
585 sap_id
, sap_interface
, sap_docker_name
= parse_interface(sap
['id'])
586 # make sure SAP has type set (default internal)
587 sap
["type"] = sap
.get("type", 'internal')
589 # Each Service Access Point (connection_point) in the nsd is an IP
590 # address on the host
591 if sap
["type"] == "external":
592 # add to vnfds to calculate placement later on
593 sap_net
= SAP_SUBNETS
.pop(0)
594 self
.saps
[sap_docker_name
] = {
595 "name": sap_docker_name
, "type": "external", "net": sap_net
}
596 # add SAP vnf to list in the NSD so it is deployed later on
597 # each SAP gets a unique VNFD and vnf_id in the NSD and custom
598 # type (only defined in the dummygatekeeper)
599 self
.nsd
["network_functions"].append(
600 {"vnf_id": sap_docker_name
, "vnf_name": sap_docker_name
, "vnf_type": "sap_ext"})
602 # Each Service Access Point (connection_point) in the nsd is
603 # getting its own container (default)
604 elif sap
["type"] == "internal" or sap
["type"] == "management":
605 # add SAP to self.vnfds
607 sapfile
= pkg_resources
.resource_filename(
608 __name__
, "sap_vnfd.yml")
611 sap_vnfd
= load_yaml(sapfile
)
612 sap_vnfd
["connection_points"][0]["id"] = sap_interface
613 sap_vnfd
["name"] = sap_docker_name
614 sap_vnfd
["type"] = "internal"
615 # add to vnfds to calculate placement later on and deploy
616 self
.saps
[sap_docker_name
] = sap_vnfd
617 # add SAP vnf to list in the NSD so it is deployed later on
618 # each SAP get a unique VNFD and vnf_id in the NSD
619 self
.nsd
["network_functions"].append(
620 {"vnf_id": sap_docker_name
, "vnf_name": sap_docker_name
, "vnf_type": "sap_int"})
622 LOG
.debug("Loaded SAP: name: {0}, type: {1}".format(
623 sap_docker_name
, sap
['type']))
626 self
.saps_ext
= [self
.saps
[sap
]['name']
627 for sap
in self
.saps
if self
.saps
[sap
]["type"] == "external"]
628 self
.saps_int
= [self
.saps
[sap
]['name']
629 for sap
in self
.saps
if self
.saps
[sap
]["type"] == "internal"]
631 def _start_sap(self
, sap
, instance_uuid
):
635 LOG
.info('start SAP: {0} ,type: {1}'.format(sap
['name'], sap
['type']))
636 if sap
["type"] == "internal":
638 if not GK_STANDALONE_MODE
:
639 vnfi
= self
._start
_vnfd
(sap
, sap
['name'], type='sap_int')
640 self
.instances
[instance_uuid
]["vnf_instances"].append(vnfi
)
642 elif sap
["type"] == "external":
643 target_dc
= sap
.get("dc")
644 # add interface to dc switch
645 target_dc
.attachExternalSAP(sap
['name'], sap
['net'])
647 def _connect_elines(self
, eline_fwd_links
, instance_uuid
):
649 Connect all E-LINE links in the NSD
650 :param eline_fwd_links: list of E-LINE links in the NSD
651 :param: instance_uuid of the service
654 # cookie is used as identifier for the flowrules installed by the dummygatekeeper
655 # eg. different services get a unique cookie for their flowrules
657 for link
in eline_fwd_links
:
658 # check if we need to deploy this link when its a management link:
660 if self
.check_mgmt_interface(
661 link
["connection_points_reference"]):
664 src_id
, src_if_name
, src_sap_id
= parse_interface(
665 link
["connection_points_reference"][0])
666 dst_id
, dst_if_name
, dst_sap_id
= parse_interface(
667 link
["connection_points_reference"][1])
670 # check if there is a SAP in the link and chain everything together
671 if src_sap_id
in self
.saps
and dst_sap_id
in self
.saps
:
673 '2 SAPs cannot be chained together : {0} - {1}'.format(src_sap_id
, dst_sap_id
))
676 elif src_sap_id
in self
.saps_ext
:
678 # set intf name to None so the chaining function will choose
681 dst_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, dst_id
)
682 if dst_vnfi
is not None:
683 # choose first ip address in sap subnet
684 sap_net
= self
.saps
[src_sap_id
]['net']
685 sap_ip
= "{0}/{1}".format(str(sap_net
[2]),
687 self
._vnf
_reconfigure
_network
(
688 dst_vnfi
, dst_if_name
, sap_ip
)
691 elif dst_sap_id
in self
.saps_ext
:
693 # set intf name to None so the chaining function will choose
696 src_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, src_id
)
697 if src_vnfi
is not None:
698 sap_net
= self
.saps
[dst_sap_id
]['net']
699 sap_ip
= "{0}/{1}".format(str(sap_net
[2]),
701 self
._vnf
_reconfigure
_network
(
702 src_vnfi
, src_if_name
, sap_ip
)
705 # Link between 2 VNFs
707 # make sure we use the correct sap vnf name
708 if src_sap_id
in self
.saps_int
:
710 if dst_sap_id
in self
.saps_int
:
712 # re-configure the VNFs IP assignment and ensure that a new
713 # subnet is used for each E-Link
714 src_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, src_id
)
715 dst_vnfi
= self
._get
_vnf
_instance
(instance_uuid
, dst_id
)
716 if src_vnfi
is not None and dst_vnfi
is not None:
717 eline_net
= ELINE_SUBNETS
.pop(0)
718 ip1
= "{0}/{1}".format(str(eline_net
[1]),
720 ip2
= "{0}/{1}".format(str(eline_net
[2]),
722 self
._vnf
_reconfigure
_network
(src_vnfi
, src_if_name
, ip1
)
723 self
._vnf
_reconfigure
_network
(dst_vnfi
, dst_if_name
, ip2
)
730 vnf_src_interface
=src_if_name
, vnf_dst_interface
=dst_if_name
,
731 bidirectional
=BIDIRECTIONAL_CHAIN
, cmd
="add-flow", cookie
=cookie
, priority
=10)
733 "Setting up E-Line link. (%s:%s) -> (%s:%s)" % (
734 src_id
, src_if_name
, dst_id
, dst_if_name
))
736 def _connect_elans(self
, elan_fwd_links
, instance_uuid
):
738 Connect all E-LAN links in the NSD
739 :param elan_fwd_links: list of E-LAN links in the NSD
740 :param: instance_uuid of the service
743 for link
in elan_fwd_links
:
744 # check if we need to deploy this link when its a management link:
746 if self
.check_mgmt_interface(
747 link
["connection_points_reference"]):
751 # check if an external SAP is in the E-LAN (then a subnet is
753 intfs_elan
= [intf
for intf
in link
["connection_points_reference"]]
754 lan_sap
= self
.check_ext_saps(intfs_elan
)
756 lan_net
= self
.saps
[lan_sap
]['net']
757 lan_hosts
= list(lan_net
.hosts())
759 lan_net
= ELAN_SUBNETS
.pop(0)
760 lan_hosts
= list(lan_net
.hosts())
762 # generate lan ip address for all interfaces except external SAPs
763 for intf
in link
["connection_points_reference"]:
765 # skip external SAPs, they already have an ip
766 vnf_id
, vnf_interface
, vnf_sap_docker_name
= parse_interface(
768 if vnf_sap_docker_name
in self
.saps_ext
:
769 elan_vnf_list
.append(
770 {'name': vnf_sap_docker_name
, 'interface': vnf_interface
})
773 ip_address
= "{0}/{1}".format(str(lan_hosts
.pop(0)),
775 vnf_id
, intf_name
, vnf_sap_id
= parse_interface(intf
)
777 # make sure we use the correct sap vnf name
778 src_docker_name
= vnf_id
779 if vnf_sap_id
in self
.saps_int
:
780 src_docker_name
= vnf_sap_id
784 "Setting up E-LAN interface. (%s:%s) -> %s" % (
785 vnf_id
, intf_name
, ip_address
))
787 # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
788 # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
789 # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is necessary.
790 vnfi
= self
._get
_vnf
_instance
(instance_uuid
, vnf_id
)
792 self
._vnf
_reconfigure
_network
(vnfi
, intf_name
, ip_address
)
793 # add this vnf and interface to the E-LAN for tagging
794 elan_vnf_list
.append(
795 {'name': src_docker_name
, 'interface': intf_name
})
797 # install the VLAN tags for this E-LAN
798 GK
.net
.setLAN(elan_vnf_list
)
800 def _load_docker_files(self
):
802 Get all paths to Dockerfiles from VNFDs and store them in dict.
805 for k
, v
in self
.vnfds
.iteritems():
806 for vu
in v
.get("virtual_deployment_units"):
807 if vu
.get("vm_image_format") == "docker":
808 vm_image
= vu
.get("vm_image")
809 docker_path
= os
.path
.join(
810 self
.package_content_path
,
811 make_relative_path(vm_image
))
812 self
.local_docker_files
[k
] = docker_path
813 LOG
.debug("Found Dockerfile (%r): %r" % (k
, docker_path
))
815 def _load_docker_urls(self
):
817 Get all URLs to pre-build docker images in some repo.
820 # also merge sap dicts, because internal saps also need a docker
822 all_vnfs
= self
.vnfds
.copy()
823 all_vnfs
.update(self
.saps
)
825 for k
, v
in all_vnfs
.iteritems():
826 for vu
in v
.get("virtual_deployment_units", {}):
827 if vu
.get("vm_image_format") == "docker":
828 url
= vu
.get("vm_image")
830 url
= url
.replace("http://", "")
831 self
.remote_docker_image_urls
[k
] = url
832 LOG
.debug("Found Docker image URL (%r): %r" %
833 (k
, self
.remote_docker_image_urls
[k
]))
835 def _build_images_from_dockerfiles(self
):
837 Build Docker images for each local Dockerfile found in the package: self.local_docker_files
839 if GK_STANDALONE_MODE
:
840 return # do not build anything in standalone mode
842 LOG
.info("Building %d Docker images (this may take several minutes) ..." % len(
843 self
.local_docker_files
))
844 for k
, v
in self
.local_docker_files
.iteritems():
845 for line
in dc
.build(path
=v
.replace(
846 "Dockerfile", ""), tag
=k
, rm
=False, nocache
=False):
847 LOG
.debug("DOCKER BUILD: %s" % line
)
848 LOG
.info("Docker image created: %s" % k
)
850 def _pull_predefined_dockerimages(self
):
852 If the package contains URLs to pre-build Docker images, we download them with this method.
855 for url
in self
.remote_docker_image_urls
.itervalues():
856 # only pull if not present (speedup for development)
858 if len(dc
.images
.list(name
=url
)) > 0:
859 LOG
.debug("Image %r present. Skipping pull." % url
)
861 LOG
.info("Pulling image: %r" % url
)
862 # this seems to fail with latest docker api version 2.0.2
863 # dc.images.pull(url,
864 # insecure_registry=True)
865 # using docker cli instead
872 def _check_docker_image_exists(self
, image_name
):
874 Query the docker service and check if the given image exists
875 :param image_name: name of the docker image
878 return len(DockerClient().images
.list(name
=image_name
)) > 0
880 def _calculate_placement(self
, algorithm
):
882 Do placement by adding the a field "dc" to
883 each VNFD that points to one of our
884 data center objects known to the gatekeeper.
886 assert(len(self
.vnfds
) > 0)
887 assert(len(GK
.dcs
) > 0)
888 # instantiate algorithm an place
890 p
.place(self
.nsd
, self
.vnfds
, self
.saps
, GK
.dcs
)
891 LOG
.info("Using placement algorithm: %r" % p
.__class
__.__name
__)
892 # lets print the placement result
893 for name
, vnfd
in self
.vnfds
.iteritems():
894 LOG
.info("Placed VNF %r on DC %r" % (name
, str(vnfd
.get("dc"))))
895 for sap
in self
.saps
:
896 sap_dict
= self
.saps
[sap
]
897 LOG
.info("Placed SAP %r on DC %r" % (sap
, str(sap_dict
.get("dc"))))
899 def _calculate_cpu_cfs_values(self
, cpu_time_percentage
):
901 Calculate cpu period and quota for CFS
902 :param cpu_time_percentage: percentage of overall CPU to be used
903 :return: cpu_period, cpu_quota
905 if cpu_time_percentage
is None:
907 if cpu_time_percentage
< 0:
909 # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
910 # Attention minimum cpu_quota is 1ms (micro)
911 cpu_period
= 1000000 # lets consider a fixed period of 1000000 microseconds for now
912 LOG
.debug("cpu_period is %r, cpu_percentage is %r" %
913 (cpu_period
, cpu_time_percentage
))
914 # calculate the fraction of cpu time for this container
915 cpu_quota
= cpu_period
* cpu_time_percentage
916 # ATTENTION >= 1000 to avoid a invalid argument system error ... no
919 LOG
.debug("cpu_quota before correcting: %r" % cpu_quota
)
921 LOG
.warning("Increased CPU quota to avoid system error.")
922 LOG
.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
923 (cpu_period
, cpu_quota
))
924 return int(cpu_period
), int(cpu_quota
)
926 def check_ext_saps(self
, intf_list
):
927 # check if the list of interfacs contains an external SAP
928 saps_ext
= [self
.saps
[sap
]['name']
929 for sap
in self
.saps
if self
.saps
[sap
]["type"] == "external"]
930 for intf_name
in intf_list
:
931 vnf_id
, vnf_interface
, vnf_sap_docker_name
= parse_interface(
933 if vnf_sap_docker_name
in saps_ext
:
934 return vnf_sap_docker_name
936 def check_mgmt_interface(self
, intf_list
):
937 SAPs_mgmt
= [p
.get('id') for p
in self
.nsd
["connection_points"]
938 if 'management' in p
.get('type')]
939 for intf_name
in intf_list
:
940 if intf_name
in SAPs_mgmt
:
945 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """

    def place(self, nsd, vnfds, saps, dcs):
        """
        Assign the first data center in *dcs* to every VNFD.
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict vnf_id -> VNFD dict; a "dc" key is added to each
        :param saps: dict of SAPs (unused here)
        :param dcs: dict of known data centers
        """
        # .values()/.items() instead of the Python-2-only itervalues()/
        # iteritems() so the code also runs on Python 3; the first DC is
        # hoisted out of the loop since it never changes
        first_dc = list(dcs.values())[0]
        for vnf_id, vnfd in vnfds.items():
            vnfd["dc"] = first_dc
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """

    def place(self, nsd, vnfds, saps, dcs):
        """
        Assign a data center to each VNFD in round-robin order.
        :param nsd: network service descriptor (unused here)
        :param vnfds: dict vnf_id -> VNFD dict; a "dc" key is added to each
        :param saps: dict of SAPs (unused here)
        :param dcs: dict of known data centers
        """
        # counter initialization was lost in extraction; restored (required
        # by the modulo indexing and increment below)
        c = 0
        # .values()/.items() replace the Python-2-only itervalues()/iteritems()
        dcs_list = list(dcs.values())
        for vnf_id, vnfd in vnfds.items():
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
            c += 1  # inc. c to use next DC
class RoundRobinDcPlacementWithSAPs(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion,
    every SAP is instantiated on the same DC as the connected VNF.
    """

    def place(self, nsd, vnfds, saps, dcs):
        """
        Place VNFs round-robin over *dcs*, then co-locate E-Line SAPs with
        the VNF at the other end of their link and spread E-LAN SAPs over
        random DCs.
        :param nsd: network service descriptor (virtual_links are inspected)
        :param vnfds: dict vnf_id -> VNFD dict; a "dc" key is added to each
        :param saps: dict sap_id -> SAP dict; a "dc" key is added to matched SAPs
        :param dcs: dict of known data centers
        """
        # round-robin counter was lost in extraction; restored (required by
        # the modulo indexing and increment below)
        c = 0
        # .values()/.items() replace the Python-2-only itervalues()/iteritems()
        dcs_list = list(dcs.values())
        for vnf_id, vnfd in vnfds.items():
            vnfd["dc"] = dcs_list[c % len(dcs_list)]
            c += 1  # inc. c to use next DC

        # collect E-Line and E-LAN links from the NSD
        vlinks = nsd.get("virtual_links", [])
        eline_fwd_links = [l for l in vlinks if (
            l["connectivity_type"] == "E-Line")]
        elan_fwd_links = [l for l in vlinks if (
            l["connectivity_type"] == "E-LAN")]

        # SAPs on E-Line links are placed on the same DC as the VNF on the
        # other end of the link
        for link in eline_fwd_links:
            src_id, src_if_name, src_sap_id = parse_interface(
                link["connection_points_reference"][0])
            dst_id, dst_if_name, dst_sap_id = parse_interface(
                link["connection_points_reference"][1])
            # check if there is a SAP in the link
            if src_sap_id in saps:
                # get dc where connected vnf is mapped to
                dc = vnfds[dst_id]['dc']
                saps[src_sap_id]['dc'] = dc
            if dst_sap_id in saps:
                # get dc where connected vnf is mapped to
                dc = vnfds[src_id]['dc']
                saps[dst_sap_id]['dc'] = dc

        # SAPs on E-LANs are placed on a random DC
        dcs_list = list(dcs.values())
        dc_len = len(dcs_list)
        for link in elan_fwd_links:
            for intf in link["connection_points_reference"]:
                # find SAP interfaces
                intf_id, intf_name, intf_sap_id = parse_interface(intf)
                if intf_sap_id in saps:
                    dc = dcs_list[randint(0, dc_len - 1)]
                    saps[intf_sap_id]['dc'] = dc
1026 Resource definitions and API endpoints
class Packages(fr.Resource):
    """
    API endpoint to upload and list service packages.
    """

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: (JSON dict with service_uuid/size/sha1/error, HTTP status)
        """
        try:
            LOG.info("POST /packages called")
            # lets search for the package in the request
            is_file_object = False  # make API more robust: file can be in data or in files field
            if "package" in request.files:
                son_file = request.files["package"]
                is_file_object = True
            elif len(request.data) > 0:
                son_file = request.data
            else:
                return {"service_uuid": None, "size": 0, "sha1": None,
                        "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            # NOTE(review): this hashes str(son_file), i.e. the object's string
            # representation, not necessarily the file contents — kept as-is
            # for backward compatibility; confirm before relying on the sha1
            file_hash = hashlib.sha1(str(son_file)).hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.tgo" % service_uuid)
            # store *.son file to disk
            if is_file_object:
                son_file.save(upload_path)
            else:
                with open(upload_path, 'wb') as f:
                    f.write(son_file)
            size = os.path.getsize(upload_path)

            # first stop and delete any other running services;
            # use a distinct loop variable: re-using 'service_uuid' here would
            # clobber the uuid of the package being uploaded, so the Service
            # below would be registered under a stale uuid
            service_list = copy.copy(GK.services)
            for old_service_uuid in service_list:
                instances_list = copy.copy(
                    GK.services[old_service_uuid].instances)
                for instance_uuid in instances_list:
                    # valid service and instance UUID, stop service
                    GK.services.get(old_service_uuid).stop_service(
                        instance_uuid)
                    LOG.info("service instance with uuid %r stopped." %
                             instance_uuid)

            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)

            # automatically deploy the service
            # ok, we have a service uuid, lets start the service
            GK.services.get(service_uuid).start_service()

            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size,
                    "sha1": file_hash, "error": None}, 201
        except BaseException:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0,
                    "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of UUID's of uploaded service packages.
        :return: dict with 'service_uuid_list'
        """
        LOG.info("GET /packages")
        # keys() instead of py2-only iterkeys(); works on Python 2 and 3
        return {"service_uuid_list": list(GK.services.keys())}
class Instantiations(fr.Resource):
    """
    API endpoints to start, list and stop service instances.
    """

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        :return: (JSON with service_instance_uuid, 201) or ("...", 404)
        """
        LOG.info("POST /instantiations (or /requests) called")
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        # lets be a bit fuzzy here to make testing easier
        if (service_uuid is None or service_uuid ==
                "latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service
            # in the list
            service_uuid = list(GK.services.keys())[0]
        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(
                service_uuid).start_service()
            return {"service_instance_uuid": service_instance_uuid}, 201
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.info("GET /instantiations")
        # keys()/values() instead of py2-only iterkeys()/itervalues()
        return {"service_instantiations_list": [
            list(s.instances.keys()) for s in GK.services.values()]}

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        instance_uuid = json_data.get("service_instance_uuid")
        # be fuzzy to make testing easier
        if service_uuid is None and len(GK.services) > 0:
            # if we don't get a service uuid, we simply stop the first service
            # in the list
            service_uuid = list(GK.services.keys())[0]
        # guard the direct lookup: an unknown/None service uuid must fall
        # through to the 404 below instead of raising KeyError (HTTP 500)
        if service_uuid in GK.services and instance_uuid is None and len(
                GK.services[service_uuid].instances) > 0:
            instance_uuid = list(
                GK.services[service_uuid].instances.keys())[0]
        if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
            # valid service and instance UUID, stop service
            GK.services.get(service_uuid).stop_service(instance_uuid)
            return "service instance with uuid %r stopped." % instance_uuid, 200
        return "Service not found", 404
class Exit(fr.Resource):

    def get(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        # stopping the net of the first DC shuts down the whole emulation:
        # all DCs are assumed to belong to the same DCNetwork (see get_dc_network)
        list(GK.dcs.values())[0].net.stop()
def initialize_GK():
    """
    (Re-)create the single global gatekeeper object used by all endpoints.
    """
    global GK
    # NOTE(review): function body was missing in the reviewed snapshot;
    # restored from the surrounding usage — confirm Gatekeeper is the
    # class defined earlier in this file
    GK = Gatekeeper()


# create a single, global GK object
GK = None
initialize_GK()
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
# REST API wrapper around the Flask app (restored: 'api' is used below
# but was never created in the reviewed snapshot)
api = fr.Api(app)
# define endpoints
api.add_resource(Packages, '/packages', '/api/v2/packages')
api.add_resource(Instantiations, '/instantiations',
                 '/api/v2/instantiations', '/api/v2/requests')
api.add_resource(Exit, '/emulator/exit')
def start_rest_api(host, port, datacenters=None):
    """
    Attach the given datacenters to the gatekeeper and run the REST API.
    Blocks in Flask's app.run().

    :param host: interface to bind to, e.g. "0.0.0.0"
    :param port: TCP port for the API
    :param datacenters: dict of datacenters made available to the GK
        (default: empty dict; None avoids a shared mutable default argument)
    """
    GK.dcs = dict() if datacenters is None else datacenters
    GK.net = get_dc_network()
    # start the Flask server (not the best performance but ok for our use case)
    app.run(host=host,
            port=port,
            debug=True,
            use_reloader=False  # this is needed to run Flask in a non-main thread
            )
def ensure_dir(name):
    """
    Create directory *name* (including parents) if it does not exist yet.

    Uses EAFP instead of exists()-then-makedirs() to avoid the race where
    another process creates the directory between the check and the call.
    """
    try:
        os.makedirs(name)
    except OSError:
        # tolerate "already exists" (possibly created concurrently);
        # re-raise real failures such as permission errors
        if not os.path.isdir(name):
            raise
def load_yaml(path):
    """
    Parse the YAML file at *path* and return its contents.
    Returns an empty dict if the file cannot be parsed.
    """
    with open(path, "r") as f:
        try:
            # safe_load instead of yaml.load: descriptors are plain data and
            # plain yaml.load can instantiate arbitrary Python objects from
            # untrusted input
            r = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error: %r" % str(exc))
            r = dict()
    return r
def make_relative_path(path):
    """
    Turn a descriptor path into a relative path: drop an optional
    leading "file://" scheme and then an optional leading "/".
    """
    for prefix in ("file://", "/"):
        if path.startswith(prefix):
            path = path[len(prefix):]
    return path
def get_dc_network():
    """
    retrieve the DCnetwork where this dummygatekeeper (GK) connects to.
    Assume at least 1 datacenter is connected to this GK, and that all
    datacenters belong to the same DCNetwork
    """
    assert (len(GK.dcs) > 0)
    # wrap in list(): dict.values() is not subscriptable on Python 3;
    # this also matches how the Exit endpoint accesses the same structure
    return list(GK.dcs.values())[0].net
def parse_interface(interface_name):
    """
    convert the interface name in the nsd to the according vnf_id,
    vnf_interface names

    :param interface_name: e.g. "vnf1:eth0" or "sap1"
    :return: tuple (vnf_id, vnf_interface, vnf_sap_docker_name)
    """
    if ':' in interface_name:
        # split only on the first colon so names containing additional
        # colons do not raise a ValueError on tuple unpacking
        vnf_id, vnf_interface = interface_name.split(':', 1)
        vnf_sap_docker_name = interface_name.replace(':', '_')
    else:
        vnf_id = interface_name
        vnf_interface = interface_name
        vnf_sap_docker_name = interface_name
    return vnf_id, vnf_interface, vnf_sap_docker_name
def reset_subnets():
    """
    (Re-)initialize the module-level subnet pools used for generated
    interfaces. The 'global' declarations are required: without them the
    assignments would only create function-local names.
    """
    # private subnet definitions for the generated interfaces
    # 10.10.xxx.0/30
    global SAP_SUBNETS
    SAP_SUBNETS = generate_subnets('10.10', 0, subnet_size=50, mask=30)
    # 10.20.xxx.0/24
    global ELAN_SUBNETS
    ELAN_SUBNETS = generate_subnets('10.20', 0, subnet_size=50, mask=24)
    # 10.30.xxx.0/30
    global ELINE_SUBNETS
    ELINE_SUBNETS = generate_subnets('10.30', 0, subnet_size=50, mask=30)
if __name__ == '__main__':
    """
    Lets allow to run the API in standalone mode.
    """
    GK_STANDALONE_MODE = True
    # only show INFO and above from werkzeug's request logger
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    # bind on all interfaces, port 8000; blocks until the server stops
    start_rest_api("0.0.0.0", 8000)