2 # Copyright (c) 2018 SONATA-NFV, 5GTANGO and Paderborn University
5 # Licensed under the Apache License, Version 2.0 (the "License");
6 # you may not use this file except in compliance with the License.
7 # You may obtain a copy of the License at
9 # http://www.apache.org/licenses/LICENSE-2.0
11 # Unless required by applicable law or agreed to in writing, software
12 # distributed under the License is distributed on an "AS IS" BASIS,
13 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 # See the License for the specific language governing permissions and
15 # limitations under the License.
17 # Neither the name of the SONATA-NFV, 5GTANGO, Paderborn University
18 # nor the names of its contributors may be used to endorse or promote
19 # products derived from this software without specific prior written
22 # This work has been performed in the framework of the SONATA project,
23 # funded by the European Commission under Grant number 671517 through
24 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
25 # acknowledge the contributions of their colleagues of the SONATA
26 # partner consortium (www.sonata-nfv.eu).
28 # This work has also been performed in the framework of the 5GTANGO project,
29 # funded by the European Commission under Grant number 761493 through
30 # the Horizon 2020 and 5G-PPP programmes. The authors would like to
31 # acknowledge the contributions of their colleagues of the 5GTANGO
32 # partner consortium (www.5gtango.eu).
41 from docker
import DockerClient
42 from flask
import Flask
, request
43 import flask_restful
as fr
44 from gevent
.pywsgi
import WSGIServer
45 from subprocess
import Popen
# Module-wide logger for the 5GTANGO lightweight lifecycle manager (LLCM).
LOG = logging.getLogger("5gtango.llcm")
LOG.setLevel(logging.INFO)

# CORS headers attached to REST API responses (allow any origin, GET/OPTIONS).
CORS_HEADER = {'Access-Control-Allow-Origin': '*',
               'Access-Control-Allow-Methods': 'GET,OPTIONS'}

# Base folder for uploaded packages and the unpacked service catalog.
GK_STORAGE = "/tmp/vim-emu-tango-llcm/"
UPLOAD_FOLDER = os.path.join(GK_STORAGE, "uploads/")
CATALOG_FOLDER = os.path.join(GK_STORAGE, "catalog/")

# Enable Dockerfile build functionality
BUILD_DOCKERFILE = False

# flag to indicate that we run without the emulator (only the bare API for
# integration testing)
GK_STANDALONE_MODE = False

# should a new version of an image be pulled even if its available
# NOTE(review): the corresponding constant (FORCE_PULL?) is missing from
# this source excerpt.

# flag to indicate if we use bidirectional forwarding rules in the
# automatic chaining process
BIDIRECTIONAL_CHAIN = True

# override the management interfaces in the descriptors with default
# docker0 interfaces in the containers
USE_DOCKER_MGMT = False

# automatically deploy uploaded packages (no need to execute son-access
# deploy --latest separately)
# and also automatically terminate any other running services
# global subnet definitions (see reset_subnets())
# NOTE(review): the auto-deploy/auto-delete flags and the ELINE/ELAN subnet
# definitions referenced elsewhere in this module are missing from this
# excerpt.

# Time in seconds to wait for vnf stop scripts to execute fully
VNF_STOP_WAIT_TIME = 5

# If services are instantiated multiple times, the public port
# mappings need to be adapted to avoid collisions. We use this
# offset for this: NEW_PORT (SSIID * OFFSET) + ORIGINAL_PORT
MULTI_INSTANCE_PORT_OFFSET = 1000

# Selected Placement Algorithm: Points to the class of the selected
# placement algorithm.
PLACEMENT_ALGORITHM_OBJ = None

# Path to folder with <container_name>.env.yml files that contain
# environment variables injected into the specific container
# when it is started.
PER_INSTANCE_ENV_CONFIGURATION_FOLDER = None
class OnBoardingException(BaseException):
    """Raised when a service package cannot be on-boarded (e.g. no NSD/VNFDs)."""
class Gatekeeper(object):
    """
    Central registry of on-boarded services, keyed by service UUID.
    """
    # NOTE(review): the `def __init__(self):` signature is missing from this
    # source excerpt; the statements below belong to the constructor.
        self.services = dict()
        # used to generate short names for VNFs (Mininet limitation)
        LOG.info("Initialized 5GTANGO LLCM module.")

    def register_service_package(self, service_uuid, service):
        """
        register new service package
        :param service_uuid: UUID identifying the uploaded package
        :param service object
        """
        self.services[service_uuid] = service
        # lets perform all steps needed to onboard the service
        # NOTE(review): the statement that triggers on-boarding is missing
        # from this source excerpt.
class Service(object):
    """
    This class represents a NS uploaded as a *.son package to the
    gatekeeper API. Can have multiple running instances of this service.
    """
    # NOTE(review): the `def __init__(self, service_uuid, package_file_hash,
    # package_file_path):` signature is missing from this source excerpt;
    # the assignments below initialize a Service instance.
        self.uuid = service_uuid
        self.package_file_hash = package_file_hash
        self.package_file_path = package_file_path
        # folder in the catalog where the extracted package contents live
        self.package_content_path = os.path.join(
            CATALOG_FOLDER, "services/%s" % self.uuid)
        # container name -> path of a local Dockerfile found in the package
        self.local_docker_files = dict()
        # container name -> URL of a pre-built Docker image
        self.remote_docker_image_urls = dict()
        # instance UUID -> instance record (see start_service)
        self.instances = dict()
        # monotonically increasing counter used to derive SSIIDs
        self._instance_counter = 0
        self.created_at = str(datetime.datetime.now())
    # NOTE(review): the `def onboard(self):` signature and several statements
    # (NSD/VNFD loading and the BUILD_DOCKERFILE branch) are missing from
    # this source excerpt.
        """
        Do all steps to prepare this service to be instantiated
        """
        # 1. extract the contents of the package and store them in our catalog
        self._unpack_service_package()
        # 2. read in all descriptor files
        self._load_package_descriptor()
            raise OnBoardingException("No NSD found.")
        if len(self.vnfds) < 1:
            raise OnBoardingException("No VNFDs found.")
        # 3. prepare container images (e.g. download or build Dockerfile)
        self._load_docker_files()
        self._build_images_from_dockerfiles()
        self._load_docker_urls()
        self._pull_predefined_dockerimages()
        # reserve one subnet per E-Line / E-LAN link of this service
        eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
        self.eline_subnets = [ELINE_SUBNETS.pop(0) for _ in eline_fwd_links]
        self.elan_subnets = [ELAN_SUBNETS.pop(0) for _ in elan_fwd_links]
        LOG.debug("Reserved subnets for service '{}': E-Line: {} / E-LAN: {}"
                  .format(self.manifest.get("name"),
                          self.eline_subnets, self.elan_subnets))
        LOG.info("On-boarded service: {}".format(self.manifest.get("name")))
    def start_service(self):
        """
        This methods creates and starts a new service instance.
        It computes placements, iterates over all VNFDs, and starts
        each VNFD as a Docker container in the data center selected
        by the placement algorithm.
        :return: presumably the new instance_uuid — the return statement is
                 missing from this source excerpt; TODO confirm upstream.
        """
        LOG.info("Starting service {} ({})"
                 .format(get_triple_id(self.nsd), self.uuid))

        # 1. each service instance gets a new uuid to identify it
        instance_uuid = str(uuid.uuid4())
        # build a instances dict (a bit like a NSR :))
        self.instances[instance_uuid] = dict()
        self.instances[instance_uuid]["uuid"] = self.uuid
        # SSIID = short service instance ID (to postfix Container names)
        self.instances[instance_uuid]["ssiid"] = self._instance_counter
        self.instances[instance_uuid]["name"] = get_triple_id(self.nsd)
        self.instances[instance_uuid]["vnf_instances"] = list()
        self.instances[instance_uuid]["created_at"] = str(datetime.datetime.now())
        # increase for next instance
        self._instance_counter += 1

        # 3. start all vnfds that we have in the service
        for vnf_id in self.vnfds:
            vnfd = self.vnfds[vnf_id]
            # attention: returns a list of started deployment units
            vnfis = self._start_vnfd(
                vnfd, vnf_id, self.instances[instance_uuid]["ssiid"])
            # add list of VNFIs to total VNFI list
            self.instances[instance_uuid]["vnf_instances"].extend(vnfis)

        # 4. Deploy E-Line, E-Tree and E-LAN links
        # Attention: Only done if ""forwarding_graphs" section in NSD exists,
        # even if "forwarding_graphs" are not used directly.
        # Attention2: Do a copy of *_subnets with list() is important here!
        eline_fwd_links, elan_fwd_links = self._get_elines_and_elans()
        # 5a. deploy E-Line links
        GK.net.deployed_elines.extend(eline_fwd_links)  # bookkeeping
        self._connect_elines(eline_fwd_links, instance_uuid, list(self.eline_subnets))
        # 5b. deploy E-Tree/E-LAN links
        GK.net.deployed_elans.extend(elan_fwd_links)  # bookkeeping
        self._connect_elans(elan_fwd_links, instance_uuid, list(self.elan_subnets))

        # 6. run the emulator specific entrypoint scripts in the VNFIs of this
        # service instance
        self._trigger_emulator_start_scripts_in_vnfis(
            self.instances[instance_uuid]["vnf_instances"])

        # NOTE(review): the log message below has three placeholders but only
        # two visible arguments — the instance_uuid argument line is missing
        # from this source excerpt, as is the final return statement.
        LOG.info("Service '{}' started. Instance id: {} SSIID: {}"
                 .format(self.instances[instance_uuid]["name"],
                         self.instances[instance_uuid]["ssiid"]))
    def stop_service(self, instance_uuid):
        """
        This method stops a running service instance.
        It iterates over all VNF instances, stopping them each
        and removing them from their data center.
        :param instance_uuid: the uuid of the service instance to be stopped
        """
        LOG.info("Stopping service %r" % self.uuid)
        # get relevant information
        # instance_uuid = str(self.uuid.uuid4())
        vnf_instances = self.instances[instance_uuid]["vnf_instances"]
        # trigger stop scripts in vnf instances and wait a few seconds for
        # their execution before tearing the containers down
        self._trigger_emulator_stop_scripts_in_vnfis(vnf_instances)
        time.sleep(VNF_STOP_WAIT_TIME)
        # stop and remove each VNF instance
        for v in vnf_instances:
            # NOTE(review): the call stopping each VNF instance (presumably
            # self._stop_vnfi(v)) is missing from this source excerpt.
        # last step: remove the instance from the list of all instances
        del self.instances[instance_uuid]
269 def _get_elines_and_elans(self
):
271 Get the E-Line, E-LAN, E-Tree links from the NSD.
273 # Attention: Only done if ""forwarding_graphs" section in NSD exists,
274 # even if "forwarding_graphs" are not used directly.
275 eline_fwd_links
= list()
276 elan_fwd_links
= list()
277 if "virtual_links" in self
.nsd
and "forwarding_graphs" in self
.nsd
:
278 vlinks
= self
.nsd
["virtual_links"]
279 # constituent virtual links are not checked
280 eline_fwd_links
= [l
for l
in vlinks
if (
281 l
["connectivity_type"] == "E-Line")]
282 elan_fwd_links
= [l
for l
in vlinks
if (
283 l
["connectivity_type"] == "E-LAN" or
284 l
["connectivity_type"] == "E-Tree")] # Treat E-Tree as E-LAN
285 return eline_fwd_links
, elan_fwd_links
    def _get_resource_limits(self, deployment_unit):
        """
        Extract resource limits from deployment units.
        :param deployment_unit: V/CDU descriptor dict
        :return: (cpu_list, cpu_period, cpu_quota, mem_limit)
        """
        # default: full CPU share
        cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(1.0))
        # NOTE(review): default initializations of cpu_list and mem_limit are
        # missing from this source excerpt.
        # update from descriptor
        if "resource_requirements" in deployment_unit:
            res_req = deployment_unit.get("resource_requirements")
            cpu_list = res_req.get("cpu").get("cpuset")
            # NOTE(review): a guard line between the two lookups is missing
            # here; as shown, the "vcpus" lookup overwrites "cpuset".
            cpu_list = res_req.get("cpu").get("vcpus")
            if cpu_list is not None:
                # attention: docker expects list as string w/o spaces:
                cpu_list = str(cpu_list).replace(" ", "").strip()
            cpu_bw = res_req.get("cpu").get("cpu_bw")
            # NOTE(review): a None-guard around cpu_bw is missing here.
            cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
            mem_limit = res_req.get("memory").get("size")
            mem_unit = str(res_req.get("memory").get("size_unit", "GB"))
            if mem_limit is not None:
                mem_limit = int(mem_limit)
                # NOTE(review): the leading `if "G" in mem_unit:` line is
                # missing from this source excerpt.
                mem_limit = mem_limit * 1024 * 1024 * 1024
                elif "M" in mem_unit:
                    mem_limit = mem_limit * 1024 * 1024
                elif "K" in mem_unit:
                    mem_limit = mem_limit * 1024
        return cpu_list, cpu_period, cpu_quota, mem_limit
    def _start_vnfd(self, vnfd, vnf_id, ssiid, **kwargs):
        """
        Start a single VNFD of this service
        :param vnfd: vnfd descriptor dict
        :param vnf_id: unique id of this vnf in the nsd
        :param ssiid: short service instance id used to postfix container names
        :return: presumably the list of started VNFIs — the return statement
                 is missing from this source excerpt; TODO confirm upstream.
        """
        # NOTE(review): several loop heads, guards and else-branches are
        # missing from this source excerpt; gaps are flagged inline.
        # the vnf_name refers to the container image to be deployed
        vnf_name = vnfd.get("name")
        # combine VDUs and CDUs
        deployment_units = (vnfd.get("virtual_deployment_units", []) +
                            vnfd.get("cloudnative_deployment_units", []))
        # iterate over all deployment units within each VNFDs
        for u in deployment_units:
            # 0. vnf_container_name = vnf_id.vdu_id
            vnf_container_name = get_container_name(vnf_id, u.get("id"))
            vnf_container_instance_name = get_container_name(vnf_id, u.get("id"), ssiid)
            # 1. get the name of the docker image to start
            if vnf_container_name not in self.remote_docker_image_urls:
                raise Exception("No image name for %r found. Abort." % vnf_container_name)
            docker_image_name = self.remote_docker_image_urls.get(vnf_container_name)
            # 2. select datacenter to start the VNF in
            target_dc = self._place(vnfd, vnf_id, u, ssiid)
            # 3. perform some checks to ensure we can start the container
            assert(docker_image_name is not None)
            assert(target_dc is not None)
            if not self._check_docker_image_exists(docker_image_name):
                raise Exception("Docker image {} not found. Abort."
                                .format(docker_image_name))

            # 4. get the resource limits
            cpu_list, cpu_period, cpu_quota, mem_limit = self._get_resource_limits(u)

            # get connection points defined for the DU
            intfs = u.get("connection_points", [])
            # do some re-naming of fields to be compatible to containernet
            # NOTE(review): the loop head over interfaces (and its guard) is
            # missing here; `i` below is one interface dict.
                LOG.info("Found static address for {}: {}"
                         .format(i.get("id"), i.get("address")))
                i["ip"] = i.get("address")

            # get ports and port_bindings from the port and publish fields of CNFD
            # see: https://github.com/containernet/containernet/wiki/Exposing-and-mapping-network-ports
            ports = list()  # Containernet naming
            port_bindings = dict()
            # NOTE(review): the loop head over connection points is missing.
                if i.get("port"):  # field with a single port
                    if not isinstance(i.get("port"), int):
                        LOG.info("Field 'port' is no int CP: {}".format(i))
                    # NOTE(review): the `else:` head is missing here.
                        ports.append(i.get("port"))  # collect all ports
                if i.get("ports"):  # list with multiple ports
                    if not isinstance(i.get("ports"), list):
                        LOG.info("Field 'port' is no list CP: {}".format(i))
                    # NOTE(review): `else:` / `try:` heads are missing here.
                        for p in i.get("ports"):
                            if not isinstance(p, int):
                                # assume "port/protocol" string, e.g. "8080/udp"
                                p = tuple(p.split("/"))
                                ports.append(p)  # collect all ports
                            except BaseException as ex:
                                # NOTE(review): the LOG call wrapping the
                                # message below is missing here.
                                "Could not parse ports list: {}".format(p))
                                ports.append(p)  # collect all ports
                # NOTE(review): the `if i.get("publish"):` guard is missing.
                if not isinstance(i.get("publish"), dict):
                    LOG.info("Field 'publish' is no dict CP: {}".format(i))
                # NOTE(review): the `else:` head is missing here.
                    port_bindings.update(i.get("publish"))
            # update port mapping for cases where service is deployed > 1 times
            port_bindings = update_port_mapping_multi_instance(ssiid, port_bindings)
            LOG.info("{} exposes ports: {}".format(vnf_container_instance_name, ports))
            if len(port_bindings) > 0:
                LOG.info("{} publishes ports: {}".format(vnf_container_instance_name, port_bindings))

            # 5. collect additional information to start container
            # NOTE(review): the initialization of the container environment
            # dict `cenv` is missing from this source excerpt.
            # 5.1 inject descriptor based start/stop commands into env (overwrite)
            VNFD_CMD_START = u.get("vm_cmd_start")
            VNFD_CMD_STOP = u.get("vm_cmd_stop")
            if VNFD_CMD_START and not VNFD_CMD_START == "None":
                LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_START) +
                         " Overwriting SON_EMU_CMD.")
                cenv["SON_EMU_CMD"] = VNFD_CMD_START
            if VNFD_CMD_STOP and not VNFD_CMD_STOP == "None":
                LOG.info("Found 'vm_cmd_start'='{}' in VNFD.".format(VNFD_CMD_STOP) +
                         " Overwriting SON_EMU_CMD_STOP.")
                cenv["SON_EMU_CMD_STOP"] = VNFD_CMD_STOP

            # 5.2 inject per instance configurations based on envs
            conf_envs = self._load_instance_conf_envs(vnf_container_instance_name)
            cenv.update(conf_envs)

            # 5.3 handle optional ipc_mode setting
            ipc_mode = u.get("ipc_mode", None)
            # 5.4 handle optional devices setting
            devices = u.get("devices", [])
            # 5.5 handle optional cap_add setting
            cap_add = u.get("cap_add", [])

            # 6. Start the container
            LOG.info("Starting %r as %r in DC %r" %
                     (vnf_name, vnf_container_instance_name, target_dc))
            LOG.debug("Interfaces for %r: %r" % (vnf_id, intfs))
            # start the container
            # NOTE(review): several keyword-argument lines of this call (e.g.
            # cpu_quota, mem_limit, ipc_mode, devices, cap_add) are missing
            # from this source excerpt.
            vnfi = target_dc.startCompute(
                vnf_container_instance_name,
                image=docker_image_name,
                cpu_period=cpu_period,
                cpuset_cpus=cpu_list,
                properties=cenv,  # environment
                port_bindings=port_bindings,
                # only publish if explicitly stated in descriptor
                publish_all_ports=False,
                type=kwargs.get('type', 'docker'))
            # add vnfd reference to vnfi
            vnfi.vnf_container_name = vnf_container_name
            vnfi.vnf_container_instance_name = vnf_container_instance_name
            # NOTE(review): further attribute assignments and the statement
            # collecting/returning the started VNFIs are missing from this
            # source excerpt.
    def _stop_vnfi(self, vnfi):
        """
        Stop a single VNF instance and remove it from its data center.
        :param vnfi: vnf instance to be stopped
        """
        # Find the correct datacenter
        status = vnfi.getStatus()
        # NOTE(review): the lookup that derives `dc` from the status is
        # missing from this source excerpt.
        LOG.info("Stopping the vnf instance contained in %r in DC %r" %
                 (status["name"], dc))
        dc.stopCompute(status["name"])
    def _get_vnf_instance(self, instance_uuid, vnf_id):
        """
        Returns VNFI object for a given "vnf_id" or "vnf_container_name" taken from an NSD.
        :return: single object
        """
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if str(vnfi.name) == str(vnf_id):
                # NOTE(review): the `return vnfi` statement is missing from
                # this source excerpt.
        LOG.warning("No container with name: {0} found.".format(vnf_id))
        # NOTE(review): a final fallback return is missing from this excerpt.
    def _get_vnf_instance_units(self, instance_uuid, vnf_id):
        """
        Returns a list of VNFI objects (all deployment units) for a given
        "vnf_id" taken from an NSD.
        :return: list (None/empty fallback handling not visible in excerpt)
        """
        # NOTE(review): the initialization of the result list `r` is missing
        # from this source excerpt.
        for vnfi in self.instances[instance_uuid]["vnf_instances"]:
            if vnf_id in vnfi.name:
                # NOTE(review): the append to `r` is missing here.
        # NOTE(review): the guard/return around the debug log is missing; the
        # two log statements below belong to the found / not-found paths.
        LOG.debug("Found units: {} for vnf_id: {}"
                  .format([i.name for i in r], vnf_id))
        LOG.warning("No container(s) with name: {0} found.".format(vnf_id))
    # NOTE(review): this function takes no `self`, so it appears to be a
    # @staticmethod whose decorator line is missing from this excerpt —
    # TODO confirm upstream.
    def _vnf_reconfigure_network(vnfi, if_name, net_str=None, new_name=None):
        """
        Reconfigure the network configuration of a specific interface
        of a running container.
        :param vnfi: container instance
        :param if_name: interface name
        :param net_str: network configuration string, e.g., 1.2.3.4/24
        :param new_name: optional new name for the interface
        """
        # assign new ip address
        if net_str is not None:
            intf = vnfi.intf(intf=if_name)
            # NOTE(review): the None-check on `intf` and the call applying
            # the new address are missing from this source excerpt; the two
            # log statements below belong to the success / failure branches.
            LOG.debug("Reconfigured network of %s:%s to %r" %
                      (vnfi.name, if_name, net_str))
            LOG.warning("Interface not found: %s:%s. Network reconfiguration skipped." % (
            # NOTE(review): the arguments of the warning above are truncated
            # in this source excerpt.
        # optionally rename the interface inside the container
        if new_name is not None:
            vnfi.cmd('ip link set', if_name, 'down')
            vnfi.cmd('ip link set', if_name, 'name', new_name)
            vnfi.cmd('ip link set', new_name, 'up')
            LOG.debug("Reconfigured interface name of %s:%s to %s" %
                      (vnfi.name, if_name, new_name))
    def _trigger_emulator_start_scripts_in_vnfis(self, vnfi_list):
        """
        Execute the SON_EMU_CMD / VIM_EMU_CMD entrypoint script (if present
        in the container environment) in each given VNFI.
        """
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            # NOTE(review): the loop head over `env` entries is missing from
            # this source excerpt; `env_var` below is one "KEY=VALUE" string.
                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
                if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
                    LOG.info("Executing script in '{}': {}={}"
                             .format(vnfi.name, var, cmd))
                    # execute command in new thread to ensure that GK is not
                    # blocked while the script runs
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    # NOTE(review): the daemonize/start calls on `t` are
                    # missing from this source excerpt.
                    break  # only execute one command
    def _trigger_emulator_stop_scripts_in_vnfis(self, vnfi_list):
        """
        Execute the SON_EMU_CMD_STOP / VIM_EMU_CMD_STOP script (if present
        in the container environment) in each given VNFI.
        """
        for vnfi in vnfi_list:
            config = vnfi.dcinfo.get("Config", dict())
            env = config.get("Env", list())
            # NOTE(review): the loop head over `env` entries is missing from
            # this source excerpt; `env_var` below is one "KEY=VALUE" string.
                var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
                if var == "SON_EMU_CMD_STOP" or var == "VIM_EMU_CMD_STOP":
                    LOG.info("Executing script in '{}': {}={}"
                             .format(vnfi.name, var, cmd))
                    # execute command in new thread to ensure that GK is not
                    # blocked while the script runs
                    t = threading.Thread(target=vnfi.cmdPrint, args=(cmd,))
                    # NOTE(review): the daemonize/start calls on `t` are
                    # missing from this source excerpt.
                    break  # only execute one command
    def _load_instance_conf_envs(self, cname):
        """
        Try to load an instance-specific env file. If not found,
        just return an empty dict.
        :param cname: container instance name used to build the file name
        """
        if PER_INSTANCE_ENV_CONFIGURATION_FOLDER is None:
            # NOTE(review): the early `return dict()` is missing from this
            # source excerpt.
        # NOTE(review): the `try:` head wrapping the block below is missing.
        path = os.path.expanduser(PER_INSTANCE_ENV_CONFIGURATION_FOLDER)
        path = os.path.join(path, "{}.env.yml".format(cname))
        res = load_yaml(path)
        LOG.info("Loaded instance-specific env file for '{}': {}"
        # NOTE(review): format arguments and the `return res` are missing.
        except BaseException as ex:
            LOG.info("No instance-specific env file found for: {}"
            # NOTE(review): format arguments and the fallback return are
            # missing from this source excerpt.
587 def _unpack_service_package(self
):
589 unzip *.son file and store contents in CATALOG_FOLDER/services/<service_uuid>/
591 LOG
.info("Unzipping: %r" % self
.package_file_path
)
592 with zipfile
.ZipFile(self
.package_file_path
, "r") as z
:
593 z
.extractall(self
.package_content_path
)
    def _load_package_descriptor(self):
        """
        Load the main package descriptor YAML and keep it as dict.
        """
        # NOTE(review): the `os.path.join(` call head is missing from this
        # source excerpt; the path is built from the package content folder.
        self.manifest = load_yaml(
            self.package_content_path, "TOSCA-Metadata/NAPD.yaml"))
    # NOTE(review): the `def _load_nsd(self):` signature and the
    # `nsd_path = None` initialization are missing from this source excerpt.
        """
        Load the entry NSD YAML and keep it as dict.
        """
        if "package_content" in self.manifest:
            for f in self.manifest.get("package_content"):
                if f.get("content-type") == "application/vnd.5gtango.nsd":
                    nsd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(f.get("source")))
                    break  # always use the first NSD for now
            # NOTE(review): the `if nsd_path is None:` guard line is missing.
                raise OnBoardingException("No NSD with type 'application/vnd.5gtango.nsd' found.")
            self.nsd = load_yaml(nsd_path)
            GK.net.deployed_nsds.append(self.nsd)  # TODO this seems strange (remove?)
            LOG.debug("Loaded NSD: %r" % self.nsd.get("name"))
        # NOTE(review): the `else:` head is missing from this excerpt.
            raise OnBoardingException(
                "No 'package_content' section in package manifest:\n{}"
                .format(self.manifest))
    def _load_vnfd(self):
        """
        Load all VNFD YAML files referenced in MANIFEST.MF and keep them in dict.
        """
        # first make a list of all the vnfds in the package
        # NOTE(review): the initializations of `vnfd_set` (and presumably
        # self.vnfds) are missing from this source excerpt.
        if "package_content" in self.manifest:
            for pc in self.manifest.get("package_content"):
                # NOTE(review): the `if pc.get(` call head is missing here;
                # the condition filters entries by VNFD content-type.
                        "content-type") == "application/vnd.5gtango.vnfd":
                    vnfd_path = os.path.join(
                        self.package_content_path,
                        make_relative_path(pc.get("source")))
                    vnfd = load_yaml(vnfd_path)
                    vnfd_set[vnfd.get("name")] = vnfd
            if len(vnfd_set) < 1:
                raise OnBoardingException("No VNFDs found.")
            # then link each vnf_id in the nsd to its vnfd
            for v in self.nsd.get("network_functions"):
                if v.get("vnf_name") in vnfd_set:
                    self.vnfds[v.get("vnf_id")] = vnfd_set[v.get("vnf_name")]
                    LOG.debug("Loaded VNFD: {0} id: {1}"
                              .format(v.get("vnf_name"), v.get("vnf_id")))
    def _connect_elines(self, eline_fwd_links, instance_uuid, subnets):
        """
        Connect all E-LINE links in the NSD
        Attention: This method DOES NOT support multi V/CDU VNFs!
        :param eline_fwd_links: list of E-LINE links in the NSD
        :param: instance_uuid of the service
        :param: subnets list of subnets to be used
        """
        # cookie is used as identifier for the flowrules installed by the dummygatekeeper
        # eg. different services get a unique cookie for their flowrules
        # NOTE(review): the cookie computation is missing from this excerpt.
        for link in eline_fwd_links:
            LOG.info("Found E-Line: {}".format(link))
            src_id, src_if_name = parse_interface(
                link["connection_points_reference"][0])
            dst_id, dst_if_name = parse_interface(
                link["connection_points_reference"][1])
            LOG.info("Searching C/VDU for E-Line: src={}, src_if={}, dst={}, dst_if={}"
                     .format(src_id, src_if_name, dst_id, dst_if_name))
            # handle C/VDUs (ugly hack, only one V/CDU per VNF for now)
            src_units = self._get_vnf_instance_units(instance_uuid, src_id)
            dst_units = self._get_vnf_instance_units(instance_uuid, dst_id)
            if src_units is None or dst_units is None:
                LOG.info("No VNF-VNF link. Skipping: src={}, src_if={}, dst={}, dst_if={}"
                         .format(src_id, src_if_name, dst_id, dst_if_name))
                # NOTE(review): a `continue` statement is missing here.
            # we only support VNFs with one V/CDU right now
            if len(src_units) != 1 or len(dst_units) != 1:
                raise BaseException("LLCM does not support E-LINES for multi V/CDU VNFs.")
            # get the full name from that C/VDU and use it as src_id and dst_id
            src_id = src_units[0].name
            dst_id = dst_units[0].name
            # from here we have all info we need
            LOG.info("Creating E-Line for C/VDU: src={}, src_if={}, dst={}, dst_if={}"
                     .format(src_id, src_if_name, dst_id, dst_if_name))
            # get the two involved VNF instances
            src_vnfi = src_units[0]
            dst_vnfi = dst_units[0]
            # proceed with chaining setup
            # NOTE(review): statements between here and the IP re-assignment
            # (e.g. a setChain call or guard) are missing from this excerpt.
            if src_vnfi is not None and dst_vnfi is not None:
                # re-configure the VNFs IP assignment and ensure that a new
                # subnet is used for each E-Link
                eline_net = subnets.pop(0)
                # NOTE(review): the prefix-length arguments of the two
                # format calls below are missing from this source excerpt.
                ip1 = "{0}/{1}".format(str(eline_net[1]),
                ip2 = "{0}/{1}".format(str(eline_net[2]),
                # check if VNFs have fixed IPs (ip/address field in VNFDs)
                if (self._get_vnfd_cp_from_vnfi(
                        src_vnfi, src_if_name).get("ip") is None and
                        self._get_vnfd_cp_from_vnfi(
                            src_vnfi, src_if_name).get("address") is None):
                    self._vnf_reconfigure_network(src_vnfi, src_if_name, ip1)
                # check if VNFs have fixed IPs (ip field in VNFDs)
                if (self._get_vnfd_cp_from_vnfi(
                        dst_vnfi, dst_if_name).get("ip") is None and
                        self._get_vnfd_cp_from_vnfi(
                            dst_vnfi, dst_if_name).get("address") is None):
                    self._vnf_reconfigure_network(dst_vnfi, dst_if_name, ip2)
            # NOTE(review): the chaining call head (presumably
            # GK.net.setChain(...)) is missing; the lines below are its
            # remaining keyword arguments.
                vnf_src_interface=src_if_name, vnf_dst_interface=dst_if_name,
                bidirectional=BIDIRECTIONAL_CHAIN, cmd="add-flow", cookie=cookie, priority=10)
    def _get_vnfd_cp_from_vnfi(self, vnfi, ifname):
        """
        Gets the connection point data structure from the VNFD
        of the given VNFI using ifname.
        """
        if vnfi.vnfd is None:
            # NOTE(review): the early return for the no-VNFD case is missing
            # from this source excerpt.
        cps = vnfi.vnfd.get("connection_points")
        # NOTE(review): the loop head over `cps` is missing here; `cp` below
        # is one connection point dict.
            if cp.get("id") == ifname:
                # NOTE(review): the `return cp` statement is missing here.
    def _connect_elans(self, elan_fwd_links, instance_uuid, subnets):
        """
        Connect all E-LAN/E-Tree links in the NSD
        This method supports multi-V/CDU VNFs if the connection
        point names of the DUs are the same as the ones in the NSD.
        :param elan_fwd_links: list of E-LAN links in the NSD
        :param: instance_uuid of the service
        :param: subnets list of subnets to be used
        """
        for link in elan_fwd_links:
            # NOTE(review): the initialization of `elan_vnf_list` is missing
            # from this source excerpt.
            # pick a fresh subnet for this E-LAN
            lan_net = subnets.pop(0)
            lan_hosts = list(lan_net.hosts())
            # generate lan ip address for all interfaces (of all involved (V/CDUs))
            for intf_ref in link["connection_points_reference"]:
                vnf_id, intf_name = parse_interface(intf_ref)
                # NOTE(review): the guard around this `continue` is missing.
                    continue  # skip references to NS connection points
                units = self._get_vnf_instance_units(instance_uuid, vnf_id)
                # NOTE(review): the guard around this `continue` is missing.
                    continue  # skip if no deployment unit is present
                # iterate over all involved deployment units
                # NOTE(review): the `for uvnfi in units:` head is missing.
                # Attention: we apply a simplification for multi DU VNFs here:
                # the connection points of all involved DUs have to have the same
                # name as the connection points of the surrounding VNF to be mapped.
                # This is because we do not consider links specified in the VNFDs
                    container_name = uvnfi.name
                    # get the interface of the unit
                    intf = self._get_vnfd_cp_from_vnfi(uvnfi, intf_name)
                    # check if there is a manually assigned address
                    # NOTE(review): the `ip_address = None` default is
                    # missing from this source excerpt.
                    if intf.get("address"):
                        ip_address = intf.get("address")
                    if ip_address is None:
                        # automatically asign an IP from our pool
                        # NOTE(review): the prefix-length argument of this
                        # format call is missing from this excerpt.
                        ip_address = "{0}/{1}".format(str(lan_hosts.pop(0)),
                    # NOTE(review): the LOG.debug( call head is missing here.
                        "Setting up E-LAN/E-Tree interface. (%s:%s) -> %s" % (
                            container_name, intf_name, ip_address))
                    # re-configure the VNFs IP assignment and ensure that a new subnet is used for each E-LAN
                    # E-LAN relies on the learning switch capability of Ryu which has to be turned on in the topology
                    # (DCNetwork(controller=RemoteController, enable_learning=True)), so no explicit chaining is
                    # needed here
                    vnfi = self._get_vnf_instance(instance_uuid, container_name)
                    self._vnf_reconfigure_network(vnfi, intf_name, ip_address)
                    # add this vnf and interface to the E-LAN for tagging
                    elan_vnf_list.append(
                        {'name': container_name, 'interface': intf_name})
            # install the VLAN tags for this E-LAN
            GK.net.setLAN(elan_vnf_list)
792 def _load_docker_files(self
):
794 Get all paths to Dockerfiles from VNFDs and store them in dict.
797 for vnf_id
, v
in self
.vnfds
.iteritems():
798 for vu
in v
.get("virtual_deployment_units", []):
799 vnf_container_name
= get_container_name(vnf_id
, vu
.get("id"))
800 if vu
.get("vm_image_format") == "docker":
801 vm_image
= vu
.get("vm_image")
802 docker_path
= os
.path
.join(
803 self
.package_content_path
,
804 make_relative_path(vm_image
))
805 self
.local_docker_files
[vnf_container_name
] = docker_path
806 LOG
.debug("Found Dockerfile (%r): %r" % (vnf_container_name
, docker_path
))
807 for cu
in v
.get("cloudnative_deployment_units", []):
808 vnf_container_name
= get_container_name(vnf_id
, cu
.get("id"))
809 image
= cu
.get("image")
810 docker_path
= os
.path
.join(
811 self
.package_content_path
,
812 make_relative_path(image
))
813 self
.local_docker_files
[vnf_container_name
] = docker_path
814 LOG
.debug("Found Dockerfile (%r): %r" % (vnf_container_name
, docker_path
))
    def _load_docker_urls(self):
        """
        Get all URLs to pre-build docker images in some repo.
        Stored in self.remote_docker_image_urls, keyed by container name.
        """
        for vnf_id, v in list(self.vnfds.items()):
            for vu in v.get("virtual_deployment_units", []):
                vnf_container_name = get_container_name(vnf_id, vu.get("id"))
                if vu.get("vm_image_format") == "docker":
                    url = vu.get("vm_image")
                    # NOTE(review): the `if url is not None:` guard line is
                    # missing from this source excerpt.
                        url = url.replace("http://", "")
                        self.remote_docker_image_urls[vnf_container_name] = url
                        LOG.debug("Found Docker image URL (%r): %r" %
                                  # NOTE(review): first format argument line
                                  # is missing from this excerpt.
                                  self.remote_docker_image_urls[vnf_container_name]))
            for cu in v.get("cloudnative_deployment_units", []):
                vnf_container_name = get_container_name(vnf_id, cu.get("id"))
                url = cu.get("image")
                # NOTE(review): the `if url is not None:` guard line is
                # missing from this source excerpt.
                    url = url.replace("http://", "")
                    self.remote_docker_image_urls[vnf_container_name] = url
                    LOG.debug("Found Docker image URL (%r): %r" %
                              # NOTE(review): first format argument line is
                              # missing from this excerpt.
                              self.remote_docker_image_urls[vnf_container_name]))
    def _build_images_from_dockerfiles(self):
        """
        Build Docker images for each local Dockerfile found in the package: self.local_docker_files
        """
        if GK_STANDALONE_MODE:
            return  # do not build anything in standalone mode
        # NOTE(review): the creation of the Docker client `dc` (presumably
        # DockerClient()) is missing from this source excerpt.
        LOG.info("Building %d Docker images (this may take several minutes) ..." % len(
            self.local_docker_files))
        for k, v in list(self.local_docker_files.items()):
            # build with the Dockerfile's directory as context, tag = name
            for line in dc.build(path=v.replace(
                    "Dockerfile", ""), tag=k, rm=False, nocache=False):
                LOG.debug("DOCKER BUILD: %s" % line)
            LOG.info("Docker image created: %s" % k)
    def _pull_predefined_dockerimages(self):
        """
        If the package contains URLs to pre-build Docker images, we download them with this method.
        """
        # NOTE(review): the creation of the Docker client `dc` is missing
        # from this source excerpt.
        for url in list(self.remote_docker_image_urls.values()):
            # only pull if not present (speedup for development)
            # NOTE(review): a force-pull guard is missing from this excerpt.
            if len(dc.images.list(name=url)) > 0:
                LOG.debug("Image %r present. Skipping pull." % url)
                # NOTE(review): a `continue` statement is missing here.
            LOG.info("Pulling image: %r" % url)
            # this seems to fail with latest docker api version 2.0.2
            # dc.images.pull(url,
            #                insecure_registry=True)
            # using docker cli instead
            # NOTE(review): the subprocess-based `docker pull` invocation is
            # missing from this source excerpt.
879 def _check_docker_image_exists(self
, image_name
):
881 Query the docker service and check if the given image exists
882 :param image_name: name of the docker image
885 return len(DockerClient().images
.list(name
=image_name
)) > 0
    def _place(self, vnfd, vnfid, vdu, ssiid):
        """
        Do placement. Return the name of the DC to place
        the given deployment unit in.
        """
        assert(len(self.vnfds) > 0)
        assert(len(GK.dcs) > 0)
        if PLACEMENT_ALGORITHM_OBJ is None:
            LOG.error("No placement algorithm given. Using FirstDcPlacement!")
            p = FirstDcPlacement()
        # NOTE(review): the `else:` head is missing from this excerpt.
            p = PLACEMENT_ALGORITHM_OBJ
        cname = get_container_name(vnfid, vdu.get("id"), ssiid)
        rdc = p.place(GK.dcs, vnfd, vnfid, vdu, ssiid, cname)
        LOG.info("Placement: '{}' --> '{}'".format(cname, rdc))
        # NOTE(review): the `return rdc` statement is missing from this
        # source excerpt.
    def _calculate_cpu_cfs_values(self, cpu_time_percentage):
        """
        Calculate cpu period and quota for CFS
        :param cpu_time_percentage: percentage of overall CPU to be used
        :return: cpu_period, cpu_quota
        """
        if cpu_time_percentage is None:
            # NOTE(review): the early return for the None case is missing
            # from this source excerpt.
        if cpu_time_percentage < 0:
            # NOTE(review): the early return for negative values is missing
            # from this source excerpt.
        # (see: https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt)
        # Attention minimum cpu_quota is 1ms (micro)
        cpu_period = 1000000  # lets consider a fixed period of 1000000 microseconds for now
        LOG.debug("cpu_period is %r, cpu_percentage is %r" %
                  (cpu_period, cpu_time_percentage))
        # calculate the fraction of cpu time for this container
        cpu_quota = cpu_period * cpu_time_percentage
        # ATTENTION >= 1000 to avoid a invalid argument system error ... no
        # smaller values allowed
        LOG.debug("cpu_quota before correcting: %r" % cpu_quota)
        # NOTE(review): the `if cpu_quota < 1000:` correction assigning the
        # minimum quota is missing from this source excerpt.
            LOG.warning("Increased CPU quota to avoid system error.")
        LOG.debug("Calculated: cpu_period=%f / cpu_quota=%f" %
                  (cpu_period, cpu_quota))
        return int(cpu_period), int(cpu_quota)
933 Some (simple) placement algorithms
class FirstDcPlacement(object):
    """
    Placement: Always use one and the same data center from the GK.dcs dict.
    """

    def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
        """Return the first DC of the given dict; all other args are ignored."""
        available_dcs = list(dcs.values())
        return available_dcs[0]
class RoundRobinDcPlacement(object):
    """
    Placement: Distribute VNFs across all available DCs in a round robin fashion.
    """
    # NOTE(review): the `__init__` initializing `self.count = 0` is missing
    # from this source excerpt.

    def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
        """Pick the next DC in round-robin order based on self.count."""
        dcs_list = list(dcs.values())
        rdc = dcs_list[self.count % len(dcs_list)]
        self.count += 1  # inc. count to use next DC
        # NOTE(review): the `return rdc` statement is missing from this
        # source excerpt.
class StaticConfigPlacement(object):
    """
    Placement: Fixed assignment based on config file.
    """

    def __init__(self, path=None):
        if path is None:
            path = "static_placement.yml"
        path = os.path.expanduser(path)
        # mapping: container name -> DC name; empty on load failure
        self.static_placement = dict()
        try:
            self.static_placement = load_yaml(path)
        except BaseException as ex:
            LOG.error(ex)
            LOG.error("Couldn't load placement from {}"
                      .format(path))
        LOG.info("Loaded static placement: {}"
                 .format(self.static_placement))

    def place(self, dcs, vnfd, vnfid, vdu, ssiid, cname):
        # check for container name entry
        if cname not in self.static_placement:
            LOG.error("Coudn't find {} in placement".format(cname))
            LOG.error("Using first DC as fallback!")
            return list(dcs.values())[0]
        # look up the configured candidate DC
        candidate_dc = self.static_placement.get(cname)
        # candidate DC must actually be available
        if candidate_dc not in dcs:
            LOG.error("Coudn't find DC {}".format(candidate_dc))
            LOG.error("Using first DC as fallback!")
            return list(dcs.values())[0]
        return dcs.get(candidate_dc)
998 Resource definitions and API endpoints
class Packages(fr.Resource):
    """REST endpoint to on-board and list service packages."""

    def post(self):
        """
        Upload a *.son service package to the dummy gatekeeper.

        We expect request with a *.son file and store it in UPLOAD_FOLDER
        :return: (JSON dict with service_uuid/size/sha1/error, HTTP status)
        """
        try:
            LOG.info("POST /packages called")
            # lets search for the package in the request
            # make API more robust: file can be in data or in files field
            is_file_object = False
            if "package" in request.files:
                son_file = request.files["package"]
                is_file_object = True
            elif len(request.data) > 0:
                son_file = request.data
            else:
                return {"service_uuid": None, "size": 0, "sha1": None,
                        "error": "upload failed. file not found."}, 500
            # generate a uuid to reference this package
            service_uuid = str(uuid.uuid4())
            file_hash = hashlib.sha1(str(son_file).encode()).hexdigest()
            # ensure that upload folder exists
            ensure_dir(UPLOAD_FOLDER)
            upload_path = os.path.join(UPLOAD_FOLDER, "%s.tgo" % service_uuid)
            # store *.son file to disk
            if is_file_object:
                son_file.save(upload_path)
            else:
                with open(upload_path, 'wb') as f:
                    f.write(son_file)
            size = os.path.getsize(upload_path)

            # first stop and delete any other running services
            service_list = copy.copy(GK.services)
            for service_uuid in service_list:
                instances_list = copy.copy(
                    GK.services[service_uuid].instances)
                for instance_uuid in instances_list:
                    # valid service and instance UUID, stop service
                    GK.services.get(service_uuid).stop_service(
                        instance_uuid)
                    LOG.info("service instance with uuid %r stopped." %
                             instance_uuid)

            # create a service object and register it
            s = Service(service_uuid, file_hash, upload_path)
            GK.register_service_package(service_uuid, s)

            # automatically deploy the service
            # ok, we have a service uuid, lets start the service
            GK.services.get(service_uuid).start_service()

            # generate the JSON result
            return {"service_uuid": service_uuid, "size": size,
                    "sha1": file_hash, "error": None}, 201
        except BaseException:
            LOG.exception("Service package upload failed:")
            return {"service_uuid": None, "size": 0,
                    "sha1": None, "error": "upload failed"}, 500

    def get(self):
        """
        Return a list of package descriptor headers.
        Fakes the behavior of 5GTANGO's GK API to be
        compatible with tng-cli.
        """
        LOG.info("GET /packages")
        result = list()
        for suuid, sobj in GK.services.items():
            pkg = dict()
            pkg["pd"] = dict()
            pkg["uuid"] = suuid
            pkg["pd"]["name"] = sobj.manifest.get("name")
            pkg["pd"]["version"] = sobj.manifest.get("version")
            pkg["created_at"] = sobj.created_at
            result.append(pkg)
        return result, 200, CORS_HEADER
class Services(fr.Resource):
    """REST endpoint listing all on-boarded services."""

    def get(self):
        """
        Return a list of services.
        Fakes the behavior of 5GTANGO's GK API to be
        compatible with tng-cli.
        """
        LOG.info("GET /services")
        result = list()
        for suuid, sobj in GK.services.items():
            # one entry per on-boarded service
            service = dict()
            service["nsd"] = dict()
            service["uuid"] = suuid
            service["nsd"]["name"] = sobj.nsd.get("name")
            service["nsd"]["version"] = sobj.nsd.get("version")
            service["created_at"] = sobj.created_at
            result.append(service)
        return result, 200, CORS_HEADER
class Instantiations(fr.Resource):
    """REST endpoint to start, list and stop service instances."""

    def post(self):
        """
        Instantiate a service specified by its UUID.
        Will return a new UUID to identify the running service instance.
        """
        LOG.info("POST /instantiations (or /requests) called")
        # try to extract the service uuid from the request
        json_data = request.get_json(force=True)
        service_uuid = json_data.get("service_uuid")
        service_name = json_data.get("service_name")
        if service_name is None:
            service_name = service_uuid
        # first try to find by service_name
        if service_name is not None:
            for s_uuid, s in GK.services.items():
                if s.manifest.get("name") == service_name:
                    LOG.info("Searched for: {}. Found service w. UUID: {}"
                             .format(service_name, s_uuid))
                    service_uuid = s_uuid
        # lets be a bit fuzzy here to make testing easier
        if (service_uuid is None or service_uuid ==
                "latest") and len(GK.services) > 0:
            # if we don't get a service uuid, we simple start the first service
            service_uuid = list(GK.services.keys())[0]
        if service_uuid in GK.services:
            # ok, we have a service uuid, lets start the service
            service_instance_uuid = GK.services.get(
                service_uuid).start_service()
            # multiple ID fields to be compatible with tng-bench and tng-cli
            return ({"service_instance_uuid": service_instance_uuid,
                     "id": service_instance_uuid}, 201)
        LOG.error("Service not found: {}/{}".format(service_uuid, service_name))
        return "Service not found", 404

    def get(self):
        """
        Returns a list of UUIDs containing all running services.
        :return: dict / list
        """
        LOG.debug("GET /instantiations or /api/v3/records/services")
        # return {"service_instantiations_list": [
        #     list(s.instances.keys()) for s in GK.services.values()]}
        result = list()
        for suuid, sobj in GK.services.items():
            for iuuid, iobj in sobj.instances.items():
                inst = dict()
                inst["uuid"] = iobj.get("uuid")
                inst["instance_name"] = "{}-inst.{}".format(
                    iobj.get("name"), iobj.get("ssiid"))
                inst["status"] = "running"
                inst["created_at"] = iobj.get("created_at")
                result.append(inst)
        return result, 200, CORS_HEADER

    def delete(self):
        """
        Stops a running service specified by its service and instance UUID.
        """
        # try to extract the service and instance UUID from the request
        json_data = request.get_json(force=True)
        service_uuid_input = json_data.get("service_uuid")
        instance_uuid_input = json_data.get("service_instance_uuid")
        if len(GK.services) < 1:
            return "No service on-boarded.", 404
        if service_uuid_input is None:
            # if we don't get a service uuid we stop all services
            service_uuid_list = list(GK.services.keys())
            LOG.info("No service_uuid given, stopping all.")
        else:
            service_uuid_list = [service_uuid_input]
        for service_uuid in service_uuid_list:
            if instance_uuid_input is None:
                instance_uuid_list = list(
                    GK.services[service_uuid].instances.keys())
            else:
                instance_uuid_list = [instance_uuid_input]
            # for all service instances
            for instance_uuid in instance_uuid_list:
                if (service_uuid in GK.services and
                        instance_uuid in GK.services[service_uuid].instances):
                    # valid service and instance UUID, stop service
                    GK.services.get(service_uuid).stop_service(instance_uuid)
                    LOG.info("Service instance with uuid %r stopped." %
                             instance_uuid)
        return "Service(s) stopped.", 200
class Exit(fr.Resource):
    """REST endpoint to shut down the emulator."""

    def post(self):
        """
        Stop the running Containernet instance regardless of data transmitted
        """
        # all DCs share one network; stopping it via the first DC is enough
        first_dc = list(GK.dcs.values())[0]
        first_dc.net.stop()
def generate_subnets(prefix, base, subnet_size=50, mask=24):
    """Generate a list of consecutive IPv4 subnets.

    :param prefix: first two octets as a string, e.g. "10.0"
    :param base: third octet of the first generated subnet
    :param subnet_size: number of subnets to generate
    :param mask: prefix length of each subnet
    :return: list of ipaddress.IPv4Network objects
    """
    # Fix: removed the legacy Python 2 branch that called the builtin
    # `unicode(subnet)` — `unicode` does not exist on Python 3 and would
    # raise NameError; ip_network() accepts str directly.
    subnets = []
    for net in range(base, base + subnet_size):
        subnet = "{0}.{1}.0/{2}".format(prefix, net, mask)
        subnets.append(ipaddress.ip_network(subnet))
    return subnets
def reset_subnets():
    """(Re-)initialize the global subnet pools for generated interfaces."""
    global ELAN_SUBNETS
    global ELINE_SUBNETS
    # private subnet definitions for the generated interfaces:
    # E-LAN links draw from 30.0.x.0/24, E-LINE links from 20.0.x.0/24
    ELAN_SUBNETS = generate_subnets('30.0', 0, subnet_size=50, mask=24)
    ELINE_SUBNETS = generate_subnets('20.0', 0, subnet_size=50, mask=24)
def initialize_GK():
    """(Re-)create the single, global gatekeeper object."""
    global GK
    GK = Gatekeeper()


# create a single, global GK object
GK = None
initialize_GK()
# setup Flask and the REST API endpoints
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 512 * 1024 * 1024  # 512 MB max upload
api = fr.Api(app)
api.add_resource(Packages, '/packages', '/api/v2/packages', '/api/v3/packages')
api.add_resource(Services, '/services', '/api/v2/services', '/api/v3/services')
api.add_resource(Instantiations, '/instantiations',
                 '/api/v2/instantiations', '/api/v2/requests', '/api/v3/requests',
                 '/api/v3/records/services')
api.add_resource(Exit, '/emulator/exit')
def start_rest_api(host, port, datacenters=None):
    """Attach the datacenters to the global GK and serve the REST API.

    Blocks forever in WSGIServer.serve_forever().

    :param host: interface to bind to
    :param port: TCP port to listen on
    :param datacenters: dict of emulator datacenters exposed via the API
    """
    # Fix: replaced the mutable default argument `datacenters=dict()`
    # (a single dict shared across all calls) with the None-sentinel idiom.
    if datacenters is None:
        datacenters = dict()
    GK.dcs = datacenters
    GK.net = get_dc_network()
    # start the Flask server (not the best performance but ok for our use case)
    # app.run(host=host,
    # use_reloader=False # this is needed to run Flask in a non-main thread
    # NOTE(review): the opened /dev/null handle is never closed; harmless
    # here since serve_forever() blocks for the process lifetime.
    http_server = WSGIServer((host, port), app, log=open("/dev/null", "w"))
    http_server.serve_forever()
1274 def stop_rest_api():
def ensure_dir(name):
    """Create directory `name` (including parents) if it does not exist.

    Fix: pass exist_ok=True so a concurrent creation between the
    exists() check and makedirs() no longer raises FileExistsError.
    """
    if not os.path.exists(name):
        os.makedirs(name, exist_ok=True)
def load_yaml(path):
    """Read the YAML file at `path` and return its parsed content.

    Returns an empty dict when the file cannot be parsed (the parse
    error is logged), so callers always get a defined value.
    """
    with open(path, "r") as f:
        # SafeLoader: never construct arbitrary Python objects from the
        # file (yaml.load without an explicit Loader is unsafe/deprecated).
        r = dict()
        try:
            r = yaml.load(f, Loader=yaml.SafeLoader)
        except yaml.YAMLError as exc:
            LOG.exception("YAML parse error: %r" % str(exc))
    return r
def make_relative_path(path):
    """Strip a leading 'file://' scheme and a leading '/' from `path`."""
    if path.startswith("file://"):
        path = path[len("file://"):]
    if path.startswith("/"):
        path = path[1:]
    return path
def get_dc_network():
    """
    retrieve the DCnetwork where this dummygatekeeper (GK) connects to.
    Assume at least 1 datacenter is connected to this GK, and that all
    datacenters belong to the same DCNetwork.
    """
    assert len(GK.dcs) > 0
    any_dc = list(GK.dcs.values())[0]
    return any_dc.net
def parse_interface(interface_name):
    """
    convert the interface name in the nsd to the according vnf_id,
    vnf_interface names
    :param interface_name: e.g. "vnf1:eth0" or a plain interface name
    :return: (vnf_id or None, vnf_interface)
    """
    if ':' not in interface_name:
        # no VNF prefix given
        return None, interface_name
    vnf_id, vnf_interface = interface_name.split(':')
    return vnf_id, vnf_interface
def get_container_name(vnf_id, vdu_id, ssiid=None):
    """Build the container name '<vnf_id>.<vdu_id>[.<ssiid>]'."""
    parts = [vnf_id, vdu_id]
    if ssiid is not None:
        parts.append(ssiid)
    return ".".join(str(p) for p in parts)
def get_triple_id(descr):
    """Return the 'vendor.name.version' identifier of a descriptor."""
    vendor = descr.get("vendor")
    name = descr.get("name")
    version = descr.get("version")
    return "{}.{}.{}".format(vendor, name, version)
def update_port_mapping_multi_instance(ssiid, port_bindings):
    """
    Port_bindings are used to expose ports of the deployed containers.
    They would collide if we deploy multiple service instances.
    This function adds an offset to them which is based on the
    short service instance id (SSIID) and MULTI_INSTANCE_PORT_OFFSET.
    """
    shift = MULTI_INSTANCE_PORT_OFFSET * ssiid
    return {container_port: host_port + shift
            for container_port, host_port in port_bindings.items()}
if __name__ == '__main__':
    # Allow running the API in standalone mode.
    GK_STANDALONE_MODE = True
    logging.getLogger("werkzeug").setLevel(logging.INFO)
    start_rest_api("0.0.0.0", 8000)