Extract resource limits from deployment units.
"""
# defaults
- cpu_list = "1"
+ cpu_list = None
cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(1.0))
- mem_limit = 0
+ mem_limit = None
# update from descriptor
if "resource_requirements" in deployment_unit:
res_req = deployment_unit.get("resource_requirements")
- cpu_list = res_req.get("cpu").get("cores")
+ cpu_list = res_req.get("cpu").get("cpuset")
if cpu_list is None:
cpu_list = res_req.get("cpu").get("vcpus")
- cpu_bw = res_req.get("cpu").get("cpu_bw", 1.0)
+ if cpu_list is not None:
+ # attention: docker expects list as string w/o spaces:
+ cpu_list = str(cpu_list).replace(" ", "").strip()
+ cpu_bw = res_req.get("cpu").get("cpu_bw")
+ if cpu_bw is None:
+ cpu_bw = 1.0
cpu_period, cpu_quota = self._calculate_cpu_cfs_values(float(cpu_bw))
- mem_num = str(res_req.get("memory").get("size", 2))
+ mem_limit = res_req.get("memory").get("size")
mem_unit = str(res_req.get("memory").get("size_unit", "GB"))
- mem_limit = float(mem_num)
- if mem_unit == "GB":
- mem_limit = mem_limit * 1024 * 1024 * 1024
- elif mem_unit == "MB":
- mem_limit = mem_limit * 1024 * 1024
- elif mem_unit == "KB":
- mem_limit = mem_limit * 1024
- mem_limit = int(mem_limit)
+ if mem_limit is not None:
+ mem_limit = int(mem_limit)
+ # to bytes
+ if "G" in mem_unit:
+ mem_limit = mem_limit * 1024 * 1024 * 1024
+ elif "M" in mem_unit:
+ mem_limit = mem_limit * 1024 * 1024
+ elif "K" in mem_unit:
+ mem_limit = mem_limit * 1024
return cpu_list, cpu_period, cpu_quota, mem_limit
def _start_vnfd(self, vnfd, vnf_id, **kwargs):
image=docker_image_name,
cpu_quota=cpu_quota,
cpu_period=cpu_period,
- cpuset=cpu_list,
+ cpuset_cpus=cpu_list,
mem_limit=mem_limit,
volumes=volumes,
properties=cenv, # environment
"vnf_id" taken from an NSD.
:return: list
"""
+ if vnf_id is None:
+ return None
r = list()
for vnfi in self.instances[instance_uuid]["vnf_instances"]:
if vnf_id in vnfi.name:
env = config.get("Env", list())
for env_var in env:
var, cmd = map(str.strip, map(str, env_var.split('=', 1)))
- # LOG.debug("%r = %r" % (var, cmd))
if var == "SON_EMU_CMD" or var == "VIM_EMU_CMD":
LOG.info("Executing script in '{}': {}={}"
.format(vnfi.name, var, cmd))
cookie = 1
for link in eline_fwd_links:
LOG.info("Found E-Line: {}".format(link))
- # check if we need to deploy this link when its a management link:
- if USE_DOCKER_MGMT:
- if self.check_mgmt_interface(
- link["connection_points_reference"]):
- continue
-
src_id, src_if_name = parse_interface(
link["connection_points_reference"][0])
dst_id, dst_if_name = parse_interface(
link["connection_points_reference"][1])
- setChaining = False
- LOG.info("Creating E-Line: src={}, dst={}"
- .format(src_id, dst_id))
+ LOG.info("Searching C/VDU for E-Line: src={}, src_if={}, dst={}, dst_if={}"
+ .format(src_id, src_if_name, dst_id, dst_if_name))
+            # handle C/VDUs (ugly hack: we only support one C/VDU per VNF for now)
+ src_units = self._get_vnf_instance_units(instance_uuid, src_id)
+ dst_units = self._get_vnf_instance_units(instance_uuid, dst_id)
+ if src_units is None or dst_units is None:
+ LOG.info("No VNF-VNF link. Skipping: src={}, src_if={}, dst={}, dst_if={}"
+ .format(src_id, src_if_name, dst_id, dst_if_name))
+ return
+ # we only support VNFs with one V/CDU right now
+ if len(src_units) != 1 or len(dst_units) != 1:
+ raise BaseException("LLCM does not support E-LINES for multi V/CDU VNFs.")
+ # get the full name from that C/VDU and use it as src_id and dst_id
+ src_id = src_units[0].name
+ dst_id = dst_units[0].name
+ # from here we have all info we need
+ LOG.info("Creating E-Line for C/VDU: src={}, src_if={}, dst={}, dst_if={}"
+ .format(src_id, src_if_name, dst_id, dst_if_name))
# get involved vnfis
- src_vnfi = self._get_vnf_instance(instance_uuid, src_id)
- dst_vnfi = self._get_vnf_instance(instance_uuid, dst_id)
-
+ src_vnfi = src_units[0]
+ dst_vnfi = dst_units[0]
+ # proceed with chaining setup
+ setChaining = False
if src_vnfi is not None and dst_vnfi is not None:
setChaining = True
# re-configure the VNFs IP assignment and ensure that a new
"""
# try to extract the service and instance UUID from the request
json_data = request.get_json(force=True)
- service_uuid = json_data.get("service_uuid")
- instance_uuid = json_data.get("service_instance_uuid")
-
+ service_uuid_input = json_data.get("service_uuid")
+ instance_uuid_input = json_data.get("service_instance_uuid")
+ if len(GK.services) < 1:
+ return "No service on-boarded.", 404
# try to be fuzzy
- if service_uuid is None and len(GK.services) > 0:
- # if we don't get a service uuid, we simply stop the last service
- # in the list
- service_uuid = list(GK.services.iterkeys())[0]
- if instance_uuid is None and len(
- GK.services[service_uuid].instances) > 0:
- instance_uuid = list(
- GK.services[service_uuid].instances.iterkeys())[0]
-
- if service_uuid in GK.services and instance_uuid in GK.services[service_uuid].instances:
- # valid service and instance UUID, stop service
- GK.services.get(service_uuid).stop_service(instance_uuid)
- return "service instance with uuid %r stopped." % instance_uuid, 200
- return "Service not found", 404
+ if service_uuid_input is None:
+        # if we don't get a service uuid, we stop all services
+ service_uuid_list = list(GK.services.iterkeys())
+ LOG.info("No service_uuid given, stopping all.")
+ else:
+ service_uuid_list = [service_uuid_input]
+ # for each service
+ for service_uuid in service_uuid_list:
+ if instance_uuid_input is None:
+ instance_uuid_list = list(
+ GK.services[service_uuid].instances.iterkeys())
+ else:
+ instance_uuid_list = [instance_uuid_input]
+ # for all service instances
+ for instance_uuid in instance_uuid_list:
+ if (service_uuid in GK.services and
+ instance_uuid in GK.services[service_uuid].instances):
+ # valid service and instance UUID, stop service
+ GK.services.get(service_uuid).stop_service(instance_uuid)
+ LOG.info("Service instance with uuid %r stopped." % instance_uuid)
+ return "Service(s) stopped.", 200
class Exit(fr.Resource):