if name not in vim_threads["names"]:
vim_threads["names"].append(name)
return name
- name = datacenter_name[:16] + "." + tenant_name[:16]
- if name not in vim_threads["names"]:
- vim_threads["names"].append(name)
- return name
- name = datacenter_id + "-" + tenant_id
+ if tenant_name:
+ name = datacenter_name[:16] + "." + tenant_name[:16]
+ if name not in vim_threads["names"]:
+ vim_threads["names"].append(name)
+ return name
+ name = datacenter_id
vim_threads["names"].append(name)
return name
except Exception as e:
raise NfvoException("Error at VIM {}; {}: {}".format(vim["type"], type(e).__name__, e),
httperrors.Internal_Server_Error)
- thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['vim_tenant_id'], vim['vim_tenant_name'],
+ thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['datacenter_id'], vim['vim_tenant_name'],
vim['vim_tenant_id'])
new_thread = vim_thread.vim_thread(task_lock, thread_name, vim['datacenter_name'],
vim['datacenter_tenant_id'], db=db, db_lock=db_lock, ovim=ovim)
nb_deleted += len(actions_to_delete)
if len(actions_to_delete) < 100:
break
+ # clean locks
+ mydb.update_rows("vim_wim_actions", UPDATE={"worker": None}, WHERE={"worker<>": None})
+
if nb_deleted:
logger.debug("Removed {} unused vim_wim_actions".format(nb_deleted))
db_image["uuid"] = image_uuid
return None
def get_resource_allocation_params(quota_descriptor):
    """
    Read a quota descriptor from the vnfd and extract its resource allocation properties.

    :param quota_descriptor: cpu/mem/vif/disk-io quota descriptor (dict-like) as found
        in the guest-epa section of the VNF descriptor
    :return: dict holding the "limit", "reserve" and "shares" values from the
        descriptor converted to int; entries that are absent or falsy are omitted
    """
    quota = {}
    # Copy only the fields that are present with a truthy value, coercing to int.
    for field in ("limit", "reserve", "shares"):
        value = quota_descriptor.get(field)
        if value:
            quota[field] = int(value)
    return quota
+
def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
"""
Parses an OSM IM vnfd_catalog and insert at DB
try:
myvnfd = vnfd_catalog.vnfd()
try:
- pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True)
+ pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True,
+ skip_unknown=True)
except Exception as e:
raise NfvoException("Error. Invalid VNF descriptor format " + str(e), httperrors.Bad_Request)
db_vnfs = []
numa["cores"] = max(db_flavor["vcpus"], 1)
else:
numa["threads"] = max(db_flavor["vcpus"], 1)
+ epa_vcpu_set = True
+ if vdu["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
+ extended["cpu-quota"] = get_resource_allocation_params(vdu["guest-epa"].get("cpu-quota"))
+ if vdu["guest-epa"].get("mem-quota"):
+ extended["mem-quota"] = get_resource_allocation_params(vdu["guest-epa"].get("mem-quota"))
+ if vdu["guest-epa"].get("disk-io-quota"):
+ extended["disk-io-quota"] = get_resource_allocation_params(vdu["guest-epa"].get("disk-io-quota"))
+ if vdu["guest-epa"].get("vif-quota"):
+ extended["vif-quota"] = get_resource_allocation_params(vdu["guest-epa"].get("vif-quota"))
if numa:
extended["numas"] = [numa]
if extended:
"'member-vdus':'{vdu}'. Reference to a non-existing vdu".format(
vnf=vnfd_id, pg=pg_name, vdu=vdu_id),
httperrors.Bad_Request)
- if vdu_id2db_table_index[vdu_id]:
- db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
+ db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
# TODO consider the case of isolation and not colocation
# if pg.get("strategy") == "ISOLATION":
try:
mynsd = nsd_catalog.nsd()
try:
- pybindJSONDecoder.load_ietf_json(nsd_descriptor, None, None, obj=mynsd)
+ pybindJSONDecoder.load_ietf_json(nsd_descriptor, None, None, obj=mynsd, skip_unknown=True)
except Exception as e:
raise NfvoException("Error. Invalid NS descriptor format: " + str(e), httperrors.Bad_Request)
db_scenarios = []
# <-- WIM
descriptor_net = {}
- if instance_dict.get("networks") and instance_dict["networks"].get(sce_net["name"]):
- descriptor_net = instance_dict["networks"][sce_net["name"]]
+ if instance_dict.get("networks"):
+ if sce_net.get("uuid") in instance_dict["networks"]:
+ descriptor_net = instance_dict["networks"][sce_net["uuid"]]
+ descriptor_net_name = sce_net["uuid"]
+ elif sce_net.get("osm_id") in instance_dict["networks"]:
+ descriptor_net = instance_dict["networks"][sce_net["osm_id"]]
+ descriptor_net_name = sce_net["osm_id"]
+ elif sce_net["name"] in instance_dict["networks"]:
+ descriptor_net = instance_dict["networks"][sce_net["name"]]
+ descriptor_net_name = sce_net["name"]
net_name = descriptor_net.get("vim-network-name")
# add datacenters from instantiation parameters
if descriptor_net.get("sites"):
sce_net2instance[sce_net_uuid] = {}
net2task_id['scenario'][sce_net_uuid] = {}
+ use_network = None
+ related_network = None
+ if descriptor_net.get("use-network"):
+ target_instance_nets = mydb.get_rows(
+ SELECT="related",
+ FROM="instance_nets",
+ WHERE={"instance_scenario_id": descriptor_net["use-network"]["instance_scenario_id"],
+ "osm_id": descriptor_net["use-network"]["osm_id"]},
+ )
+ if not target_instance_nets:
+ raise NfvoException(
+ "Cannot find the target network at instance:networks[{}]:use-network".format(descriptor_net_name),
+ httperrors.Bad_Request)
+ else:
+ use_network = target_instance_nets[0]["related"]
+
if sce_net["external"]:
number_mgmt_networks += 1
net_uuid = str(uuid4())
uuid_list.append(net_uuid)
sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
+ if not related_network: # all db_instance_nets will have same related
+ related_network = use_network or net_uuid
db_net = {
"uuid": net_uuid,
+ "osm_id": sce_net.get("osm_id") or sce_net["name"],
+ "related": related_network,
'vim_net_id': None,
"vim_name": net_vim_name,
"instance_scenario_id": instance_uuid,
"action": task_action,
"item": "instance_nets",
"item_id": net_uuid,
+ "related": related_network,
"extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
}
net2task_id['scenario'][sce_net_uuid][datacenter_id] = task_index
uuid_list.append(sfi_uuid)
db_sfi = {
"uuid": sfi_uuid,
+ "related": sfi_uuid,
"instance_scenario_id": instance_uuid,
'sce_rsp_hop_id': cp['uuid'],
'datacenter_id': datacenter_id,
"status": "SCHEDULED",
"item": "instance_sfis",
"item_id": sfi_uuid,
+ "related": sfi_uuid,
"extra": yaml.safe_dump({"params": extra_params, "depends_on": [dependencies[i]]},
default_flow_style=True, width=256)
}
uuid_list.append(sf_uuid)
db_sf = {
"uuid": sf_uuid,
+ "related": sf_uuid,
"instance_scenario_id": instance_uuid,
'sce_rsp_hop_id': cp['uuid'],
'datacenter_id': datacenter_id,
"status": "SCHEDULED",
"item": "instance_sfs",
"item_id": sf_uuid,
+ "related": sf_uuid,
"extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
default_flow_style=True, width=256)
}
uuid_list.append(classification_uuid)
db_classification = {
"uuid": classification_uuid,
+ "related": classification_uuid,
"instance_scenario_id": instance_uuid,
'sce_classifier_match_id': match['uuid'],
'datacenter_id': datacenter_id,
"status": "SCHEDULED",
"item": "instance_classifications",
"item_id": classification_uuid,
+ "related": classification_uuid,
"extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
default_flow_style=True, width=256)
}
uuid_list.append(sfp_uuid)
db_sfp = {
"uuid": sfp_uuid,
+ "related": sfp_uuid,
"instance_scenario_id": instance_uuid,
'sce_rsp_id': rsp['uuid'],
'datacenter_id': datacenter_id,
"status": "SCHEDULED",
"item": "instance_sfps",
"item_id": sfp_uuid,
+ "related": sfp_uuid,
"extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
default_flow_style=True, width=256)
}
vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
db_net = {
"uuid": net_uuid,
+ "related": net_uuid,
'vim_net_id': None,
"vim_name": net_name,
"instance_scenario_id": instance_uuid,
"action": task_action,
"item": "instance_nets",
"item_id": net_uuid,
+ "related": net_uuid,
"extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
}
task_index += 1
uuid_list.append(vm_uuid)
db_vm = {
"uuid": vm_uuid,
+ "related": vm_uuid,
'instance_vnf_id': vnf_uuid,
# TODO delete "vim_vm_id": vm_id,
"vm_id": vm["uuid"],
"status": "SCHEDULED",
"item": "instance_vms",
"item_id": vm_uuid,
+ "related": vm_uuid,
"extra": yaml.safe_dump({"params": task_params, "depends_on": task_depends_on},
default_flow_style=True, width=256)
}
"status": "SCHEDULED",
"item": "instance_sfps",
"item_id": sfp["uuid"],
+ "related": sfp["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_classifications",
"item_id": classification["uuid"],
+ "related": classification["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_sfs",
"item_id": sf["uuid"],
+ "related": sf["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_sfis",
"item_id": sfi["uuid"],
+ "related": sfi["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_vms",
"item_id": vm["uuid"],
+ "related": vm["related"],
"extra": yaml.safe_dump({"params": vm["interfaces"], "depends_on": sfi_dependencies},
default_flow_style=True, width=256)
}
"status": "SCHEDULED",
"item": "instance_nets",
"item_id": net["uuid"],
+ "related": net["related"],
"extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
}
task_index += 1
"status": "SCHEDULED",
"item": "instance_vms",
"item_id": vdu_id,
+ "related": vm["related"],
"extra": yaml.safe_dump({"params": vm_interfaces},
default_flow_style=True, width=256)
}
"status": "SCHEDULED",
"item": "instance_vms",
"item_id": vm_uuid,
+ "related": target_vm["related"],
# ALF
# ALF
# TODO examinar parametros, quitar MAC o incrementar. Incrementar IP y colocar las dependencias con ACTION-asdfasd.