X-Git-Url: https://osm.etsi.org/gitweb/?a=blobdiff_plain;f=osm_ro%2Fnfvo.py;h=bf32c2459537f7d096f40ac9d8ba80eff03eee9c;hb=69b590eb0469efa021bada0d2bf867bbdff27a10;hp=e5d13eba12cab7d2a4ae75cff6e386bed42b5db8;hpb=3fcfdb7674436861d6ab0740972573293b9a355f;p=osm%2FRO.git diff --git a/osm_ro/nfvo.py b/osm_ro/nfvo.py index e5d13eba..bf32c245 100644 --- a/osm_ro/nfvo.py +++ b/osm_ro/nfvo.py @@ -38,6 +38,7 @@ import console_proxy_thread as cli import vimconn import logging import collections +import math from uuid import uuid4 from db_base import db_base_Exception @@ -391,7 +392,7 @@ def rollback(mydb, vims, rollback_list): mydb.delete_row(FROM="datacenters_images", WHERE={"datacenter_vim_id": vim["id"], "vim_id":item["uuid"]}) elif item["what"]=="flavor": vim.delete_flavor(item["uuid"]) - mydb.delete_row(FROM="datacenters_flavors", WHERE={"datacenter_id": vim["id"], "vim_id":item["uuid"]}) + mydb.delete_row(FROM="datacenters_flavors", WHERE={"datacenter_vim_id": vim["id"], "vim_id":item["uuid"]}) elif item["what"]=="network": vim.delete_network(item["uuid"]) elif item["what"]=="vm": @@ -620,7 +621,7 @@ def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vi def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_vim=False, return_on_error = None): - temp_flavor_dict= {'disk':flavor_dict.get('disk',1), + temp_flavor_dict= {'disk':flavor_dict.get('disk',0), 'ram':flavor_dict.get('ram'), 'vcpus':flavor_dict.get('vcpus'), } @@ -827,7 +828,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): try: myvnfd = vnfd_catalog.vnfd() try: - pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd) + pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True) except Exception as e: raise NfvoException("Error. 
Invalid VNF descriptor format " + str(e), HTTP_Bad_Request) db_vnfs = [] @@ -852,9 +853,10 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): vnf_uuid = str(uuid4()) uuid_list.append(vnf_uuid) vnfd_uuid_list.append(vnf_uuid) + vnfd_id = get_str(vnfd, "id", 255) db_vnf = { "uuid": vnf_uuid, - "osm_id": get_str(vnfd, "id", 255), + "osm_id": vnfd_id, "name": get_str(vnfd, "name", 255), "description": get_str(vnfd, "description", 255), "tenant_id": tenant_id, @@ -882,15 +884,21 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): net_id2uuid[vld.get("id")] = net_uuid db_nets.append(db_net) + # connection points vaiable declaration + cp_name2iface_uuid = {} + cp_name2vm_uuid = {} + cp_name2db_interface = {} + # table vms (vdus) vdu_id2uuid = {} vdu_id2db_table_index = {} for vdu in vnfd.get("vdu").itervalues(): vm_uuid = str(uuid4()) uuid_list.append(vm_uuid) + vdu_id = get_str(vdu, "id", 255) db_vm = { "uuid": vm_uuid, - "osm_id": get_str(vdu, "id", 255), + "osm_id": vdu_id, "name": get_str(vdu, "name", 255), "description": get_str(vdu, "description", 255), "vnf_id": vnf_uuid, @@ -937,74 +945,6 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): device["image checksum"] = str(volume["image-checksum"]) devices.append(device) - # table flavors - db_flavor = { - "name": get_str(vdu, "name", 250) + "-flv", - "vcpus": int(vdu["vm-flavor"].get("vcpu-count", 1)), - "ram": int(vdu["vm-flavor"].get("memory-mb", 1)), - "disk": int(vdu["vm-flavor"].get("storage-gb", 1)), - } - # EPA TODO revise - extended = {} - numa = {} - if devices: - extended["devices"] = devices - if vdu.get("guest-epa"): # TODO or dedicated_int: - epa_vcpu_set = False - if vdu["guest-epa"].get("numa-node-policy"): # TODO or dedicated_int: - numa_node_policy = vdu["guest-epa"].get("numa-node-policy") - if numa_node_policy.get("node"): - numa_node = numa_node_policy["node"]['0'] - if numa_node.get("num-cores"): - numa["cores"] = numa_node["num-cores"] - epa_vcpu_set = True - if numa_node.get("paired-threads"): - if numa_node["paired-threads"].get("num-paired-threads"): - numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"]) - epa_vcpu_set = True - if len(numa_node["paired-threads"].get("paired-thread-ids")): - numa["paired-threads-id"] = [] - for pair in numa_node["paired-threads"]["paired-thread-ids"].itervalues(): - numa["paired-threads-id"].append( - (str(pair["thread-a"]), str(pair["thread-b"])) - ) - if numa_node.get("num-threads"): - numa["threads"] = int(numa_node["num-threads"]) - epa_vcpu_set = True - if numa_node.get("memory-mb"): - numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1) - if vdu["guest-epa"].get("mempage-size"): - if vdu["guest-epa"]["mempage-size"] != "SMALL": - numa["memory"] = max(int(db_flavor["ram"] / 1024), 1) - if vdu["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set: - if vdu["guest-epa"]["cpu-pinning-policy"] == "DEDICATED": - if vdu["guest-epa"].get("cpu-thread-pinning-policy") and \ - vdu["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER": - numa["cores"] = max(db_flavor["vcpus"], 1) - else: - numa["threads"] = max(db_flavor["vcpus"], 1) - if numa: - extended["numas"] = [numa] - if extended: - extended_text = yaml.safe_dump(extended, default_flow_style=True, width=256) - db_flavor["extended"] = extended_text - # look if flavor exist - - temp_flavor_dict = {'disk': db_flavor.get('disk', 1), - 'ram': db_flavor.get('ram'), - 'vcpus': db_flavor.get('vcpus'), - 'extended': db_flavor.get('extended') - } - existing_flavors = mydb.get_rows(FROM="flavors", 
WHERE=temp_flavor_dict) - if existing_flavors: - flavor_uuid = existing_flavors[0]["uuid"] - else: - flavor_uuid = str(uuid4()) - uuid_list.append(flavor_uuid) - db_flavor["uuid"] = flavor_uuid - db_flavors.append(db_flavor) - db_vm["flavor_id"] = flavor_uuid - # cloud-init boot_data = {} if vdu.get("cloud-init"): @@ -1032,11 +972,11 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): db_vms_index += 1 # table interfaces (internal/external interfaces) - cp_name2iface_uuid = {} - cp_name2vm_uuid = {} - cp_name2db_interface = {} + flavor_epa_interfaces = [] + vdu_id2cp_name = {} # stored only when one external connection point is presented at this VDU # for iface in chain(vdu.get("internal-interface").itervalues(), vdu.get("external-interface").itervalues()): for iface in vdu.get("interface").itervalues(): + flavor_epa_interface = {} iface_uuid = str(uuid4()) uuid_list.append(iface_uuid) db_interface = { @@ -1044,12 +984,15 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): "internal_name": get_str(iface, "name", 255), "vm_id": vm_uuid, } + flavor_epa_interface["name"] = db_interface["internal_name"] if iface.get("virtual-interface").get("vpci"): db_interface["vpci"] = get_str(iface.get("virtual-interface"), "vpci", 12) + flavor_epa_interface["vpci"] = db_interface["vpci"] if iface.get("virtual-interface").get("bandwidth"): bps = int(iface.get("virtual-interface").get("bandwidth")) - db_interface["bw"] = bps/1000 + db_interface["bw"] = int(math.ceil(bps/1000000.0)) + flavor_epa_interface["bandwidth"] = "{} Mbps".format(db_interface["bw"]) if iface.get("virtual-interface").get("type") == "OM-MGMT": db_interface["type"] = "mgmt" @@ -1059,11 +1002,13 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): elif iface.get("virtual-interface").get("type") in ("SR-IOV", "PCI-PASSTHROUGH"): db_interface["type"] = "data" db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12) + flavor_epa_interface["dedicated"] = "no" if iface["virtual-interface"]["type"] == "SR-IOV" \ + else "yes" + flavor_epa_interfaces.append(flavor_epa_interface) else: raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vdu[{}]':'interface':'virtual" "-interface':'type':'{}'. Interface type is not supported".format( - str(vnfd["id"])[:255], str(vdu["id"])[:255], - iface.get("virtual-interface").get("type")), + vnfd_id, vdu_id, iface.get("virtual-interface").get("type")), HTTP_Bad_Request) if iface.get("external-connection-point-ref"): @@ -1076,6 +1021,15 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): for cp_descriptor in vnfd_descriptor["connection-point"]: if cp_descriptor["name"] == db_interface["external_name"]: break + else: + raise KeyError() + + if vdu_id in vdu_id2cp_name: + vdu_id2cp_name[vdu_id] = None # more than two connecdtion point for this VDU + else: + vdu_id2cp_name[vdu_id] = db_interface["external_name"] + + # port security if str(cp_descriptor.get("port-security-enabled")).lower() == "false": db_interface["port_security"] = 0 elif str(cp_descriptor.get("port-security-enabled")).lower() == "true": @@ -1084,7 +1038,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): raise NfvoException("Error. 
Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':" "'interface[{iface}]':'vnfd-connection-point-ref':'{cp}' is not present" " at connection-point".format( - vnf=vnfd["id"], vdu=vdu["id"], iface=iface["name"], + vnf=vnfd_id, vdu=vdu_id, iface=iface["name"], cp=iface.get("vnfd-connection-point-ref")), HTTP_Bad_Request) elif iface.get("internal-connection-point-ref"): @@ -1094,7 +1048,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): if cp.get("id-ref") == iface.get("internal-connection-point-ref"): db_interface["net_id"] = net_id2uuid[vld.get("id")] for cp_descriptor in vnfd_descriptor["connection-point"]: - if cp_descriptor["name"] == db_interface["external_name"]: + if cp_descriptor["name"] == db_interface["internal_name"]: break if str(cp_descriptor.get("port-security-enabled")).lower() == "false": db_interface["port_security"] = 0 @@ -1103,15 +1057,84 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): break except KeyError: raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':" - "'interface[{iface}]':'vdu-internal-connection-point-ref':'{cp}' is not" + "'interface[{iface}]':'internal-connection-point-ref':'{cp}' is not" " referenced by any internal-vld".format( - vnf=vnfd["id"], vdu=vdu["id"], iface=iface["name"], - cp=iface.get("vdu-internal-connection-point-ref")), + vnf=vnfd_id, vdu=vdu_id, iface=iface["name"], + cp=iface.get("internal-connection-point-ref")), HTTP_Bad_Request) if iface.get("position") is not None: db_interface["created_at"] = int(iface.get("position")) - 1000 db_interfaces.append(db_interface) + # table flavors + db_flavor = { + "name": get_str(vdu, "name", 250) + "-flv", + "vcpus": int(vdu["vm-flavor"].get("vcpu-count", 1)), + "ram": int(vdu["vm-flavor"].get("memory-mb", 1)), + "disk": int(vdu["vm-flavor"].get("storage-gb", 0)), + } + # TODO revise the case of several numa-node-policy node + extended = {} + numa = {} + if devices: + extended["devices"] = devices + if flavor_epa_interfaces: + numa["interfaces"] = flavor_epa_interfaces + if vdu.get("guest-epa"): # TODO or dedicated_int: + epa_vcpu_set = False + if vdu["guest-epa"].get("numa-node-policy"): # TODO or dedicated_int: + numa_node_policy = vdu["guest-epa"].get("numa-node-policy") + if numa_node_policy.get("node"): + numa_node = numa_node_policy["node"].values()[0] + if numa_node.get("num-cores"): + numa["cores"] = numa_node["num-cores"] + epa_vcpu_set = True + if numa_node.get("paired-threads"): + if numa_node["paired-threads"].get("num-paired-threads"): + numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"]) + epa_vcpu_set = True + if len(numa_node["paired-threads"].get("paired-thread-ids")): + numa["paired-threads-id"] = [] + for pair in numa_node["paired-threads"]["paired-thread-ids"].itervalues(): + numa["paired-threads-id"].append( + (str(pair["thread-a"]), str(pair["thread-b"])) + ) + if numa_node.get("num-threads"): + numa["threads"] = int(numa_node["num-threads"]) + epa_vcpu_set = True + if numa_node.get("memory-mb"): + numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1) + if vdu["guest-epa"].get("mempage-size"): + if vdu["guest-epa"]["mempage-size"] != "SMALL": + numa["memory"] = max(int(db_flavor["ram"] / 1024), 1) + if vdu["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set: + if vdu["guest-epa"]["cpu-pinning-policy"] == "DEDICATED": + if vdu["guest-epa"].get("cpu-thread-pinning-policy") and \ + vdu["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER": + numa["cores"] = max(db_flavor["vcpus"], 1) + else: + 
numa["threads"] = max(db_flavor["vcpus"], 1) + if numa: + extended["numas"] = [numa] + if extended: + extended_text = yaml.safe_dump(extended, default_flow_style=True, width=256) + db_flavor["extended"] = extended_text + # look if flavor exist + temp_flavor_dict = {'disk': db_flavor.get('disk', 0), + 'ram': db_flavor.get('ram'), + 'vcpus': db_flavor.get('vcpus'), + 'extended': db_flavor.get('extended') + } + existing_flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict) + if existing_flavors: + flavor_uuid = existing_flavors[0]["uuid"] + else: + flavor_uuid = str(uuid4()) + uuid_list.append(flavor_uuid) + db_flavor["uuid"] = flavor_uuid + db_flavors.append(db_flavor) + db_vm["flavor_id"] = flavor_uuid + # VNF affinity and antiaffinity for pg in vnfd.get("placement-groups").itervalues(): pg_name = get_str(pg, "name", 255) @@ -1120,7 +1143,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): if vdu_id not in vdu_id2db_table_index: raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'placement-groups[{pg}]':" "'member-vdus':'{vdu}'. Reference to a non-existing vdu".format( - vnf=vnfd["id"], pg=pg_name, vdu=vdu_id), + vnf=vnfd_id, pg=pg_name, vdu=vdu_id), HTTP_Bad_Request) db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name # TODO consider the case of isolation and not colocation @@ -1129,19 +1152,24 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor): # VNF mgmt configuration mgmt_access = {} if vnfd["mgmt-interface"].get("vdu-id"): - if vnfd["mgmt-interface"]["vdu-id"] not in vdu_id2uuid: + mgmt_vdu_id = get_str(vnfd["mgmt-interface"], "vdu-id", 255) + if mgmt_vdu_id not in vdu_id2uuid: raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'vdu-id':" "'{vdu}'. Reference to a non-existing vdu".format( - vnf=vnfd["id"], vdu=vnfd["mgmt-interface"]["vdu-id"]), + vnf=vnfd_id, vdu=mgmt_vdu_id), HTTP_Bad_Request) mgmt_access["vm_id"] = vdu_id2uuid[vnfd["mgmt-interface"]["vdu-id"]] + # if only one cp is defined by this VDU, mark this interface as of type "mgmt" + if vdu_id2cp_name.get(mgmt_vdu_id): + cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]["type"] = "mgmt" + if vnfd["mgmt-interface"].get("ip-address"): mgmt_access["ip-address"] = str(vnfd["mgmt-interface"].get("ip-address")) if vnfd["mgmt-interface"].get("cp"): if vnfd["mgmt-interface"]["cp"] not in cp_name2iface_uuid: raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp':'{cp}'. 
" "Reference to a non-existing connection-point".format( - vnf=vnfd["id"], cp=vnfd["mgmt-interface"]["cp"]), + vnf=vnfd_id, cp=vnfd["mgmt-interface"]["cp"]), HTTP_Bad_Request) mgmt_access["vm_id"] = cp_name2vm_uuid[vnfd["mgmt-interface"]["cp"]] mgmt_access["interface_id"] = cp_name2iface_uuid[vnfd["mgmt-interface"]["cp"]] @@ -1234,7 +1262,7 @@ def new_vnf(mydb, tenant_id, vnf_descriptor): myflavorDict["description"] = VNFCitem["description"] myflavorDict["ram"] = vnfc.get("ram", 0) myflavorDict["vcpus"] = vnfc.get("vcpus", 0) - myflavorDict["disk"] = vnfc.get("disk", 1) + myflavorDict["disk"] = vnfc.get("disk", 0) myflavorDict["extended"] = {} devices = vnfc.get("devices") @@ -1370,7 +1398,7 @@ def new_vnf_v02(mydb, tenant_id, vnf_descriptor): myflavorDict["description"] = VNFCitem["description"] myflavorDict["ram"] = vnfc.get("ram", 0) myflavorDict["vcpus"] = vnfc.get("vcpus", 0) - myflavorDict["disk"] = vnfc.get("disk", 1) + myflavorDict["disk"] = vnfc.get("disk", 0) myflavorDict["extended"] = {} devices = vnfc.get("devices") @@ -1557,26 +1585,33 @@ def delete_vnf(mydb,tenant_id,vnf_id,datacenter=None,vim_tenant=None): continue #flavor not used, must be deleted #delelte at VIM - c = mydb.get_rows(FROM='datacenters_flavors', WHERE={'flavor_id':flavor}) + c = mydb.get_rows(FROM='datacenters_flavors', WHERE={'flavor_id': flavor}) for flavor_vim in c: - if flavor_vim["datacenter_vim_id"] not in vims: # TODO change to datacenter_tenant_id + if not flavor_vim['created']: # skip this flavor because not created by openmano continue - if flavor_vim['created']=='false': #skip this flavor because not created by openmano + # look for vim + myvim = None + for vim in vims.values(): + if vim["config"]["datacenter_tenant_id"] == flavor_vim["datacenter_vim_id"]: + myvim = vim + break + if not myvim: continue - myvim=vims[ flavor_vim["datacenter_id"] ] try: myvim.delete_flavor(flavor_vim["vim_id"]) - except vimconn.vimconnNotFoundException as e: - logger.warn("VIM flavor %s not exist at datacenter %s", flavor_vim["vim_id"], flavor_vim["datacenter_id"] ) + except vimconn.vimconnNotFoundException: + logger.warn("VIM flavor %s not exist at datacenter %s", flavor_vim["vim_id"], + flavor_vim["datacenter_vim_id"] ) except vimconn.vimconnException as e: logger.error("Not possible to delete VIM flavor %s from datacenter %s: %s %s", - flavor_vim["vim_id"], flavor_vim["datacenter_id"], type(e).__name__, str(e)) - undeletedItems.append("flavor {} from VIM {}".format(flavor_vim["vim_id"], flavor_vim["datacenter_id"] )) - #delete flavor from Database, using table flavors and with cascade foreign key also at datacenters_flavors + flavor_vim["vim_id"], flavor_vim["datacenter_vim_id"], type(e).__name__, str(e)) + undeletedItems.append("flavor {} from VIM {}".format(flavor_vim["vim_id"], + flavor_vim["datacenter_vim_id"])) + # delete flavor from Database, using table flavors and with cascade foreign key also at datacenters_flavors mydb.delete_row_by_id('flavors', flavor) except db_base_Exception as e: logger.error("delete_vnf_error. Not possible to get flavor details and delete '%s'. 
%s", flavor, str(e)) - undeletedItems.append("flavor %s" % flavor) + undeletedItems.append("flavor {}".format(flavor)) for image in imageList: @@ -2072,7 +2107,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor): :param mydb: :param tenant_id: :param nsd_descriptor: - :return: The list of cretated NSD ids + :return: The list of created NSD ids """ try: mynsd = nsd_catalog.nsd() @@ -2084,6 +2119,11 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor): db_sce_nets = [] db_sce_vnfs = [] db_sce_interfaces = [] + db_sce_vnffgs = [] + db_sce_rsps = [] + db_sce_rsp_hops = [] + db_sce_classifiers = [] + db_sce_classifier_matches = [] db_ip_profiles = [] db_ip_profiles_index = 0 uuid_list = [] @@ -2091,7 +2131,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor): for nsd_yang in mynsd.nsd_catalog.nsd.itervalues(): nsd = nsd_yang.get() - # table sceanrios + # table scenarios scenario_uuid = str(uuid4()) uuid_list.append(scenario_uuid) nsd_uuid_list.append(scenario_uuid) @@ -2173,7 +2213,8 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor): elif vld.get("provider-network").get("overlay-type") == "VLAN": db_sce_net["type"] = "data" else: - db_sce_net["type"] = "bridge" + # later on it will be fixed to bridge or data depending on the type of interfaces attached to it + db_sce_net["type"] = None db_sce_nets.append(db_sce_net) # ip-profile, link db_ip_profile with db_sce_net @@ -2197,7 +2238,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor): str(nsd["id"]), str(vld["id"]), str(iface["member-vnf-index-ref"])), HTTP_Bad_Request) - existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',), + existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid', 'i.type as iface_type'), FROM="interfaces as i join vms on i.vm_id=vms.uuid", WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index], 'external_name': get_str(iface, "vnfd-connection-point-ref", @@ -2210,6 +2251,8 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor): str(iface.get("vnfd-id-ref"))[:255]), HTTP_Bad_Request) interface_uuid = existing_ifaces[0]["uuid"] + if existing_ifaces[0]["iface_type"] == "data" and not db_sce_net["type"]: + db_sce_net["type"] = "data" sce_interface_uuid = str(uuid4()) uuid_list.append(sce_net_uuid) db_sce_interface = { @@ -2220,6 +2263,129 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor): # "ip_address": #TODO } db_sce_interfaces.append(db_sce_interface) + if not db_sce_net["type"]: + db_sce_net["type"] = "bridge" + + # table sce_vnffgs (vnffgd) + for vnffg in nsd.get("vnffgd").itervalues(): + sce_vnffg_uuid = str(uuid4()) + uuid_list.append(sce_vnffg_uuid) + db_sce_vnffg = { + "uuid": sce_vnffg_uuid, + "name": get_str(vnffg, "name", 255), + "scenario_id": scenario_uuid, + "vendor": get_str(vnffg, "vendor", 255), + "description": get_str(vld, "description", 255), + } + db_sce_vnffgs.append(db_sce_vnffg) + + # deal with rsps + db_sce_rsps = [] + for rsp in vnffg.get("rsp").itervalues(): + sce_rsp_uuid = str(uuid4()) + uuid_list.append(sce_rsp_uuid) + db_sce_rsp = { + "uuid": sce_rsp_uuid, + "name": get_str(rsp, "name", 255), + "sce_vnffg_id": sce_vnffg_uuid, + "id": get_str(rsp, "id", 255), # only useful to link with classifiers; will be removed later in the code + } + db_sce_rsps.append(db_sce_rsp) + db_sce_rsp_hops = [] + for iface in rsp.get("vnfd-connection-point-ref").itervalues(): + vnf_index = int(iface['member-vnf-index-ref']) + if_order = int(iface['order']) + # check correct parameters + if vnf_index not in vnf_index2vnf_uuid: + raise NfvoException("Error. 
Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point" + "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at " + "'nsd':'constituent-vnfd'".format( + str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])), + HTTP_Bad_Request) + + existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',), + FROM="interfaces as i join vms on i.vm_id=vms.uuid", + WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index], + 'external_name': get_str(iface, "vnfd-connection-point-ref", + 255)}) + if not existing_ifaces: + raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point" + "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing " + "connection-point name at VNFD '{}'".format( + str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]), + str(iface.get("vnfd-id-ref"))[:255]), + HTTP_Bad_Request) + interface_uuid = existing_ifaces[0]["uuid"] + sce_rsp_hop_uuid = str(uuid4()) + uuid_list.append(sce_rsp_hop_uuid) + db_sce_rsp_hop = { + "uuid": sce_rsp_hop_uuid, + "if_order": if_order, + "interface_id": interface_uuid, + "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index], + "sce_rsp_id": sce_rsp_uuid, + } + db_sce_rsp_hops.append(db_sce_rsp_hop) + + # deal with classifiers + db_sce_classifiers = [] + for classifier in vnffg.get("classifier").itervalues(): + sce_classifier_uuid = str(uuid4()) + uuid_list.append(sce_classifier_uuid) + + # source VNF + vnf_index = int(classifier['member-vnf-index-ref']) + if vnf_index not in vnf_index2vnf_uuid: + raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point" + "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at " + "'nsd':'constituent-vnfd'".format( + str(nsd["id"]), str(classifier["id"]), str(classifier["member-vnf-index-ref"])), + HTTP_Bad_Request) + existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',), + FROM="interfaces as i join vms on i.vm_id=vms.uuid", + WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index], + 'external_name': get_str(classifier, "vnfd-connection-point-ref", + 255)}) + if not existing_ifaces: + raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point" + "-ref':'vnfd-connection-point-ref':'{}'. 
Reference to a non-existing " + "connection-point name at VNFD '{}'".format( + str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]), + str(iface.get("vnfd-id-ref"))[:255]), + HTTP_Bad_Request) + interface_uuid = existing_ifaces[0]["uuid"] + + db_sce_classifier = { + "uuid": sce_classifier_uuid, + "name": get_str(classifier, "name", 255), + "sce_vnffg_id": sce_vnffg_uuid, + "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index], + "interface_id": interface_uuid, + } + rsp_id = get_str(classifier, "rsp-id-ref", 255) + rsp = next((item for item in db_sce_rsps if item["id"] == rsp_id), None) + db_sce_classifier["sce_rsp_id"] = rsp["uuid"] + db_sce_classifiers.append(db_sce_classifier) + + db_sce_classifier_matches = [] + for match in classifier.get("match-attributes").itervalues(): + sce_classifier_match_uuid = str(uuid4()) + uuid_list.append(sce_classifier_match_uuid) + db_sce_classifier_match = { + "uuid": sce_classifier_match_uuid, + "ip_proto": get_str(match, "ip-proto", 2), + "source_ip": get_str(match, "source-ip-address", 16), + "destination_ip": get_str(match, "destination-ip-address", 16), + "source_port": get_str(match, "source-port", 5), + "destination_port": get_str(match, "destination-port", 5), + "sce_classifier_id": sce_classifier_uuid, + } + db_sce_classifier_matches.append(db_sce_classifier_match) + # TODO: vnf/cp keys + + # remove unneeded id's in sce_rsps + for rsp in db_sce_rsps: + rsp.pop('id') db_tables = [ {"scenarios": db_scenarios}, @@ -2227,9 +2393,14 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor): {"ip_profiles": db_ip_profiles}, {"sce_vnfs": db_sce_vnfs}, {"sce_interfaces": db_sce_interfaces}, + {"sce_vnffgs": db_sce_vnffgs}, + {"sce_rsps": db_sce_rsps}, + {"sce_rsp_hops": db_sce_rsp_hops}, + {"sce_classifiers": db_sce_classifiers}, + {"sce_classifier_matches": db_sce_classifier_matches}, ] - logger.debug("create_vnf Deployment done vnfDict: %s", + logger.debug("new_nsd_v3 done: %s", yaml.safe_dump(db_tables, indent=4, default_flow_style=False) ) mydb.new_rows(db_tables, uuid_list) return nsd_uuid_list @@ -2654,7 +2825,6 @@ def create_instance(mydb, tenant_id, instance_dict): myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id) tenant = mydb.get_rows_by_id('nfvo_tenants', tenant_id) # myvim_tenant = myvim['tenant_id'] - rollbackList=[] # print "Checking that the scenario exists and getting the scenario dictionary" @@ -2668,6 +2838,10 @@ def create_instance(mydb, tenant_id, instance_dict): db_instance_vnfs = [] db_instance_vms = [] db_instance_interfaces = [] + db_instance_sfis = [] + db_instance_sfs = [] + db_instance_classifications = [] + db_instance_sfps = [] db_ip_profiles = [] db_vim_actions = [] uuid_list = [] @@ -2751,10 +2925,6 @@ def create_instance(mydb, tenant_id, instance_dict): # 0.1 parse cloud-config parameters cloud_config = unify_cloud_config(instance_dict.get("cloud-config"), scenarioDict.get("cloud-config")) - # We add the RO key to cloud_config - if tenant[0].get('RO_pub_key'): - RO_key = {"key-pairs": [tenant[0]['RO_pub_key']]} - cloud_config = unify_cloud_config(cloud_config, RO_key) # 0.2 merge instance information into scenario # Ideally, the operation should be as simple as: update(scenarioDict,instance_dict) @@ -2981,6 +3151,9 @@ def create_instance(mydb, tenant_id, instance_dict): # myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict) sce_vnf_list = sorted(scenarioDict['vnfs'], key=lambda k: k['name']) for sce_vnf in sce_vnf_list: + ssh_access 
= None + if sce_vnf.get('mgmt_access'): + ssh_access = sce_vnf['mgmt_access'].get('config-access', {}).get('ssh-access') vnf_availability_zones = [] for vm in sce_vnf['vms']: vm_av = vm.get('availability_zone') @@ -3130,10 +3303,15 @@ def create_instance(mydb, tenant_id, instance_dict): # print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False) # print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False) # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>" + + # We add the RO key to cloud_config if vnf will need ssh access + cloud_config_vm = cloud_config + if ssh_access and ssh_access['required'] and ssh_access['default-user'] and tenant[0].get('RO_pub_key'): + RO_key = {"key-pairs": [tenant[0]['RO_pub_key']]} + cloud_config_vm = unify_cloud_config(cloud_config_vm, RO_key) if vm.get("boot_data"): - cloud_config_vm = unify_cloud_config(vm["boot_data"], cloud_config) - else: - cloud_config_vm = cloud_config + cloud_config_vm = unify_cloud_config(vm["boot_data"], cloud_config_vm) + if myVMDict.get('availability_zone'): av_index = vnf_availability_zones.index(myVMDict['availability_zone']) else: @@ -3200,6 +3378,157 @@ def create_instance(mydb, tenant_id, instance_dict): task_index += 1 db_vim_actions.append(db_vim_action) + task_depends_on = [] + for vnffg in scenarioDict['vnffgs']: + for rsp in vnffg['rsps']: + sfs_created = [] + for cp in rsp['connection_points']: + count = mydb.get_rows( + SELECT=('vms.count'), + FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h on interfaces.uuid=h.interface_id", + WHERE={'h.uuid': cp['uuid']})[0]['count'] + instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == cp['sce_vnf_id']), None) + instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']] + dependencies = [] + for instance_vm in instance_vms: + action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None) + if action: + dependencies.append(action['task_index']) + # TODO: throw exception if count != len(instance_vms) + # TODO: and action shouldn't ever be None + sfis_created = [] + for i in range(count): + # create sfis + sfi_uuid = str(uuid4()) + uuid_list.append(sfi_uuid) + db_sfi = { + "uuid": sfi_uuid, + "instance_scenario_id": instance_uuid, + 'sce_rsp_hop_id': cp['uuid'], + 'datacenter_id': datacenter_id, + 'datacenter_tenant_id': myvim_thread_id, + "vim_sfi_id": None, # vim thread will populate + } + db_instance_sfis.append(db_sfi) + db_vim_action = { + "instance_action_id": instance_action_id, + "task_index": task_index, + "datacenter_vim_id": myvim_thread_id, + "action": "CREATE", + "status": "SCHEDULED", + "item": "instance_sfis", + "item_id": sfi_uuid, + "extra": yaml.safe_dump({"params": "", "depends_on": [dependencies[i]]}, + default_flow_style=True, width=256) + } + sfis_created.append(task_index) + task_index += 1 + db_vim_actions.append(db_vim_action) + # create sfs + sf_uuid = str(uuid4()) + uuid_list.append(sf_uuid) + db_sf = { + "uuid": sf_uuid, + "instance_scenario_id": instance_uuid, + 'sce_rsp_hop_id': cp['uuid'], + 'datacenter_id': datacenter_id, + 'datacenter_tenant_id': myvim_thread_id, + "vim_sf_id": None, # vim thread will populate + } + db_instance_sfs.append(db_sf) + db_vim_action = { + "instance_action_id": instance_action_id, + "task_index": task_index, + "datacenter_vim_id": myvim_thread_id, + "action": "CREATE", + "status": "SCHEDULED", + "item": "instance_sfs", + "item_id": sf_uuid, + 
"extra": yaml.safe_dump({"params": "", "depends_on": sfis_created}, + default_flow_style=True, width=256) + } + sfs_created.append(task_index) + task_index += 1 + db_vim_actions.append(db_vim_action) + classifier = rsp['classifier'] + + # TODO the following ~13 lines can be reused for the sfi case + count = mydb.get_rows( + SELECT=('vms.count'), + FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_classifiers as c on interfaces.uuid=c.interface_id", + WHERE={'c.uuid': classifier['uuid']})[0]['count'] + instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == classifier['sce_vnf_id']), None) + instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']] + dependencies = [] + for instance_vm in instance_vms: + action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None) + if action: + dependencies.append(action['task_index']) + # TODO: throw exception if count != len(instance_vms) + # TODO: and action shouldn't ever be None + classifications_created = [] + for i in range(count): + for match in classifier['matches']: + # create classifications + classification_uuid = str(uuid4()) + uuid_list.append(classification_uuid) + db_classification = { + "uuid": classification_uuid, + "instance_scenario_id": instance_uuid, + 'sce_classifier_match_id': match['uuid'], + 'datacenter_id': datacenter_id, + 'datacenter_tenant_id': myvim_thread_id, + "vim_classification_id": None, # vim thread will populate + } + db_instance_classifications.append(db_classification) + classification_params = { + "ip_proto": match["ip_proto"], + "source_ip": match["source_ip"], + "destination_ip": match["destination_ip"], + "source_port": match["source_port"], + "destination_port": match["destination_port"] + } + db_vim_action = { + "instance_action_id": instance_action_id, + "task_index": task_index, + "datacenter_vim_id": myvim_thread_id, + "action": "CREATE", + "status": "SCHEDULED", + "item": "instance_classifications", + "item_id": classification_uuid, + "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]}, + default_flow_style=True, width=256) + } + classifications_created.append(task_index) + task_index += 1 + db_vim_actions.append(db_vim_action) + + # create sfps + sfp_uuid = str(uuid4()) + uuid_list.append(sfp_uuid) + db_sfp = { + "uuid": sfp_uuid, + "instance_scenario_id": instance_uuid, + 'sce_rsp_id': rsp['uuid'], + 'datacenter_id': datacenter_id, + 'datacenter_tenant_id': myvim_thread_id, + "vim_sfp_id": None, # vim thread will populate + } + db_instance_sfps.append(db_sfp) + db_vim_action = { + "instance_action_id": instance_action_id, + "task_index": task_index, + "datacenter_vim_id": myvim_thread_id, + "action": "CREATE", + "status": "SCHEDULED", + "item": "instance_sfps", + "item_id": sfp_uuid, + "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created}, + default_flow_style=True, width=256) + } + task_index += 1 + db_vim_actions.append(db_vim_action) + scenarioDict["datacenter2tenant"] = myvim_threads_id db_instance_action["number_tasks"] = task_index @@ -3213,6 +3542,10 @@ def create_instance(mydb, tenant_id, instance_dict): {"instance_vms": db_instance_vms}, {"instance_interfaces": db_instance_interfaces}, {"instance_actions": db_instance_action}, + {"instance_sfis": db_instance_sfis}, + {"instance_sfs": db_instance_sfs}, + {"instance_classifications": db_instance_classifications}, + {"instance_sfps": db_instance_sfps}, 
{"vim_actions": db_vim_actions} ] @@ -3244,7 +3577,6 @@ def delete_instance(mydb, tenant_id, instance_id): # print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False) tenant_id = instanceDict["tenant_id"] # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id" - # 1. Delete from Database message = mydb.delete_instance_scenario(instance_id, tenant_id) @@ -3353,6 +3685,156 @@ def delete_instance(mydb, tenant_id, instance_id): task_index += 1 db_vim_actions.append(db_vim_action) + # 2.3 deleting VNFFGs + + for sfp in instanceDict.get('sfps', ()): + vimthread_affected[sfp["datacenter_tenant_id"]] = None + datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"]) + if datacenter_key not in myvims: + try: + _,myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"]) + except NfvoException as e: + logger.error(str(e)) + myvim_thread = None + myvim_threads[datacenter_key] = myvim_thread + vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"], + datacenter_tenant_id=sfp["datacenter_tenant_id"]) + if len(vims) == 0: + logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"])) + myvims[datacenter_key] = None + else: + myvims[datacenter_key] = vims.values()[0] + myvim = myvims[datacenter_key] + myvim_thread = myvim_threads[datacenter_key] + + if not myvim: + error_msg += "\n vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"]) + continue + extra = {"params": (sfp['vim_sfp_id'])} + db_vim_action = { + "instance_action_id": instance_action_id, + "task_index": task_index, + "datacenter_vim_id": sfp["datacenter_tenant_id"], + "action": "DELETE", + "status": "SCHEDULED", + "item": "instance_sfps", + "item_id": sfp["uuid"], + "extra": yaml.safe_dump(extra, default_flow_style=True, width=256) + } + task_index += 1 + db_vim_actions.append(db_vim_action) + + for sf in instanceDict.get('sfs', ()): + vimthread_affected[sf["datacenter_tenant_id"]] = None + datacenter_key = (sf["datacenter_id"], sf["datacenter_tenant_id"]) + if datacenter_key not in myvims: + try: + _,myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"]) + except NfvoException as e: + logger.error(str(e)) + myvim_thread = None + myvim_threads[datacenter_key] = myvim_thread + vims = get_vim(mydb, tenant_id, datacenter_id=sf["datacenter_id"], + datacenter_tenant_id=sf["datacenter_tenant_id"]) + if len(vims) == 0: + logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sf["datacenter_id"], sf["datacenter_tenant_id"])) + myvims[datacenter_key] = None + else: + myvims[datacenter_key] = vims.values()[0] + myvim = myvims[datacenter_key] + myvim_thread = myvim_threads[datacenter_key] + + if not myvim: + error_msg += "\n vim_sf_id={} cannot be deleted because datacenter={} not found".format(sf['vim_sf_id'], sf["datacenter_id"]) + continue + extra = {"params": (sf['vim_sf_id'])} + db_vim_action = { + "instance_action_id": instance_action_id, + "task_index": task_index, + "datacenter_vim_id": sf["datacenter_tenant_id"], + "action": "DELETE", + "status": "SCHEDULED", + "item": "instance_sfs", + "item_id": sf["uuid"], + "extra": yaml.safe_dump(extra, default_flow_style=True, width=256) + } + task_index += 1 + db_vim_actions.append(db_vim_action) + + for sfi in instanceDict.get('sfis', ()): + vimthread_affected[sfi["datacenter_tenant_id"]] = None + 
datacenter_key = (sfi["datacenter_id"], sfi["datacenter_tenant_id"]) + if datacenter_key not in myvims: + try: + _,myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"]) + except NfvoException as e: + logger.error(str(e)) + myvim_thread = None + myvim_threads[datacenter_key] = myvim_thread + vims = get_vim(mydb, tenant_id, datacenter_id=sfi["datacenter_id"], + datacenter_tenant_id=sfi["datacenter_tenant_id"]) + if len(vims) == 0: + logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfi["datacenter_id"], sfi["datacenter_tenant_id"])) + myvims[datacenter_key] = None + else: + myvims[datacenter_key] = vims.values()[0] + myvim = myvims[datacenter_key] + myvim_thread = myvim_threads[datacenter_key] + + if not myvim: + error_msg += "\n vim_sfi_id={} cannot be deleted because datacenter={} not found".format(sfi['vim_sfi_id'], sfi["datacenter_id"]) + continue + extra = {"params": (sfi['vim_sfi_id'])} + db_vim_action = { + "instance_action_id": instance_action_id, + "task_index": task_index, + "datacenter_vim_id": sfi["datacenter_tenant_id"], + "action": "DELETE", + "status": "SCHEDULED", + "item": "instance_sfis", + "item_id": sfi["uuid"], + "extra": yaml.safe_dump(extra, default_flow_style=True, width=256) + } + task_index += 1 + db_vim_actions.append(db_vim_action) + + for classification in instanceDict['classifications']: + vimthread_affected[classification["datacenter_tenant_id"]] = None + datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"]) + if datacenter_key not in myvims: + try: + _,myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"]) + except NfvoException as e: + logger.error(str(e)) + myvim_thread = None + myvim_threads[datacenter_key] = myvim_thread + vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"], + datacenter_tenant_id=classification["datacenter_tenant_id"]) + if len(vims) == 0: + logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"], classification["datacenter_tenant_id"])) + myvims[datacenter_key] = None + else: + myvims[datacenter_key] = vims.values()[0] + myvim = myvims[datacenter_key] + myvim_thread = myvim_threads[datacenter_key] + + if not myvim: + error_msg += "\n vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'], classification["datacenter_id"]) + continue + extra = {"params": (classification['vim_classification_id'])} + db_vim_action = { + "instance_action_id": instance_action_id, + "task_index": task_index, + "datacenter_vim_id": classification["datacenter_tenant_id"], + "action": "DELETE", + "status": "SCHEDULED", + "item": "instance_classifications", + "item_id": classification["uuid"], + "extra": yaml.safe_dump(extra, default_flow_style=True, width=256) + } + task_index += 1 + db_vim_actions.append(db_vim_action) + db_instance_action["number_tasks"] = task_index db_tables = [ {"instance_actions": db_instance_action}, @@ -3721,7 +4203,7 @@ def new_datacenter(mydb, datacenter_descriptor): except (IOError, ImportError): # if module_info and module_info[0]: # file.close(module_info[0]) - raise NfvoException("Incorrect datacenter type '{}'. Plugin '{}'.py not installed".format(datacenter_type, module), HTTP_Bad_Request) + raise NfvoException("Incorrect datacenter type '{}'. 
Plugin '{}.py' not installed".format(datacenter_type, module), HTTP_Bad_Request) datacenter_id = mydb.new_row("datacenters", datacenter_descriptor, add_uuid=True, confidential_data=True) return datacenter_id @@ -4308,29 +4790,35 @@ def vim_action_create(mydb, tenant_id, datacenter, item, descriptor): #If the datacenter has a SDN controller defined and the network is of dataplane type, then create the sdn network if get_sdn_controller_id(mydb, datacenter) != None and (net_type == 'data' or net_type == 'ptp'): + #obtain datacenter_tenant_id + datacenter_tenant_id = mydb.get_rows(SELECT=('uuid',), + FROM='datacenter_tenants', + WHERE={'datacenter_id': datacenter})[0]['uuid'] try: sdn_network = {} sdn_network['vlan'] = net_vlan sdn_network['type'] = net_type sdn_network['name'] = net_name + sdn_network['region'] = datacenter_tenant_id ovim_content = ovim.new_network(sdn_network) except ovimException as e: - self.logger.error("ovimException creating SDN network={} ".format( + logger.error("ovimException creating SDN network={} ".format( sdn_network) + str(e), exc_info=True) raise NfvoException("ovimException creating SDN network={} ".format(sdn_network) + str(e), HTTP_Internal_Server_Error) # Save entry in in dabase mano_db in table instance_nets to stablish a dictionary vim_net_id <->sdn_net_id # use instance_scenario_id=None to distinguish from real instaces of nets - correspondence = {'instance_scenario_id': None, 'sdn_net_id': ovim_content, 'vim_net_id': content} - #obtain datacenter_tenant_id - correspondence['datacenter_tenant_id'] = mydb.get_rows(SELECT=('uuid',), FROM='datacenter_tenants', WHERE={'datacenter_id': datacenter})[0]['uuid'] - + correspondence = {'instance_scenario_id': None, + 'sdn_net_id': ovim_content, + 'vim_net_id': content, + 'datacenter_tenant_id': datacenter_tenant_id + } try: mydb.new_row('instance_nets', correspondence, add_uuid=True) except db_base_Exception as e: - raise NfvoException("Error saving correspondence for VIM/SDN dataplane networks{}: ".format(correspondence) + - str(e), HTTP_Internal_Server_Error) + raise NfvoException("Error saving correspondence for VIM/SDN dataplane networks{}: {}".format( + correspondence, e), HTTP_Internal_Server_Error) elif item=="tenants": tenant = descriptor["tenant"] content = myvim.new_tenant(tenant["name"], tenant.get("description"))
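
Note on the interface bandwidth handling changed above: the descriptor's virtual-interface bandwidth is given in bits per second, while the patch stores db_interface["bw"] (and the EPA annotation "{} Mbps") in whole megabits per second, replacing the old bps/1000 computation with int(math.ceil(bps/1000000.0)) so that any fractional megabit rounds up instead of truncating to 0. A minimal standalone sketch of that conversion, with an illustrative helper name that is not part of the patch:

    import math

    def bandwidth_bps_to_mbps(bps):
        # Convert a bandwidth expressed in bits per second to whole Mbps,
        # rounding any fraction up, mirroring the patch's
        # int(math.ceil(bps/1000000.0)) when filling db_interface["bw"].
        return int(math.ceil(bps / 1000000.0))

    # Examples: 1 Gbps -> 1000 Mbps; 1.5 Mbps -> 2 Mbps (rounded up, never 0)
    assert bandwidth_bps_to_mbps(1000000000) == 1000
    assert bandwidth_bps_to_mbps(1500000) == 2

Rounding up rather than truncating keeps small bandwidth requests from collapsing to a zero-Mbps value in the interfaces table.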