bug 425 fix SR-IOV PCI-PASSTHROUGH interfaces 76/5676/3
author tierno <alfonso.tiernosepulveda@telefonica.com>
Fri, 10 Nov 2017 16:09:18 +0000 (17:09 +0100)
committer tierno <alfonso.tiernosepulveda@telefonica.com>
Tue, 14 Nov 2017 12:42:22 +0000 (13:42 +0100)
Change-Id: I29528c574c05cec06ae8a4537f8398f0ca713011
Signed-off-by: tierno <alfonso.tiernosepulveda@telefonica.com>
openmanod
osm_ro/nfvo.py
osm_ro/vimconn.py
osm_ro/vimconn_aws.py
osm_ro/vimconn_openstack.py
osm_ro/vimconn_openvim.py
osm_ro/vimconn_vmware.py
scenarios/examples/v3_2linux_sriov.yaml [new file with mode: 0644]
scenarios/examples/v3_3vdu_2vnf_nsd.yaml
vnfs/examples/v3_3vdu_vnfd.yaml
vnfs/examples/v3_linux_sriov.yaml [new file with mode: 0644]

index 02428d7..030d4ec 100755 (executable)
--- a/openmanod
+++ b/openmanod
@@ -48,7 +48,7 @@ import osm_ro
 
 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ = "$26-aug-2014 11:09:29$"
-__version__ = "0.5.41-r551"
+__version__ = "0.5.42-r552"
 version_date = "Nov 2017"
 database_version = 27      # expected database schema version
 
index e5d13eb..bd9d368 100644 (file)
@@ -38,6 +38,7 @@ import console_proxy_thread as cli
 import vimconn
 import logging
 import collections
+import math
 from uuid import uuid4
 from db_base import db_base_Exception
 
@@ -852,9 +853,10 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
             vnf_uuid = str(uuid4())
             uuid_list.append(vnf_uuid)
             vnfd_uuid_list.append(vnf_uuid)
+            vnfd_id = get_str(vnfd, "id", 255)
             db_vnf = {
                 "uuid": vnf_uuid,
-                "osm_id": get_str(vnfd, "id", 255),
+                "osm_id": vnfd_id,
                 "name": get_str(vnfd, "name", 255),
                 "description": get_str(vnfd, "description", 255),
                 "tenant_id": tenant_id,
@@ -888,9 +890,10 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
             for vdu in vnfd.get("vdu").itervalues():
                 vm_uuid = str(uuid4())
                 uuid_list.append(vm_uuid)
+                vdu_id = get_str(vdu, "id", 255)
                 db_vm = {
                     "uuid": vm_uuid,
-                    "osm_id": get_str(vdu, "id", 255),
+                    "osm_id": vdu_id,
                     "name": get_str(vdu, "name", 255),
                     "description": get_str(vdu, "description", 255),
                     "vnf_id": vnf_uuid,
@@ -937,74 +940,6 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                                     device["image checksum"] = str(volume["image-checksum"])
                             devices.append(device)
 
-                # table flavors
-                db_flavor = {
-                    "name": get_str(vdu, "name", 250) + "-flv",
-                    "vcpus": int(vdu["vm-flavor"].get("vcpu-count", 1)),
-                    "ram": int(vdu["vm-flavor"].get("memory-mb", 1)),
-                    "disk": int(vdu["vm-flavor"].get("storage-gb", 1)),
-                }
-                # EPA  TODO revise
-                extended = {}
-                numa = {}
-                if devices:
-                    extended["devices"] = devices
-                if vdu.get("guest-epa"):   # TODO or dedicated_int:
-                    epa_vcpu_set = False
-                    if vdu["guest-epa"].get("numa-node-policy"):  # TODO or dedicated_int:
-                        numa_node_policy = vdu["guest-epa"].get("numa-node-policy")
-                        if numa_node_policy.get("node"):
-                            numa_node = numa_node_policy["node"]['0']
-                            if numa_node.get("num-cores"):
-                                numa["cores"] = numa_node["num-cores"]
-                                epa_vcpu_set = True
-                            if numa_node.get("paired-threads"):
-                                if numa_node["paired-threads"].get("num-paired-threads"):
-                                    numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
-                                    epa_vcpu_set = True
-                                if len(numa_node["paired-threads"].get("paired-thread-ids")):
-                                    numa["paired-threads-id"] = []
-                                    for pair in numa_node["paired-threads"]["paired-thread-ids"].itervalues():
-                                        numa["paired-threads-id"].append(
-                                            (str(pair["thread-a"]), str(pair["thread-b"]))
-                                        )
-                            if numa_node.get("num-threads"):
-                                numa["threads"] = int(numa_node["num-threads"])
-                                epa_vcpu_set = True
-                            if numa_node.get("memory-mb"):
-                                numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1)
-                    if vdu["guest-epa"].get("mempage-size"):
-                        if vdu["guest-epa"]["mempage-size"] != "SMALL":
-                            numa["memory"] = max(int(db_flavor["ram"] / 1024), 1)
-                    if vdu["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set:
-                        if vdu["guest-epa"]["cpu-pinning-policy"] == "DEDICATED":
-                            if vdu["guest-epa"].get("cpu-thread-pinning-policy") and \
-                                            vdu["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER":
-                                numa["cores"] = max(db_flavor["vcpus"], 1)
-                            else:
-                                numa["threads"] = max(db_flavor["vcpus"], 1)
-                if numa:
-                    extended["numas"] = [numa]
-                if extended:
-                    extended_text = yaml.safe_dump(extended, default_flow_style=True, width=256)
-                    db_flavor["extended"] = extended_text
-                # look if flavor exist
-
-                temp_flavor_dict = {'disk': db_flavor.get('disk', 1),
-                                    'ram': db_flavor.get('ram'),
-                                    'vcpus': db_flavor.get('vcpus'),
-                                    'extended': db_flavor.get('extended')
-                                    }
-                existing_flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
-                if existing_flavors:
-                    flavor_uuid = existing_flavors[0]["uuid"]
-                else:
-                    flavor_uuid = str(uuid4())
-                    uuid_list.append(flavor_uuid)
-                    db_flavor["uuid"] = flavor_uuid
-                    db_flavors.append(db_flavor)
-                db_vm["flavor_id"] = flavor_uuid
-
                 # cloud-init
                 boot_data = {}
                 if vdu.get("cloud-init"):
@@ -1032,11 +967,14 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                 db_vms_index += 1
 
                 # table interfaces (internal/external interfaces)
+                flavor_epa_interfaces = []
                 cp_name2iface_uuid = {}
                 cp_name2vm_uuid = {}
                 cp_name2db_interface = {}
+                vdu_id2cp_name = {}  # stored only when a single external connection point is present in this VDU
                 # for iface in chain(vdu.get("internal-interface").itervalues(), vdu.get("external-interface").itervalues()):
                 for iface in vdu.get("interface").itervalues():
+                    flavor_epa_interface = {}
                     iface_uuid = str(uuid4())
                     uuid_list.append(iface_uuid)
                     db_interface = {
@@ -1044,12 +982,15 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                         "internal_name": get_str(iface, "name", 255),
                         "vm_id": vm_uuid,
                     }
+                    flavor_epa_interface["name"] = db_interface["internal_name"]
                     if iface.get("virtual-interface").get("vpci"):
                         db_interface["vpci"] = get_str(iface.get("virtual-interface"), "vpci", 12)
+                        flavor_epa_interface["vpci"] = db_interface["vpci"]
 
                     if iface.get("virtual-interface").get("bandwidth"):
                         bps = int(iface.get("virtual-interface").get("bandwidth"))
-                        db_interface["bw"] = bps/1000
+                        db_interface["bw"] = int(math.ceil(bps/1000000.0))
+                        flavor_epa_interface["bandwidth"] = "{} Mbps".format(db_interface["bw"])
 
                     if iface.get("virtual-interface").get("type") == "OM-MGMT":
                         db_interface["type"] = "mgmt"
@@ -1059,11 +1000,13 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                     elif iface.get("virtual-interface").get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
                         db_interface["type"] = "data"
                         db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
+                        flavor_epa_interface["dedicated"] = "no" if iface["virtual-interface"]["type"] == "SR-IOV" \
+                            else "yes"
+                        flavor_epa_interfaces.append(flavor_epa_interface)
                     else:
                         raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vdu[{}]':'interface':'virtual"
                                             "-interface':'type':'{}'. Interface type is not supported".format(
-                                                str(vnfd["id"])[:255], str(vdu["id"])[:255],
-                                                iface.get("virtual-interface").get("type")),
+                                                vnfd_id, vdu_id, iface.get("virtual-interface").get("type")),
                                             HTTP_Bad_Request)
 
                     if iface.get("external-connection-point-ref"):
@@ -1076,6 +1019,15 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                             for cp_descriptor in vnfd_descriptor["connection-point"]:
                                 if cp_descriptor["name"] == db_interface["external_name"]:
                                     break
+                            else:
+                                raise KeyError()
+
+                            if vdu_id in vdu_id2cp_name:
+                                vdu_id2cp_name[vdu_id] = None  # more than one connection point for this VDU
+                            else:
+                                vdu_id2cp_name[vdu_id] = db_interface["external_name"]
+
+                            # port security
                             if str(cp_descriptor.get("port-security-enabled")).lower() == "false":
                                 db_interface["port_security"] = 0
                             elif str(cp_descriptor.get("port-security-enabled")).lower() == "true":
@@ -1084,7 +1036,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                             raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
                                                 "'interface[{iface}]':'vnfd-connection-point-ref':'{cp}' is not present"
                                                 " at connection-point".format(
-                                                    vnf=vnfd["id"], vdu=vdu["id"], iface=iface["name"],
+                                                    vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
                                                     cp=iface.get("vnfd-connection-point-ref")),
                                                 HTTP_Bad_Request)
                     elif iface.get("internal-connection-point-ref"):
@@ -1105,13 +1057,82 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                             raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
                                                 "'interface[{iface}]':'vdu-internal-connection-point-ref':'{cp}' is not"
                                                 " referenced by any internal-vld".format(
-                                                    vnf=vnfd["id"], vdu=vdu["id"], iface=iface["name"],
+                                                    vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
                                                     cp=iface.get("vdu-internal-connection-point-ref")),
                                                 HTTP_Bad_Request)
                     if iface.get("position") is not None:
                         db_interface["created_at"] = int(iface.get("position")) - 1000
                     db_interfaces.append(db_interface)
 
+                # table flavors
+                db_flavor = {
+                    "name": get_str(vdu, "name", 250) + "-flv",
+                    "vcpus": int(vdu["vm-flavor"].get("vcpu-count", 1)),
+                    "ram": int(vdu["vm-flavor"].get("memory-mb", 1)),
+                    "disk": int(vdu["vm-flavor"].get("storage-gb", 1)),
+                }
+                # EPA  TODO revise
+                extended = {}
+                numa = {}
+                if devices:
+                    extended["devices"] = devices
+                if flavor_epa_interfaces:
+                    numa["interfaces"] = flavor_epa_interfaces
+                if vdu.get("guest-epa"):   # TODO or dedicated_int:
+                    epa_vcpu_set = False
+                    if vdu["guest-epa"].get("numa-node-policy"):  # TODO or dedicated_int:
+                        numa_node_policy = vdu["guest-epa"].get("numa-node-policy")
+                        if numa_node_policy.get("node"):
+                            numa_node = numa_node_policy["node"]['0']
+                            if numa_node.get("num-cores"):
+                                numa["cores"] = numa_node["num-cores"]
+                                epa_vcpu_set = True
+                            if numa_node.get("paired-threads"):
+                                if numa_node["paired-threads"].get("num-paired-threads"):
+                                    numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
+                                    epa_vcpu_set = True
+                                if len(numa_node["paired-threads"].get("paired-thread-ids")):
+                                    numa["paired-threads-id"] = []
+                                    for pair in numa_node["paired-threads"]["paired-thread-ids"].itervalues():
+                                        numa["paired-threads-id"].append(
+                                            (str(pair["thread-a"]), str(pair["thread-b"]))
+                                        )
+                            if numa_node.get("num-threads"):
+                                numa["threads"] = int(numa_node["num-threads"])
+                                epa_vcpu_set = True
+                            if numa_node.get("memory-mb"):
+                                numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1)
+                    if vdu["guest-epa"].get("mempage-size"):
+                        if vdu["guest-epa"]["mempage-size"] != "SMALL":
+                            numa["memory"] = max(int(db_flavor["ram"] / 1024), 1)
+                    if vdu["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set:
+                        if vdu["guest-epa"]["cpu-pinning-policy"] == "DEDICATED":
+                            if vdu["guest-epa"].get("cpu-thread-pinning-policy") and \
+                                            vdu["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER":
+                                numa["cores"] = max(db_flavor["vcpus"], 1)
+                            else:
+                                numa["threads"] = max(db_flavor["vcpus"], 1)
+                if numa:
+                    extended["numas"] = [numa]
+                if extended:
+                    extended_text = yaml.safe_dump(extended, default_flow_style=True, width=256)
+                    db_flavor["extended"] = extended_text
+                # look if flavor exist
+                temp_flavor_dict = {'disk': db_flavor.get('disk', 1),
+                                    'ram': db_flavor.get('ram'),
+                                    'vcpus': db_flavor.get('vcpus'),
+                                    'extended': db_flavor.get('extended')
+                                    }
+                existing_flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
+                if existing_flavors:
+                    flavor_uuid = existing_flavors[0]["uuid"]
+                else:
+                    flavor_uuid = str(uuid4())
+                    uuid_list.append(flavor_uuid)
+                    db_flavor["uuid"] = flavor_uuid
+                    db_flavors.append(db_flavor)
+                db_vm["flavor_id"] = flavor_uuid
+
             # VNF affinity and antiaffinity
             for pg in vnfd.get("placement-groups").itervalues():
                 pg_name = get_str(pg, "name", 255)
@@ -1120,7 +1141,7 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
                     if vdu_id not in vdu_id2db_table_index:
                         raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'placement-groups[{pg}]':"
                                             "'member-vdus':'{vdu}'. Reference to a non-existing vdu".format(
-                                                vnf=vnfd["id"], pg=pg_name, vdu=vdu_id),
+                                                vnf=vnfd_id, pg=pg_name, vdu=vdu_id),
                                             HTTP_Bad_Request)
                     db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
                     # TODO consider the case of isolation and not colocation
@@ -1129,19 +1150,24 @@ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
             # VNF mgmt configuration
             mgmt_access = {}
             if vnfd["mgmt-interface"].get("vdu-id"):
-                if vnfd["mgmt-interface"]["vdu-id"] not in vdu_id2uuid:
+                mgmt_vdu_id = get_str(vnfd["mgmt-interface"], "vdu-id", 255)
+                if mgmt_vdu_id not in vdu_id2uuid:
                     raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'vdu-id':"
                                         "'{vdu}'. Reference to a non-existing vdu".format(
-                                            vnf=vnfd["id"], vdu=vnfd["mgmt-interface"]["vdu-id"]),
+                                            vnf=vnfd_id, vdu=mgmt_vdu_id),
                                         HTTP_Bad_Request)
                 mgmt_access["vm_id"] = vdu_id2uuid[vnfd["mgmt-interface"]["vdu-id"]]
+                # if only one cp is defined for this VDU, mark its interface as type "mgmt"
+                if vdu_id2cp_name.get(mgmt_vdu_id):
+                    cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]["type"] = "mgmt"
+
             if vnfd["mgmt-interface"].get("ip-address"):
                 mgmt_access["ip-address"] = str(vnfd["mgmt-interface"].get("ip-address"))
             if vnfd["mgmt-interface"].get("cp"):
                 if vnfd["mgmt-interface"]["cp"] not in cp_name2iface_uuid:
                     raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp':'{cp}'. "
                                         "Reference to a non-existing connection-point".format(
-                                            vnf=vnfd["id"], cp=vnfd["mgmt-interface"]["cp"]),
+                                            vnf=vnfd_id, cp=vnfd["mgmt-interface"]["cp"]),
                                         HTTP_Bad_Request)
                 mgmt_access["vm_id"] = cp_name2vm_uuid[vnfd["mgmt-interface"]["cp"]]
                 mgmt_access["interface_id"] = cp_name2iface_uuid[vnfd["mgmt-interface"]["cp"]]
@@ -2173,7 +2199,8 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                 elif vld.get("provider-network").get("overlay-type") == "VLAN":
                     db_sce_net["type"] = "data"
                 else:
-                    db_sce_net["type"] = "bridge"
+                    # set later to 'bridge' or 'data' depending on the type of the interfaces attached to it
+                    db_sce_net["type"] = None
                 db_sce_nets.append(db_sce_net)
 
                 # ip-profile, link db_ip_profile with db_sce_net
@@ -2197,7 +2224,7 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                                                 str(nsd["id"]), str(vld["id"]), str(iface["member-vnf-index-ref"])),
                                             HTTP_Bad_Request)
 
-                    existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+                    existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid', 'i.type as iface_type'),
                                                     FROM="interfaces as i join vms on i.vm_id=vms.uuid",
                                                     WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
                                                            'external_name': get_str(iface, "vnfd-connection-point-ref",
@@ -2210,6 +2237,8 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                                                 str(iface.get("vnfd-id-ref"))[:255]),
                                             HTTP_Bad_Request)
                     interface_uuid = existing_ifaces[0]["uuid"]
+                    if existing_ifaces[0]["iface_type"] == "data" and not db_sce_net["type"]:
+                        db_sce_net["type"] = "data"
                     sce_interface_uuid = str(uuid4())
                     uuid_list.append(sce_net_uuid)
                     db_sce_interface = {
@@ -2220,6 +2249,8 @@ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
                         # "ip_address": #TODO
                     }
                     db_sce_interfaces.append(db_sce_interface)
+                if not db_sce_net["type"]:
+                    db_sce_net["type"] = "bridge"
 
         db_tables = [
             {"scenarios": db_scenarios},
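
For reference, a minimal sketch (not part of the patch) of how a VDU interface descriptor is turned into the flavor EPA interface entry built in new_vnfd_v3() above; the helper name and the sample shapes are illustrative only.

    # illustrative sketch, assuming the descriptor shape handled by new_vnfd_v3()
    import math

    def epa_interface_from_iface(iface):
        virtual_iface = iface["virtual-interface"]
        entry = {"name": iface["name"]}
        if virtual_iface.get("vpci"):
            entry["vpci"] = virtual_iface["vpci"]
        if virtual_iface.get("bandwidth"):
            # descriptor bandwidth comes in bps; openvim expects Mbps, rounded up
            mbps = int(math.ceil(int(virtual_iface["bandwidth"]) / 1000000.0))
            entry["bandwidth"] = "{} Mbps".format(mbps)
        # SR-IOV maps to a shared VF ("dedicated: no"), PCI-PASSTHROUGH to a full PF
        entry["dedicated"] = "no" if virtual_iface["type"] == "SR-IOV" else "yes"
        return entry
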
index 1f60986..bdfcb15 100644 (file)
@@ -487,9 +487,9 @@ class vimconnector():
                     the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
                 'type': (mandatory) can be one of:
                     'virtual', in this case always connected to a network of type 'net_type=bridge'
-                     'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
+                     'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
                            can be created unconnected
-                     'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                     'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
                      'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
                             are allocated on the same physical NIC
                 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
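
As a usage sketch of the documented contract (field names are taken from the new_vminstance() docstrings and code touched by this patch; the concrete values and the net_id are made up), a net_list entry requesting an SR-IOV interface could look like:

    # hypothetical net_list entry passed to vimconnector.new_vminstance()
    sriov_net = {
        "name": "xe0",
        "net_id": "<vim-network-uuid>",   # VIM network the VF is attached to
        "type": "SR-IOV",                 # 'VF' is still accepted as the legacy spelling
        "vpci": "0000:00:0b.0",           # requested PCI address inside the guest
        "bw": 10,                         # minimal bandwidth in Gbps
        "use": "data",
    }
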
index 0e1f573..12b2411 100644 (file)
@@ -605,9 +605,9 @@ class vimconnector(vimconn.vimconnector):
                     mac_address: (optional) mac address to assign to this interface
                     type: (mandatory) can be one of:
                         virtual, in this case always connected to a network of type 'net_type=bridge'
-                        PF - (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
-                               can created unconnected
-                        VF - (SRIOV with VLAN tag): same as PF for network connectivity.
+                        'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
+                           can be created unconnected
+                        'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
                         VFnotShared - (SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
                             are allocated on the same physical NIC
                     bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
index c0f1b4d..7b1415b 100644 (file)
@@ -947,7 +947,7 @@ class vimconnector(vimconn.vimconnector):
                 model: interface model, ignored #TODO
                 mac_address: used for  SR-IOV ifaces #TODO for other types
                 use: 'data', 'bridge',  'mgmt'
-                type: 'virtual', 'PF', 'VF', 'VFnotShared'
+                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                 vim_id: filled/added by this function
                 floating_ip: True/False (or it can be None)
                 'cloud_config': (optional) dictionary with:
@@ -1001,7 +1001,7 @@ class vimconnector(vimconn.vimconnector):
                     pass
                     # if "vpci" in net:
                     #     metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
-                elif net["type"]=="VF": # for VF
+                elif net["type"] == "VF" or net["type"] == "SR-IOV":  # for VF
                     # if "vpci" in net:
                     #     if "VF" not in metadata_vpci:
                     #         metadata_vpci["VF"]=[]
@@ -1013,7 +1013,7 @@ class vimconnector(vimconn.vimconnector):
                         port_dict["port_security_enabled"]=False
                         port_dict["provider_security_groups"]=[]
                         port_dict["security_groups"]=[]
-                else: #For PT
+                else:   # for PT (PCI-PASSTHROUGH)
                     # VIO specific Changes
                     # Current VIO release does not support port with type 'direct-physical'
                     # So no need to create virtual port in case of PCI-device.
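
The OpenStack connector now accepts the descriptor-level names alongside the legacy ones; a condensed, illustrative view of the branching above (the surrounding port handling is omitted):

    # illustrative classification mirroring the branches above
    def classify_iface_type(iface_type):
        if iface_type == "virtual":
            return "virtio"           # regular Neutron port
        if iface_type in ("VF", "SR-IOV"):
            return "sriov"            # VF port; port security is disabled for these ports
        return "passthrough"          # "PF" / "PCI-PASSTHROUGH"
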
index b45edf6..b34fdf0 100644 (file)
@@ -32,6 +32,7 @@ import requests
 import json
 import yaml
 import logging
+import math
 from openmano_schemas import id_schema, name_schema, nameshort_schema, description_schema, \
                             vlan1000_schema, integer0_schema
 from jsonschema import validate as js_v, exceptions as js_e
@@ -599,6 +600,18 @@ class vimconnector(vimconn.vimconnector):
             for device in new_flavor_dict.get('extended', {}).get('devices', ()):
                 if 'image name' in device:
                     del device['image name']
+            numas = new_flavor_dict.get('extended', {}).get('numas')
+            if numas:
+                numa = numas[0]
+                # translate memory, cpus to EPA
+                if "cores" not in numa and "threads" not in numa and "paired-threads" not in numa:
+                    numa["paired-threads"] = new_flavor_dict["vcpus"]
+                if "memory" not in numa:
+                    numa["memory"] = int(math.ceil(new_flavor_dict["ram"]/1024.0))
+                for iface in numa.get("interfaces", ()):
+                    if not iface.get("bandwidth"):
+                        iface["bandwidth"] = "1 Mbps"
+
             new_flavor_dict["name"] = flavor_data["name"][:64]
             self._get_my_tenant()
             payload_req = json.dumps({'flavor': new_flavor_dict})
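
A worked example of the EPA defaulting added above (all values are illustrative): a flavor with 4 vcpus, 6144 MB of RAM and one SR-IOV interface that carries no explicit EPA settings is completed as follows.

    # illustrative input for the defaulting block above
    new_flavor_dict = {
        "vcpus": 4, "ram": 6144, "disk": 20,
        "extended": {"numas": [{"interfaces": [{"name": "xe0", "dedicated": "no"}]}]},
    }
    # after the block above runs:
    #   numa["paired-threads"] == 4             # no cores/threads/paired-threads given
    #   numa["memory"] == 6                     # int(math.ceil(6144/1024.0))
    #   interfaces[0]["bandwidth"] == "1 Mbps"  # no bandwidth requested
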
@@ -786,7 +799,7 @@ class vimconnector(vimconn.vimconnector):
 
     def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
                        availability_zone_index=None, availability_zone_list=None):
-        '''Adds a VM instance to VIM
+        """Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
             image_id,flavor_id: image and flavor uuid
@@ -797,7 +810,7 @@ class vimconnector(vimconn.vimconnector):
                 model: interface model, virtio, e2000, ...
                 mac_address: 
                 use: 'data', 'bridge',  'mgmt'
-                type: 'virtual', 'PF', 'VF', 'VFnotShared'
+                type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
                 vim_id: filled/added by this function
                 #TODO ip, security groups
         Returns a tuple with the instance identifier and created_items or raises an exception on error
@@ -805,7 +818,7 @@ class vimconnector(vimconn.vimconnector):
             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
             as not present.
-        '''
+        """
         self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'", image_id, flavor_id, str(net_list))
         try:
             self._get_my_tenant()
@@ -818,12 +831,25 @@ class vimconnector(vimconn.vimconnector):
             for net in net_list:
                 if not net.get("net_id"):
                     continue
-                net_dict={'uuid': net["net_id"]}
-                if net.get("type"):        net_dict["type"] = net["type"]
-                if net.get("name"):        net_dict["name"] = net["name"]
-                if net.get("vpci"):        net_dict["vpci"] = net["vpci"]
-                if net.get("model"):       net_dict["model"] = net["model"]
-                if net.get("mac_address"): net_dict["mac_address"] = net["mac_address"]
+                net_dict = {'uuid': net["net_id"]}
+                if net.get("type"):
+                    if net["type"] == "SR-IOV":
+                        net_dict["type"] = "VF"
+                    elif net["type"] == "PCI-PASSTHROUGH":
+                        net_dict["type"] = "PF"
+                    else:
+                        net_dict["type"] = net["type"]
+                if net.get("name"):
+                    net_dict["name"] = net["name"]
+                if net.get("vpci"):
+                    net_dict["vpci"] = net["vpci"]
+                if net.get("model"):
+                    if net["model"] == "VIRTIO":
+                        net_dict["model"] = "virtio"
+                    else:
+                        net_dict["model"] = net["model"]
+                if net.get("mac_address"):
+                    net_dict["mac_address"] = net["mac_address"]
                 virtio_net_list.append(net_dict)
             payload_dict={  "name":        name[:64],
                             "description": description,
index f99ef65..d1c3977 100644 (file)
@@ -1388,9 +1388,9 @@ class vimconnector(vimconn.vimconnector):
                     the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
                 'type': (mandatory) can be one of:
                     'virtual', in this case always connected to a network of type 'net_type=bridge'
-                     'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
+                     'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
                            can be created unconnected
-                     'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                     'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
                      'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
                             are allocated on the same physical NIC
                 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
@@ -1564,9 +1564,9 @@ class vimconnector(vimconn.vimconnector):
         reserve_memory = False
 
         for net in net_list:
-            if net["type"]=="PF":
+            if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
                 pci_devices_info.append(net)
-            elif  (net["type"]=="VF" or  net["type"]=="VFnotShared") and 'net_id'in net:
+            elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id' in net:
                 sriov_net_info.append(net)
 
         #Add PCI
@@ -1670,7 +1670,7 @@ class vimconnector(vimconn.vimconnector):
                             self.vca.block_until_completed(task)
                         # connect network to VM - with all DHCP by default
 
-                        type_list = ['PF','VF','VFnotShared']
+                        type_list = ('PF', 'PCI-PASSTHROUGH', 'VF', 'SR-IOV', 'VFnotShared')
                         if 'type' in net and net['type'] not in type_list:
                             # fetching nic type from vnf
                             if 'model' in net:
@@ -4637,7 +4637,7 @@ class vimconnector(vimconn.vimconnector):
                             for sriov_net in sriov_nets:
                                 network_name = sriov_net.get('net_id')
                                 dvs_portgr_name = self.create_dvPort_group(network_name)
-                                if sriov_net.get('type') == "VF":
+                                if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
                                     #add vlan ID ,Modify portgroup for vlan ID
                                     self.configure_vlanID(content, vcenter_conect, network_name)
 
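
The vmware connector above now recognizes both spellings when it splits the requested interfaces into passthrough devices and SR-IOV ports; a condensed sketch of that classification (illustrative only, the real method does considerably more around it):

    # illustrative classification of net_list entries, mirroring the hunk above
    PASSTHROUGH_TYPES = ("PF", "PCI-PASSTHROUGH")
    SRIOV_TYPES = ("VF", "SR-IOV", "VFnotShared")

    net_list = [{"name": "xe0", "type": "SR-IOV", "net_id": "<vim-net-uuid>"},
                {"name": "pt0", "type": "PCI-PASSTHROUGH"}]

    pci_devices_info = [net for net in net_list if net["type"] in PASSTHROUGH_TYPES]
    sriov_net_info = [net for net in net_list
                      if net["type"] in SRIOV_TYPES and "net_id" in net]
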
diff --git a/scenarios/examples/v3_2linux_sriov.yaml b/scenarios/examples/v3_2linux_sriov.yaml
new file mode 100644 (file)
index 0000000..74d1c3a
--- /dev/null
@@ -0,0 +1,73 @@
+nsd:nsd-catalog:
+    nsd:
+    -   id:          2linux-sriov
+        name:        2linux_sriov
+        short-name:  2linux_sriov
+        description: Generated by OSM package generator
+        vendor:      OSM
+        version:     '1.0'
+
+        # Place the logo as png in icons directory and provide the name here
+        logo: osm_2x.png
+
+        # Specify the VNFDs that are part of this NSD
+        constituent-vnfd:
+            # The member-vnf-index needs to be unique, starting from 1
+            # vnfd-id-ref is the id of the VNFD
+            # Multiple constituent VNFDs can be specified
+        -   member-vnf-index: 1
+            vnfd-id-ref: linux-sriov
+        -   member-vnf-index: 2
+            vnfd-id-ref: linux-sriov
+        scaling-group-descriptor:
+        -   name: "scaling_cirros"
+            vnfd-member:
+            -   count: 1
+                member-vnf-index-ref: 1
+            min-instance-count: 0
+            max-instance-count: 10
+            scaling-policy:
+            -   scaling-type: "manual"
+                cooldown-time: 10
+                threshold-time: 10
+                name: manual_scale
+        vld:
+        # Networks for the VNFs
+        -   id: mgmt
+            mgmt-network: 'true'
+            name: mgmt
+            type: ELAN
+            # vim-network-name: <update>
+            # provider-network:
+            #     overlay-type: VLAN
+            #     segmentation_id: <update>
+            vnfd-connection-point-ref:
+            # Specify the constituent VNFs
+            # member-vnf-index-ref - entry from constituent vnf
+            # vnfd-id-ref - VNFD id
+            # vnfd-connection-point-ref - connection point name in the VNFD
+            -   member-vnf-index-ref: 1
+                vnfd-id-ref: linux-sriov
+                vnfd-connection-point-ref: eth0
+            -   member-vnf-index-ref: 2
+                vnfd-id-ref: linux-sriov
+                vnfd-connection-point-ref: eth0
+        -   id: sriov-vld
+            name: sriov_vld
+            type: ELAN
+            # vim-network-name: <update>
+            # provider-network:
+            #     overlay-type: VLAN
+            #     segmentation_id: <update>
+            vnfd-connection-point-ref:
+            # Specify the constituent VNFs
+            # member-vnf-index-ref - entry from constituent vnf
+            # vnfd-id-ref - VNFD id
+            # vnfd-connection-point-ref - connection point name in the VNFD
+            -   member-vnf-index-ref: 1
+                vnfd-id-ref: linux-sriov
+                vnfd-connection-point-ref: sriov0
+            -   member-vnf-index-ref: 2
+                vnfd-id-ref: linux-sriov
+                vnfd-connection-point-ref: sriov0
+
index ffe8219..361805e 100644 (file)
@@ -38,6 +38,7 @@ nsd:nsd-catalog:
         vld:
         # Networks for the VNFs
             -   id: vld1
+                mgmt-network: 'true'
                 name: vld1-name
                 short-name: vld1-sname
                 type: ELAN
index 203f40d..6d56e9d 100644 (file)
@@ -39,7 +39,7 @@ vnfd:vnfd-catalog:
                 type: EXTERNAL
                 position: 0
                 virtual-interface:
-                    type: OM-MGMT
+                    type: VIRTIO
                     bandwidth: '0'
                 # vnfd-connection-point-ref: eth0
                 external-connection-point-ref: eth0
diff --git a/vnfs/examples/v3_linux_sriov.yaml b/vnfs/examples/v3_linux_sriov.yaml
new file mode 100644 (file)
index 0000000..78bdb83
--- /dev/null
@@ -0,0 +1,53 @@
+vnfd:vnfd-catalog:
+    vnfd:
+    -   id: linux-sriov
+        name: linux_sriov
+        short-name: linux_sriov
+        description: Simple VNF example with an Ubuntu VM using SR-IOV
+        vendor: OSM
+        version: '1.0'
+
+        # Place the logo as png in icons directory and provide the name here
+        logo: cirros-64.png
+
+        # Management interface
+        mgmt-interface:
+            cp: eth0
+
+        # At least one VDU needs to be specified
+        vdu:
+        -   id: linux-sriov-VM
+            name: linux_sriov_VM
+            description: linux_sriov_VM
+            count: 1
+
+            # Flavour of the VM to be instantiated for the VDU
+            vm-flavor:
+                vcpu-count: 1
+                memory-mb: 2048
+                storage-gb: 20
+
+            # Image/checksum or image including the full path
+            image: ubuntu16.04
+            #checksum: 
+
+            interface:
+            # Specify the external interfaces
+            -   name: eth0
+                type: EXTERNAL
+                virtual-interface:
+                    type: VIRTIO
+                external-connection-point-ref: eth0
+            -   name: sriov0
+                type: EXTERNAL
+                virtual-interface:
+                    type: SR-IOV
+                    bandwidth: '0'
+                    vpci: 0000:00:0b.0
+                external-connection-point-ref: sriov0
+        connection-point:
+            -   name: eth0
+                type: VPORT
+            -   name: sriov0
+                type: VPORT
+