Merge branch 'packaging' 07/1607/3
authortierno <alfonso.tiernosepulveda@telefonica.com>
Thu, 20 Apr 2017 16:56:07 +0000 (18:56 +0200)
committertierno <alfonso.tiernosepulveda@telefonica.com>
Thu, 20 Apr 2017 16:57:01 +0000 (18:57 +0200)
Change-Id: I58a236852dab90f025d8300bbbf508d368d799c0
Signed-off-by: tierno <alfonso.tiernosepulveda@telefonica.com>
33 files changed:
database_utils/migrate_mano_db.sh
openmano
openmanod
osm_ro/db_base.py
osm_ro/httpserver.py
osm_ro/nfvo.py
osm_ro/nfvo_db.py
osm_ro/openmano_schemas.py
osm_ro/openmanoclient.py
osm_ro/openmanod.cfg
osm_ro/vim_thread.py
osm_ro/vimconn.py
osm_ro/vimconn_openstack.py
osm_ro/vimconn_vmware.py
scripts/install-openmano.sh
sdn/sdn_port_mapping.yaml [new file with mode: 0644]
test/RO_tests/passthrough/scenario_p2p_passthrough.yaml [new file with mode: 0644]
test/RO_tests/passthrough/vnfd_1passthrough.yaml [new file with mode: 0644]
test/RO_tests/pmp_passthrough/scenario_pmp_passthrough.yaml [new file with mode: 0644]
test/RO_tests/pmp_passthrough/vnfd_1passthrough.yaml [new file with mode: 0644]
test/RO_tests/pmp_sriov/scenario_pmp_sriov.yaml [new file with mode: 0644]
test/RO_tests/pmp_sriov/vnfd_1sriov.yaml [new file with mode: 0644]
test/RO_tests/pmp_sriov_passthrough/scenario_pmp_sriov_passthrough.yaml [new file with mode: 0644]
test/RO_tests/pmp_sriov_passthrough/vnfd_1passthrough.yaml [new file with mode: 0644]
test/RO_tests/pmp_sriov_passthrough/vnfd_1sriov.yaml [new file with mode: 0644]
test/RO_tests/simple_2_vnf/scenario_simple_2_vnf.yaml [new file with mode: 0644]
test/RO_tests/simple_2_vnf/vnfd_linux.yaml [new file with mode: 0644]
test/RO_tests/sr_iov/scenario_p2p_sriov.yaml [new file with mode: 0644]
test/RO_tests/sr_iov/vnfd_1sriov.yaml [new file with mode: 0644]
test/RO_tests/sriov_passthrough/scenario_p2p_sriov_passthrough.yaml [new file with mode: 0644]
test/RO_tests/sriov_passthrough/vnfd_1passthrough.yaml [new file with mode: 0644]
test/RO_tests/sriov_passthrough/vnfd_1sriov.yaml [new file with mode: 0644]
test/test_RO.py

index a8ec8f9..ba8a829 100755 (executable)
@@ -187,6 +187,7 @@ DATABASE_TARGET_VER_NUM=0
 [ $OPENMANO_VER_NUM -ge 5003 ] && DATABASE_TARGET_VER_NUM=17  #0.5.3 =>  17
 [ $OPENMANO_VER_NUM -ge 5004 ] && DATABASE_TARGET_VER_NUM=18  #0.5.4 =>  18
 [ $OPENMANO_VER_NUM -ge 5005 ] && DATABASE_TARGET_VER_NUM=19  #0.5.5 =>  19
+[ $OPENMANO_VER_NUM -ge 5009 ] && DATABASE_TARGET_VER_NUM=20  #0.5.9 =>  20
 #TODO ... put next versions here
 
 
@@ -730,6 +731,27 @@ function downgrade_from_19(){
     echo "DELETE FROM schema_version WHERE version_int='19';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
 }
 
+function upgrade_to_20(){
+    echo "    upgrade database from version 0.19 to version 0.20"
+    echo "      add column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
+    echo "ALTER TABLE instance_nets ADD sdn_net_id varchar(36) DEFAULT NULL NULL COMMENT 'Network id in ovim';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE instance_interfaces ADD sdn_port_id varchar(36) DEFAULT NULL NULL COMMENT 'Port id in ovim';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE instance_interfaces ADD compute_node varchar(100) DEFAULT NULL NULL COMMENT 'Compute node id used to specify the SDN port mapping';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE instance_interfaces ADD pci varchar(12) DEFAULT NULL NULL COMMENT 'PCI of the physical port in the host';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE instance_interfaces ADD vlan SMALLINT UNSIGNED DEFAULT NULL NULL COMMENT 'VLAN tag used by the port';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (20, '0.20', '0.5.9', 'Added columns to store dataplane connectivity info', '2017-03-13');" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+function downgrade_from_20(){
+    echo "    downgrade database from version 0.20 to version 0.19"
+    echo "      remove column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
+    echo "ALTER TABLE instance_nets DROP COLUMN sdn_net_id;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE instance_interfaces DROP COLUMN vlan;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE instance_interfaces DROP COLUMN pci;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE instance_interfaces DROP COLUMN compute_node;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "ALTER TABLE instance_interfaces DROP COLUMN sdn_port_id;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "DELETE FROM schema_version WHERE version_int='20';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+
 function upgrade_to_X(){
     echo "      change 'datacenter_nets'"
     echo "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
index c34d831..45db340 100755 (executable)
--- a/openmano
+++ b/openmano
 '''
 openmano client used to interact with openmano-server (openmanod) 
 '''
-__author__="Alfonso Tierno, Gerardo Garcia"
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ ="$09-oct-2014 09:09:48$"
-__version__="0.4.11-r517"
-version_date="Jan 2017"
+__version__="0.4.13-r519"
+version_date="Mar 2017"
 
 from argcomplete.completers import FilesCompleter
 import os
@@ -120,6 +120,7 @@ def _print_verbose(mano_response, verbose_level=0):
         return result
 
     if mano_response.status_code == 200:
+        uuid = None
         for content in content_list:
             if "uuid" in content:
                 uuid = content['uuid']
@@ -931,6 +932,38 @@ def datacenter_attach(args):
             print "Try to specify a different name with --vim-tenant-name"
     return result
 
+
+def datacenter_edit_vim_tenant(args):
+    tenant = _get_tenant()
+    datacenter = _get_datacenter(args.name)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+
+    if not (args.vim_tenant_id or args.vim_tenant_name or args.user or args.password or args.config):
+        raise OpenmanoCLIError("Error. At least one parameter must be updated.")
+
+    datacenter_dict = {}
+    if args.vim_tenant_id != None:
+        datacenter_dict['vim_tenant'] = args.vim_tenant_id
+    if args.vim_tenant_name != None:
+        datacenter_dict['vim_tenant_name'] = args.vim_tenant_name
+    if args.user != None:
+        datacenter_dict['vim_username'] = args.user
+    if args.password != None:
+        datacenter_dict['vim_password'] = args.password
+    if args.config != None:
+        datacenter_dict["config"] = _load_file_or_yaml(args.config)
+    payload_req = json.dumps({"datacenter": datacenter_dict})
+
+    # print payload_req
+
+    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s" % (mano_host, mano_port, tenant, datacenter)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    result = _print_verbose(mano_response, args.verbose)
+
+    return result
+
 def datacenter_detach(args):
     if args.all:
         tenant = "any"
@@ -960,7 +993,13 @@ def datacenter_create(args):
     if args.url!=None:
         datacenter_dict["vim_url_admin"] = args.url_admin 
     if args.config!=None:
-        datacenter_dict["config"] = _load_file_or_yaml(args.config) 
+        datacenter_dict["config"] = _load_file_or_yaml(args.config)
+    if args.sdn_controller!=None:
+        tenant = _get_tenant()
+        sdn_controller = _get_item_uuid("sdn_controllers", args.sdn_controller, tenant)
+        if not 'config' in datacenter_dict:
+            datacenter_dict['config'] = {}
+        datacenter_dict['config']['sdn-controller'] = sdn_controller
     payload_req = json.dumps( {"datacenter": datacenter_dict })
     
     #print payload_req
@@ -1007,6 +1046,181 @@ def datacenter_list(args):
         args.verbose += 1
     return _print_verbose(mano_response, args.verbose)
 
+def datacenter_sdn_port_mapping_set(args):
+    tenant = _get_tenant()
+    datacenter = _get_datacenter(args.name, tenant)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+
+    if not args.file:
+        raise OpenmanoCLIError(
+            "No yaml/json has been provided specifying the SDN port mapping")
+
+    port_mapping = yaml.load(datacenter_sdn_port_mapping_list(args))
+    if len(port_mapping["sdn_port_mapping"]["ports_mapping"]) > 0:
+        if not args.force:
+            r = raw_input("Datacenter %s already contains a port mapping. Overwrite? (y/N)? " % (datacenter))
+            if not (len(r) > 0 and r[0].lower() == "y"):
+                return 0
+        args.force = True
+        print datacenter_sdn_port_mapping_clear(args)
+
+    sdn_port_mapping = _load_file_or_yaml(args.file)
+    payload_req = json.dumps({"sdn_port_mapping": sdn_port_mapping})
+
+    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+
+    if mano_response.status_code == 200:
+        return yaml.safe_dump(mano_response.json())
+    else:
+        return mano_response.content
+
+def datacenter_sdn_port_mapping_list(args):
+    tenant = _get_tenant()
+    datacenter = _get_datacenter(args.name, tenant)
+
+    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text)
+
+    if mano_response.status_code != 200:
+        return mano_response.content
+
+    return yaml.safe_dump(mano_response.json())
+
+def datacenter_sdn_port_mapping_clear(args):
+    tenant = _get_tenant()
+    datacenter = _get_datacenter(args.name, tenant)
+
+    if not args.force:
+        r = raw_input("Clean SDN port mapping for datacenter %s (y/N)? " %(datacenter))
+        if  not (len(r)>0  and r[0].lower()=="y"):
+            return 0
+
+    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
+    mano_response = requests.delete(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text)
+
+    if mano_response.status_code != 200:
+        if "No port mapping for datacenter" in mano_response.content:
+            return "No port mapping for datacenter " + datacenter + " has been found"
+        return mano_response.content
+
+    return yaml.safe_dump(mano_response.json())
+
+def sdn_controller_create(args):
+    tenant = _get_tenant()
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+
+    if not (args.ip and args.port and args.dpid and args.type):
+        raise OpenmanoCLIError("The following arguments are required: ip, port, dpid, type")
+
+    controller_dict = {}
+    controller_dict['name'] = args.name
+    controller_dict['ip'] = args.ip
+    controller_dict['port'] = int(args.port)
+    controller_dict['dpid'] = args.dpid
+    controller_dict['type'] = args.type
+    if args.description != None:
+        controller_dict['description'] = args.description
+    if args.user != None:
+        controller_dict['user'] = args.user
+    if args.password != None:
+        controller_dict['password'] = args.password
+
+    payload_req = json.dumps({"sdn_controller": controller_dict})
+
+    # print payload_req
+
+    URLrequest = "http://%s:%s/openmano/%s/sdn_controllers" % (mano_host, mano_port, tenant)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    result = _print_verbose(mano_response, args.verbose)
+
+    return result
+
+def sdn_controller_edit(args):
+    tenant = _get_tenant()
+    controller_uuid = _get_item_uuid("sdn_controllers", args.name, tenant)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+
+    if not (args.new_name or args.ip or args.port or args.dpid or args.type):
+        raise OpenmanoCLIError("At least one parameter must be editd")
+
+    if not args.force:
+        r = raw_input("Update SDN controller %s (y/N)? " %(args.name))
+        if  not (len(r)>0  and r[0].lower()=="y"):
+            return 0
+
+    controller_dict = {}
+    if args.new_name != None:
+        controller_dict['name'] = args.new_name
+    if args.ip != None:
+        controller_dict['ip'] = args.ip
+    if args.port != None:
+        controller_dict['port'] = int(args.port)
+    if args.dpid != None:
+        controller_dict['dpid'] = args.dpid
+    if args.type != None:
+        controller_dict['type'] = args.type
+    if args.description != None:
+        controller_dict['description'] = args.description
+    if args.user != None:
+        controller_dict['user'] = args.user
+    if args.password != None:
+        controller_dict['password'] = args.password
+
+    payload_req = json.dumps({"sdn_controller": controller_dict})
+
+    # print payload_req
+
+    URLrequest = "http://%s:%s/openmano/%s/sdn_controllers/%s" % (mano_host, mano_port, tenant, controller_uuid)
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    result = _print_verbose(mano_response, args.verbose)
+
+    return result
+
+def sdn_controller_list(args):
+    tenant = _get_tenant()
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+
+    if args.name:
+        toshow = _get_item_uuid("sdn_controllers", args.name, tenant)
+        URLrequest = "http://%s:%s/openmano/%s/sdn_controllers/%s" %(mano_host, mano_port, tenant, toshow)
+    else:
+        URLrequest = "http://%s:%s/openmano/%s/sdn_controllers" %(mano_host, mano_port, tenant)
+    #print URLrequest
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text )
+    if args.verbose==None:
+        args.verbose=0
+    if args.name!=None:
+        args.verbose += 1
+
+    result = json.dumps(mano_response.json(), indent=4)
+    return result
+
+def sdn_controller_delete(args):
+    tenant = _get_tenant()
+    controller_uuid = _get_item_uuid("sdn_controllers", args.name, tenant)
+
+    if not args.force:
+        r = raw_input("Delete SDN controller %s (y/N)? " % (args.name))
+        if not (len(r) > 0 and r[0].lower() == "y"):
+            return 0
+
+    URLrequest = "http://%s:%s/openmano/%s/sdn_controllers/%s" % (mano_host, mano_port, tenant, controller_uuid)
+    mano_response = requests.delete(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text)
+    result = _print_verbose(mano_response, args.verbose)
+
+    return result
+
 def vim_action(args):
     #print "datacenter-net-action",args
     tenant = _get_tenant()
@@ -1045,11 +1259,11 @@ def vim_action(args):
             create_dict[args.item]['name'] = args.name
         #if args.description:
         #    create_dict[args.item]['description'] = args.description
-        if args.item=="vim-net":
+        if args.item=="network":
             if args.bind_net:
                 create_dict[args.item]['bind_net'] = args.bind_net
-            if args.bind_type:
-                create_dict[args.item]['bind_type'] = args.bind_type
+            if args.type:
+                create_dict[args.item]['type'] = args.type
             if args.shared:
                 create_dict[args.item]['shared'] = args.shared
         if "name" not in create_dict[args.item]:
@@ -1201,6 +1415,53 @@ def element_edit(args):
     return _print_verbose(mano_response, args.verbose)
 
 
+def datacenter_edit(args):
+    tenant = _get_tenant()
+    element = _get_item_uuid('datacenters', args.name, tenant)
+    headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
+    URLrequest = "http://%s:%s/openmano/datacenters/%s" % (mano_host, mano_port, element)
+
+    has_arguments = False
+    if args.file != None:
+        has_arguments = True
+        payload = _load_file_or_yaml(args.file)
+    else:
+        payload = {}
+
+    if args.sdn_controller != None:
+        has_arguments = True
+        if not 'config' in payload:
+            payload['config'] = {}
+        if not 'sdn-controller' in payload['config']:
+            payload['config']['sdn-controller'] = {}
+        if args.sdn_controller == 'null':
+            payload['config']['sdn-controller'] = None
+        else:
+            payload['config']['sdn-controller'] = _get_item_uuid("sdn_controllers", args.sdn_controller, tenant)
+
+    if not has_arguments:
+        raise OpenmanoCLIError("At least one argument must be provided to modify the datacenter")
+
+    if 'datacenter' not in payload:
+        payload = {'datacenter': payload}
+    payload_req = json.dumps(payload)
+
+    # print payload_req
+    if not args.force or (args.name == None and args.file == None):
+        r = raw_input(" Edit datacenter " + args.name + " (y/N)? ")
+        if len(r) > 0 and r[0].lower() == "y":
+            pass
+        else:
+            return 0
+    logger.debug("openmano request: %s", payload_req)
+    mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
+    logger.debug("openmano response: %s", mano_response.text)
+    if args.verbose == None:
+        args.verbose = 0
+    if args.name != None:
+        args.verbose += 1
+    return _print_verbose(mano_response, args.verbose)
+
 global mano_host
 global mano_port
 global mano_tenant
@@ -1329,7 +1590,7 @@ if __name__=="__main__":
     tenant_list_parser.add_argument("name", nargs='?', help="name or uuid of the tenant")
     tenant_list_parser.set_defaults(func=tenant_list)
 
-    item_list=('tenant','datacenter') #put tenant before so that help appear in order
+    item_list=('tenant',) #one-element tuple; a bare ('tenant') is a string and would be iterated character by character
     for item in item_list:
         element_edit_parser = subparsers.add_parser(item+'-edit', parents=[parent_parser], help="edits one "+item)
         element_edit_parser.add_argument("name", help="name or uuid of the "+item)
@@ -1344,6 +1605,7 @@ if __name__=="__main__":
     datacenter_create_parser.add_argument("--type", action="store", help="datacenter type: openstack or openvim (default)")
     datacenter_create_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format")
     datacenter_create_parser.add_argument("--description", action="store", help="description of the datacenter")
+    datacenter_create_parser.add_argument("--sdn-controller", action="store", help="Name or uuid of the SDN controller to be used", dest='sdn_controller')
     datacenter_create_parser.set_defaults(func=datacenter_create)
 
     datacenter_delete_parser = subparsers.add_parser('datacenter-delete', parents=[parent_parser], help="deletes a datacenter from the catalogue")
@@ -1351,6 +1613,14 @@ if __name__=="__main__":
     datacenter_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
     datacenter_delete_parser.set_defaults(func=datacenter_delete)
 
+    datacenter_edit_parser = subparsers.add_parser('datacenter-edit', parents=[parent_parser], help="Edit datacenter")
+    datacenter_edit_parser.add_argument("name", help="name or uuid of the datacenter")
+    datacenter_edit_parser.add_argument("--file", help="json/yaml text or file with the changes").completer = FilesCompleter
+    datacenter_edit_parser.add_argument("--sdn-controller", action="store",
+                                          help="Name or uuid of the SDN controller to be used. Specify 'null' to clear entry", dest='sdn_controller')
+    datacenter_edit_parser.add_argument("-f", "--force", action="store_true", help="do not prompt for confirmation")
+    datacenter_edit_parser.set_defaults(func=datacenter_edit)
+
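For illustration, a hypothetical invocation of the new subcommand (datacenter and controller names are made up); per the help text above, 'null' clears the entry:

    openmano datacenter-edit myDC --sdn-controller myctrl
    openmano datacenter-edit myDC --sdn-controller null -f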
     datacenter_list_parser = subparsers.add_parser('datacenter-list', parents=[parent_parser], help="lists information about a datacenter")
     datacenter_list_parser.add_argument("name", nargs='?', help="name or uuid of the datacenter")
     datacenter_list_parser.add_argument("-a", "--all", action="store_true", help="shows all datacenters, not only datacenters attached to tenant")
@@ -1365,11 +1635,107 @@ if __name__=="__main__":
     datacenter_attach_parser.add_argument("--config", action="store", help="aditional configuration in json/yaml format")
     datacenter_attach_parser.set_defaults(func=datacenter_attach)
 
+    datacenter_edit_vim_tenant_parser = subparsers.add_parser('datacenter-edit-vim-tenant', parents=[parent_parser],
+                                                     help="Edit the association of a datacenter with the operating tenant")
+    datacenter_edit_vim_tenant_parser.add_argument("name", help="name or uuid of the datacenter")
+    datacenter_edit_vim_tenant_parser.add_argument('--vim-tenant-id', action='store',
+                                          help="specify a datacenter tenant to use. A new one is created by default")
+    datacenter_edit_vim_tenant_parser.add_argument('--vim-tenant-name', action='store', help="specify a datacenter tenant name.")
+    datacenter_edit_vim_tenant_parser.add_argument("--user", action="store", help="user credentials for the datacenter")
+    datacenter_edit_vim_tenant_parser.add_argument("--password", action="store", help="password credentials for the datacenter")
+    datacenter_edit_vim_tenant_parser.add_argument("--config", action="store",
+                                          help="additional configuration in json/yaml format")
+    datacenter_edit_vim_tenant_parser.set_defaults(func=datacenter_edit_vim_tenant)
+
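A usage sketch with placeholder values, updating only the credentials of an existing association:

    openmano datacenter-edit-vim-tenant myDC --user newuser --password newpass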
     datacenter_detach_parser = subparsers.add_parser('datacenter-detach', parents=[parent_parser], help="removes the association between a datacenter and the operating tenant")
     datacenter_detach_parser.add_argument("name", help="name or uuid of the datacenter")
     datacenter_detach_parser.add_argument("-a", "--all", action="store_true", help="removes all associations from this datacenter")
     datacenter_detach_parser.set_defaults(func=datacenter_detach)
 
+    #=======================datacenter_sdn_port_mapping_xxx section=======================
+    #datacenter_sdn_port_mapping_set
+    datacenter_sdn_port_mapping_set_parser = subparsers.add_parser('datacenter-sdn-port-mapping-set',
+                                                                   parents=[parent_parser],
+                                                                   help="Load a file with the mapping of physical ports "
+                                                                        "and the ports of the dataplane switch controlled "
+                                                                        "by a datacenter")
+    datacenter_sdn_port_mapping_set_parser.add_argument("name", action="store", help="specifies the datacenter")
+    datacenter_sdn_port_mapping_set_parser.add_argument("file",
+                                                        help="json/yaml text or file with the port mapping").completer = FilesCompleter
+    datacenter_sdn_port_mapping_set_parser.add_argument("-f", "--force", action="store_true",
+                                                          help="forces overwriting without asking")
+    datacenter_sdn_port_mapping_set_parser.set_defaults(func=datacenter_sdn_port_mapping_set)
+
+    #datacenter_sdn_port_mapping_list
+    datacenter_sdn_port_mapping_list_parser = subparsers.add_parser('datacenter-sdn-port-mapping-list',
+                                                                    parents=[parent_parser],
+                                                                    help="Show the SDN port mapping in a datacenter")
+    datacenter_sdn_port_mapping_list_parser.add_argument("name", action="store", help="specifies the datacenter")
+    datacenter_sdn_port_mapping_list_parser.set_defaults(func=datacenter_sdn_port_mapping_list)
+
+    # datacenter_sdn_port_mapping_clear
+    datacenter_sdn_port_mapping_clear_parser = subparsers.add_parser('datacenter-sdn-port-mapping-clear',
+                                                                    parents=[parent_parser],
+                                                                    help="Clean the SDN port mapping in a datacenter")
+    datacenter_sdn_port_mapping_clear_parser.add_argument("name", action="store",
+                                                         help="specifies the datacenter")
+    datacenter_sdn_port_mapping_clear_parser.add_argument("-f", "--force", action="store_true",
+                                              help="forces clearing without asking")
+    datacenter_sdn_port_mapping_clear_parser.set_defaults(func=datacenter_sdn_port_mapping_clear)
+    # =======================
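Driving the three port-mapping subcommands defined above, using the sample descriptor added by this commit (the datacenter name is a placeholder):

    openmano datacenter-sdn-port-mapping-set myDC sdn/sdn_port_mapping.yaml -f
    openmano datacenter-sdn-port-mapping-list myDC
    openmano datacenter-sdn-port-mapping-clear myDC -f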
+
+    # =======================sdn_controller_xxx section=======================
+    # sdn_controller_create
+    sdn_controller_create_parser = subparsers.add_parser('sdn-controller-create', parents=[parent_parser],
+                                                        help="Creates an SDN controller entity within RO")
+    sdn_controller_create_parser.add_argument("name", help="name of the SDN controller")
+    sdn_controller_create_parser.add_argument("--description", action="store", help="description of the SDN controller")
+    sdn_controller_create_parser.add_argument("--ip", action="store", help="IP of the SDN controller")
+    sdn_controller_create_parser.add_argument("--port", action="store", help="Port of the SDN controller")
+    sdn_controller_create_parser.add_argument("--dpid", action="store",
+                                             help="DPID of the dataplane switch controlled by this SDN controller")
+    sdn_controller_create_parser.add_argument("--type", action="store",
+                                             help="Specify the SDN controller type. Valid types are 'opendaylight' and 'floodlight'")
+    sdn_controller_create_parser.add_argument("--user", action="store", help="user credentials for the SDN controller")
+    sdn_controller_create_parser.add_argument("--passwd", action="store", dest='password',
+                                             help="password credentials for the SDN controller")
+    sdn_controller_create_parser.set_defaults(func=sdn_controller_create)
+
+    # sdn_controller_edit
+    sdn_controller_edit_parser = subparsers.add_parser('sdn-controller-edit', parents=[parent_parser],
+                                                        help="Update one or more options of an SDN controller")
+    sdn_controller_edit_parser.add_argument("name", help="name or uuid of the SDN controller", )
+    sdn_controller_edit_parser.add_argument("--name", action="store", help="Update the name of the SDN controller",
+                                              dest='new_name')
+    sdn_controller_edit_parser.add_argument("--description", action="store", help="description of the SDN controller")
+    sdn_controller_edit_parser.add_argument("--ip", action="store", help="IP of the SDN controller")
+    sdn_controller_edit_parser.add_argument("--port", action="store", help="Port of the SDN controller")
+    sdn_controller_edit_parser.add_argument("--dpid", action="store",
+                                             help="DPID of the dataplane switch controlled by this SDN controller")
+    sdn_controller_edit_parser.add_argument("--type", action="store",
+                                             help="Specify the SDN controller type. Valid types are 'opendaylight' and 'floodlight'")
+    sdn_controller_edit_parser.add_argument("--user", action="store", help="user credentials for the SDN controller")
+    sdn_controller_edit_parser.add_argument("--password", action="store",
+                                             help="password credentials for the SDN controller", dest='password')
+    sdn_controller_edit_parser.add_argument("-f", "--force", action="store_true", help="do not prompt for confirmation")
+    #TODO: include option --file
+    sdn_controller_edit_parser.set_defaults(func=sdn_controller_edit)
+
+    #sdn_controller_list
+    sdn_controller_list_parser = subparsers.add_parser('sdn-controller-list',
+                                                                    parents=[parent_parser],
+                                                                    help="List the SDN controllers")
+    sdn_controller_list_parser.add_argument("name", nargs='?', help="name or uuid of the SDN controller")
+    sdn_controller_list_parser.set_defaults(func=sdn_controller_list)
+
+    # sdn_controller_delete
+    sdn_controller_delete_parser = subparsers.add_parser('sdn-controller-delete',
+                                                                    parents=[parent_parser],
+                                                                    help="Delete the SDN controller")
+    sdn_controller_delete_parser.add_argument("name", help="name or uuid of the SDN controller")
+    sdn_controller_delete_parser.add_argument("-f", "--force", action="store_true", help="forces deletion without asking")
+    sdn_controller_delete_parser.set_defaults(func=sdn_controller_delete)
+    # =======================
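A sketch of the controller lifecycle with placeholder values; the type values come from the help strings above, and 6653 is only the conventional OpenFlow port:

    openmano sdn-controller-create ctrl1 --ip 10.0.0.10 --port 6653 --dpid 00:01:02:03:04:05:06:07 --type opendaylight
    openmano sdn-controller-list ctrl1
    openmano sdn-controller-edit ctrl1 --description "lab dataplane switch" -f
    openmano sdn-controller-delete ctrl1 -f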
 
     action_dict={'net-update': 'retrieves external networks from datacenter',
                  'net-edit': 'edits an external network',
index 0611d4e..a63a1f0 100755 (executable)
--- a/openmanod
+++ b/openmanod
@@ -33,9 +33,9 @@ It loads the configuration file and launches the http_server thread that will li
 '''
 __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ ="$26-aug-2014 11:09:29$"
-__version__="0.5.8-r518"
-version_date="Jan 2017"
-database_version="0.19"      #expected database schema version
+__version__="0.5.9-r519"
+version_date="Mar 2017"
+database_version="0.20"      #expected database schema version
 
 import time
 import sys
@@ -57,14 +57,16 @@ class LoadConfigurationException(Exception):
     pass
 
 def load_configuration(configuration_file):
-    default_tokens ={'http_port':9090,
-                     'http_host':'localhost',
-                     'http_console_proxy': True,
-                     'http_console_host': None,
-                     'log_level': 'DEBUG',
-                     'log_socket_port': 9022,
-                     'auto_push_VNF_to_VIMs': True
-                    }
+    default_tokens = {'http_port':9090,
+                      'http_host':'localhost',
+                      'http_console_proxy': True,
+                      'http_console_host': None,
+                      'log_level': 'DEBUG',
+                      'log_socket_port': 9022,
+                      'auto_push_VNF_to_VIMs': True,
+                      'db_host': 'localhost',
+                      'db_ovim_host': 'localhost'
+    }
     try:
         #Check config file exists
         with open(configuration_file, 'r') as f:
@@ -241,7 +243,7 @@ if __name__=="__main__":
         logger.critical("Starting openmano server version: '%s %s' command: '%s'",  
                          __version__, version_date, " ".join(sys.argv))
         
-        for log_module in ("nfvo", "http", "vim", "db", "console"):
+        for log_module in ("nfvo", "http", "vim", "db", "console", "ovim"):
             log_level_module = "log_level_" + log_module
             log_file_module = "log_file_" + log_module
             logger_module = logging.getLogger('openmano.' + log_module)
@@ -315,6 +317,4 @@ if __name__=="__main__":
     nfvo.stop_service()
     if httpthread:
         httpthread.join(1)
-    for thread in global_config["console_thread"]:
-        thread.terminate = True
 
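The new db_host/db_ovim_host defaults pair with the ovim settings that nfvo.py reads at start_service() (db_ovim_name, db_ovim_user and db_ovim_passwd, further below). A sketch of the corresponding openmanod.cfg fragment; the values shown are assumptions:

    db_ovim_host:   localhost
    db_ovim_name:   mano_vim_db
    db_ovim_user:   mano
    db_ovim_passwd: manopw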
index 10f9404..4a87721 100644 (file)
@@ -265,6 +265,8 @@ class db_base():
         '''
         if data==None:
             return 'Null'
+        elif isinstance(data, str):
+            return json.dumps(data)
         else:
             return json.dumps(str(data))
     
@@ -277,6 +279,8 @@ class db_base():
         '''
         if data[1]==None:
             return str(data[0]) + "=Null"
+        elif isinstance(data[1], str):
+            return str(data[0]) + '=' + json.dumps(data[1])
         else:
             return str(data[0]) + '=' + json.dumps(str(data[1]))
     
@@ -289,24 +293,10 @@ class db_base():
         '''
         if data[1]==None:
             return str(data[0]) + " is Null"
-        
-#         if type(data[1]) is tuple:  #this can only happen in a WHERE_OR clause
-#             text =[]
-#             for d in data[1]:
-#                 if d==None:
-#                     text.append(str(data[0]) + " is Null")
-#                     continue
-#                 out=str(d)
-#                 if "'" not in out:
-#                     text.append( str(data[0]) + "='" + out + "'" )
-#                 elif '"' not in out:
-#                     text.append( str(data[0]) + '="' + out + '"' )
-#                 else:
-#                     text.append( str(data[0]) + '=' + json.dumps(out) )
-#             return " OR ".join(text)
-
-        out=str(data[1])
-        return str(data[0]) + '=' + json.dumps(out)
+        elif isinstance(data[1], str):
+            return str(data[0]) + '=' + json.dumps(data[1])
+        else:
+            return str(data[0]) + '=' + json.dumps(str(data[1]))
 
     def __tuple2db_format_where_not(self, data):
         '''Compose the needed text for a SQL WHERE(not). parameter 'data' is a pair tuple (A,B),
@@ -317,8 +307,10 @@ class db_base():
         '''
         if data[1]==None:
             return str(data[0]) + " is not Null"
-        out=str(data[1])
-        return str(data[0]) + '<>' + json.dumps(out)
+        elif isinstance(data[1], str):
+            return str(data[0]) + '<>' + json.dumps(data[1])
+        else:
+            return str(data[0]) + '<>' + json.dumps(str(data[1]))
     
     def __remove_quotes(self, data):
         '''remove single quotes ' of any string content of data dictionary'''
index a0216e1..4841a98 100644 (file)
@@ -42,7 +42,9 @@ from openmano_schemas import vnfd_schema_v01, vnfd_schema_v02, \
                             scenario_action_schema, instance_scenario_action_schema, instance_scenario_create_schema_v01, \
                             tenant_schema, tenant_edit_schema,\
                             datacenter_schema, datacenter_edit_schema, datacenter_action_schema, datacenter_associate_schema,\
-                            object_schema, netmap_new_schema, netmap_edit_schema
+                            object_schema, netmap_new_schema, netmap_edit_schema, sdn_controller_schema, sdn_controller_edit_schema, \
+                            sdn_port_mapping_schema
+
 import nfvo
 import utils
 from db_base import db_base_Exception
@@ -506,7 +508,7 @@ def http_get_datacenter_id(tenant_id, datacenter_id):
 
 @bottle.route(url_base + '/datacenters', method='POST')
 def http_post_datacenters():
-    '''insert a tenant into the catalogue. '''
+    '''insert a datacenter into the catalogue. '''
     #parse input data
     logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
     http_content,_ = format_in( datacenter_schema )
@@ -544,6 +546,138 @@ def http_edit_datacenter_id(datacenter_id_name):
         logger.error("Unexpected exception: ", exc_info=True)
         bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
+@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='POST')
+def http_post_sdn_controller(tenant_id):
+    '''insert an SDN controller into the catalogue. '''
+    #parse input data
+    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+    http_content,_ = format_in( sdn_controller_schema )
+    try:
+        logger.debug("tenant_id: "+tenant_id)
+        #logger.debug("content: {}".format(http_content['sdn_controller']))
+
+        data = nfvo.sdn_controller_create(mydb, tenant_id, http_content['sdn_controller'])
+        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, data)})
+    except (nfvo.NfvoException, db_base_Exception) as e:
+        logger.error("http_post_sdn_controller error {}: {}".format(e.http_code, str(e)))
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        logger.error("Unexpected exception: ", exc_info=True)
+        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='PUT')
+def http_put_sdn_controller_update(tenant_id, controller_id):
+    '''Update sdn controller'''
+    #parse input data
+    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+    http_content,_ = format_in( sdn_controller_edit_schema )
+#    r = utils.remove_extra_items(http_content, datacenter_schema)
+#    if r:
+#        logger.debug("Remove received extra items %s", str(r))
+    try:
+        #logger.debug("tenant_id: "+tenant_id)
+        logger.debug("content: {}".format(http_content['sdn_controller']))
+
+        data = nfvo.sdn_controller_update(mydb, tenant_id, controller_id, http_content['sdn_controller'])
+        return format_out({"sdn_controller": nfvo.sdn_controller_list(mydb, tenant_id, controller_id)})
+
+    except (nfvo.NfvoException, db_base_Exception) as e:
+        logger.error("http_post_sdn_controller error {}: {}".format(e.http_code, str(e)))
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        logger.error("Unexpected exception: ", exc_info=True)
+        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+@bottle.route(url_base + '/<tenant_id>/sdn_controllers', method='GET')
+def http_get_sdn_controller(tenant_id):
+    '''get the list of sdn controllers'''
+    try:
+        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+
+        data = {'sdn_controllers': nfvo.sdn_controller_list(mydb, tenant_id)}
+        return format_out(data)
+    except (nfvo.NfvoException, db_base_Exception) as e:
+        logger.error("http_get_sdn_controller error {}: {}".format(e.http_code, str(e)))
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        logger.error("Unexpected exception: ", exc_info=True)
+        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='GET')
+def http_get_sdn_controller_id(tenant_id, controller_id):
+    '''get sdn controller details, can use either uuid or name'''
+    try:
+        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+        data = nfvo.sdn_controller_list(mydb, tenant_id, controller_id)
+        return format_out({"sdn_controllers": data})
+    except (nfvo.NfvoException, db_base_Exception) as e:
+        logger.error("http_get_sdn_controller_id error {}: {}".format(e.http_code, str(e)))
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        logger.error("Unexpected exception: ", exc_info=True)
+        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+@bottle.route(url_base + '/<tenant_id>/sdn_controllers/<controller_id>', method='DELETE')
+def http_delete_sdn_controller_id(tenant_id, controller_id):
+    '''delete sdn controller, can use either uuid or name'''
+    try:
+        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+        data = nfvo.sdn_controller_delete(mydb, tenant_id, controller_id)
+        return format_out(data)
+    except (nfvo.NfvoException, db_base_Exception) as e:
+        logger.error("http_delete_sdn_controller_id error {}: {}".format(e.http_code, str(e)))
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        logger.error("Unexpected exception: ", exc_info=True)
+        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='POST')
+def http_post_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
+    '''Set the sdn port mapping for a datacenter. '''
+    #parse input data
+    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+    http_content, _ = format_in(sdn_port_mapping_schema)
+#    r = utils.remove_extra_items(http_content, datacenter_schema)
+#    if r:
+#        logger.debug("Remove received extra items %s", str(r))
+    try:
+        data = nfvo.datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, http_content['sdn_port_mapping'])
+        return format_out({"sdn_port_mapping": data})
+    except (nfvo.NfvoException, db_base_Exception) as e:
+        logger.error("http_post_datacenter_sdn_port_mapping error {}: {}".format(e.http_code, str(e)))
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        logger.error("Unexpected exception: ", exc_info=True)
+        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='GET')
+def http_get_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
+    '''get datacenter sdn mapping details, can use either uuid or name'''
+    try:
+        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+
+        data = nfvo.datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id)
+        return format_out({"sdn_port_mapping": data})
+    except (nfvo.NfvoException, db_base_Exception) as e:
+        logger.error("http_get_datacenter_sdn_port_mapping error {}: {}".format(e.http_code, str(e)))
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        logger.error("Unexpected exception: ", exc_info=True)
+        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
+
+@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/sdn_mapping', method='DELETE')
+def http_delete_datacenter_sdn_port_mapping(tenant_id, datacenter_id):
+    '''clean datacenter sdn mapping, can use either uuid or name'''
+    try:
+        logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+        data = nfvo.datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id)
+        return format_out({"result": data})
+    except (nfvo.NfvoException, db_base_Exception) as e:
+        logger.error("http_delete_datacenter_sdn_port_mapping error {}: {}".format(e.http_code, str(e)))
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        logger.error("Unexpected exception: ", exc_info=True)
+        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/networks', method='GET')  #deprecated
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>/netmaps', method='GET')
@@ -739,6 +873,30 @@ def http_associate_datacenters(tenant_id, datacenter_id):
         logger.error("Unexpected exception: ", exc_info=True)
         bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
+@bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='PUT')
+def http_associate_datacenters_edit(tenant_id, datacenter_id):
+    '''edit the association of an existing datacenter with this tenant. '''
+    logger.debug('FROM %s %s %s', bottle.request.remote_addr, bottle.request.method, bottle.request.url)
+    #parse input data
+    http_content,_ = format_in( datacenter_associate_schema )
+    r = utils.remove_extra_items(http_content, datacenter_associate_schema)
+    if r:
+        logger.debug("Remove received extra items %s", str(r))
+    try:
+        id_ = nfvo.edit_datacenter_to_tenant(mydb, tenant_id, datacenter_id,
+                                    http_content['datacenter'].get('vim_tenant'),
+                                    http_content['datacenter'].get('vim_tenant_name'),
+                                    http_content['datacenter'].get('vim_username'),
+                                    http_content['datacenter'].get('vim_password'),
+                                    http_content['datacenter'].get('config')
+        )
+        return http_get_datacenter_id(tenant_id, id_)
+    except (nfvo.NfvoException, db_base_Exception) as e:
+        logger.error("http_associate_datacenters_edit error {}: {}".format(e.http_code, str(e)))
+        bottle.abort(e.http_code, str(e))
+    except Exception as e:
+        logger.error("Unexpected exception: ", exc_info=True)
+        bottle.abort(HTTP_Internal_Server_Error, type(e).__name__ + ": " + str(e))
 
 @bottle.route(url_base + '/<tenant_id>/datacenters/<datacenter_id>', method='DELETE')
 def http_deassociate_datacenters(tenant_id, datacenter_id):
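The new routes can also be exercised without the CLI. A hedged sketch using curl against the defaults visible in this patch (port 9090, url_base /openmano), where $TENANT is an existing tenant uuid and the POST body mirrors the fields the client sends:

    curl -s http://localhost:9090/openmano/$TENANT/sdn_controllers
    curl -s -X POST -H "Content-Type: application/json" \
         -d '{"sdn_controller": {"name": "ctrl1", "ip": "10.0.0.10", "port": 6653, "dpid": "00:01:02:03:04:05:06:07", "type": "opendaylight"}}' \
         http://localhost:9090/openmano/$TENANT/sdn_controllers
    curl -s -X DELETE http://localhost:9090/openmano/$TENANT/datacenters/myDC/sdn_mapping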
index 14ec54f..3ea5107 100644 (file)
@@ -39,23 +39,27 @@ import vimconn
 import logging
 import collections
 from db_base import db_base_Exception
+
 import nfvo_db
 from threading import Lock
 from time import time
+import ovim as ovim_module
 
 global global_config
 global vimconn_imported
 global logger
 global default_volume_size
 default_volume_size = '5' #size in GB
-
+global ovim
+ovim = None
+global_config = None
 
 vimconn_imported = {}   # dictionary with VIM type as key, loaded module as value
 vim_threads = {"running":{}, "deleting": {}, "names": []}      # threads running for attached-VIMs
 vim_persistent_info = {}
 logger = logging.getLogger('openmano.nfvo')
 task_lock = Lock()
-task_dict = {}
+global_instance_tasks = {}
 last_task_id = 0.0
 db=None
 db_lock=Lock()
@@ -75,13 +79,11 @@ def get_task_id():
     return "TASK.{:.6f}".format(task_id)
 
 
-def new_task(name, params, store=True, depends=None):
+def new_task(name, params, depends=None):
     task_id = get_task_id()
     task = {"status": "enqueued", "id": task_id, "name": name, "params": params}
     if depends:
         task["depends"] = depends
-    if store:
-        task_dict[task_id] = task
     return task
 
 
@@ -107,6 +109,28 @@ def start_service(mydb):
     global db, global_config
     db = nfvo_db.nfvo_db()
     db.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
+    global ovim
+
+    # Initialize openvim for SDN control
+    # TODO: Avoid static configuration by adding new parameters to openmanod.cfg
+    # TODO: review ovim.py to delete not needed configuration
+    ovim_configuration = {
+        'logger_name': 'openmano.ovim',
+        'network_vlan_range_start': 1000,
+        'network_vlan_range_end': 4096,
+        'db_name': global_config["db_ovim_name"],
+        'db_host': global_config["db_ovim_host"],
+        'db_user': global_config["db_ovim_user"],
+        'db_passwd': global_config["db_ovim_passwd"],
+        'bridge_ifaces': {},
+        'mode': 'normal',
+        'network_type': 'bridge',
+        #TODO: log_level_of should not be needed. To be modified in ovim
+        'log_level_of': 'DEBUG'
+    }
+    ovim = ovim_module.ovim(ovim_configuration)
+    ovim.start_service()
+
     from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
     select_ = ('type','d.config as config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name',
                    'dt.uuid as datacenter_tenant_id','dt.vim_tenant_name as vim_tenant_name','dt.vim_tenant_id as vim_tenant_id',
@@ -114,7 +138,8 @@ def start_service(mydb):
     try:
         vims = mydb.get_rows(FROM=from_, SELECT=select_)
         for vim in vims:
-            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id')}
+            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
+                   'datacenter_id': vim.get('datacenter_id')}
             if vim["config"]:
                 extra.update(yaml.load(vim["config"]))
             if vim.get('dt_config'):
@@ -132,7 +157,7 @@ def start_service(mydb):
                     raise NfvoException("Unknown vim type '{}'. Can not open file '{}.py'; {}: {}".format(
                         vim["type"], module, type(e).__name__, str(e)), HTTP_Bad_Request)
 
-            thread_id = vim["datacenter_id"] + "." + vim['nfvo_tenant_id']
+            thread_id = vim['datacenter_tenant_id']
             vim_persistent_info[thread_id] = {}
             try:
                 #if not tenant:
@@ -148,7 +173,7 @@ def start_service(mydb):
                 raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, str(e)), HTTP_Internal_Server_Error)
             thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['vim_tenant_id'], vim['vim_tenant_name'], vim['vim_tenant_id'])
             new_thread = vim_thread.vim_thread(myvim, task_lock, thread_name, vim['datacenter_name'],
-                                               vim.get('datacenter_tenant_id'), db=db, db_lock=db_lock)
+                                               vim['datacenter_tenant_id'], db=db, db_lock=db_lock, ovim=ovim)
             new_thread.start()
             vim_threads["running"][thread_id] = new_thread
     except db_base_Exception as e:
@@ -156,10 +181,16 @@ def start_service(mydb):
 
 
 def stop_service():
+    global ovim, global_config
+    if ovim:
+        ovim.stop_service()
     for thread_id,thread in vim_threads["running"].items():
-        thread.insert_task(new_task("exit", None, store=False))
+        thread.insert_task(new_task("exit", None))
         vim_threads["deleting"][thread_id] = thread
     vim_threads["running"] = {}
+    if global_config and global_config.get("console_thread"):
+        for thread in global_config["console_thread"]:
+            thread.terminate = True
 
 
 def get_flavorlist(mydb, vnf_id, nfvo_tenant=None):
@@ -229,7 +260,8 @@ def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, da
         vims = mydb.get_rows(FROM=from_, SELECT=select_, WHERE=WHERE_dict )
         vim_dict={}
         for vim in vims:
-            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id')}
+            extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
+                   'datacenter_id': vim.get('datacenter_id')}
             if vim["config"]:
                 extra.update(yaml.load(vim["config"]))
             if vim.get('dt_config'):
@@ -248,8 +280,8 @@ def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, da
                                             vim["type"], module, type(e).__name__, str(e)), HTTP_Bad_Request)
 
             try:
-                if 'nfvo_tenant_id' in vim:
-                    thread_id = vim["datacenter_id"] + "." + vim['nfvo_tenant_id']
+                if 'datacenter_tenant_id' in vim:
+                    thread_id = vim["datacenter_tenant_id"]
                     if thread_id not in vim_persistent_info:
                         vim_persistent_info[thread_id] = {}
                     persistent_info = vim_persistent_info[thread_id]
@@ -1834,33 +1866,40 @@ def unify_cloud_config(cloud_config_preserve, cloud_config):
     return new_cloud_config
 
 
-def get_vim_thread(tenant_id, datacenter_id_name=None, datacenter_tenant_id=None):
+def get_vim_thread(mydb, tenant_id, datacenter_id_name=None, datacenter_tenant_id=None):
     datacenter_id = None
     datacenter_name = None
     thread = None
-    if datacenter_id_name:
-        if utils.check_valid_uuid(datacenter_id_name):
-            datacenter_id = datacenter_id_name
+    try:
+        if datacenter_tenant_id:
+            thread_id = datacenter_tenant_id
+            thread = vim_threads["running"].get(datacenter_tenant_id)
         else:
-            datacenter_name = datacenter_id_name
-    if datacenter_id:
-        thread = vim_threads["running"].get(datacenter_id + "." + tenant_id)
-    else:
-        for k, v in vim_threads["running"].items():
-            datacenter_tenant = k.split(".")
-            if datacenter_tenant[0] == datacenter_id and datacenter_tenant[1] == tenant_id:
-                if thread:
-                    raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
-                thread = v
-            elif not datacenter_id and datacenter_tenant[1] == tenant_id:
-                if thread.datacenter_name == datacenter_name:
-                    if thread:
-                        raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
-                    thread = v
-    if not thread:
-        raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), HTTP_Not_Found)
-    return thread
-
+            where_={"td.nfvo_tenant_id": tenant_id}
+            if datacenter_id_name:
+                if utils.check_valid_uuid(datacenter_id_name):
+                    datacenter_id = datacenter_id_name
+                    where_["dt.datacenter_id"] = datacenter_id
+                else:
+                    datacenter_name = datacenter_id_name
+                    where_["d.name"] = datacenter_name
+            if datacenter_tenant_id:
+                where_["dt.uuid"] = datacenter_tenant_id
+            datacenters = mydb.get_rows(
+                SELECT=("dt.uuid as datacenter_tenant_id",),
+                FROM="datacenter_tenants as dt join tenants_datacenters as td on dt.uuid=td.datacenter_tenant_id "
+                     "join datacenters as d on d.uuid=dt.datacenter_id",
+                WHERE=where_)
+            if len(datacenters) > 1:
+                raise NfvoException("More than one datacenters found, try to identify with uuid", HTTP_Conflict)
+            elif datacenters:
+                thread_id = datacenters[0]["datacenter_tenant_id"]
+                thread = vim_threads["running"].get(thread_id)
+        if not thread:
+            raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), HTTP_Not_Found)
+        return thread_id, thread
+    except db_base_Exception as e:
+        raise NfvoException("{} {}".format(type(e).__name__ , str(e)), e.http_code)
 
 def get_datacenter_by_name_uuid(mydb, tenant_id, datacenter_id_name=None, **extra_filter):
     datacenter_id = None
@@ -1898,13 +1937,14 @@ def create_instance(mydb, tenant_id, instance_dict):
 
     #find main datacenter
     myvims = {}
-    myvim_threads = {}
-    datacenter2tenant = {}
+    myvim_threads_id = {}
+    instance_tasks={}
+    tasks_to_launch={}
     datacenter = instance_dict.get("datacenter")
     default_datacenter_id, vim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
     myvims[default_datacenter_id] = vim
-    myvim_threads[default_datacenter_id] = get_vim_thread(tenant_id, default_datacenter_id)
-    datacenter2tenant[default_datacenter_id] = vim['config']['datacenter_tenant_id']
+    myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
+    tasks_to_launch[myvim_threads_id[default_datacenter_id]] = []
     #myvim_tenant = myvim['tenant_id']
 #    default_datacenter_name = vim['name']
     rollbackList=[]
@@ -1924,7 +1964,6 @@ def create_instance(mydb, tenant_id, instance_dict):
     logger.debug("Creating instance from scenario-dict:\n%s", yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))  #TODO remove
     instance_name = instance_dict["name"]
     instance_description = instance_dict.get("description")
-    instance_tasks={}
     try:
         # 0 check correct parameters
         for net_name, net_instance_desc in instance_dict.get("networks",{}).iteritems():
@@ -1944,8 +1983,8 @@ def create_instance(mydb, tenant_id, instance_dict):
                         #Add this datacenter to myvims
                         d, v = get_datacenter_by_name_uuid(mydb, tenant_id, site["datacenter"])
                         myvims[d] = v
-                        myvim_threads[d] = get_vim_thread(tenant_id, site["datacenter"])
-                        datacenter2tenant[d] = v['config']['datacenter_tenant_id']
+                        myvim_threads_id[d],_ = get_vim_thread(mydb, tenant_id, site["datacenter"])
+                        tasks_to_launch[myvim_threads_id[d]] = []
                         site["datacenter"] = d #change name to id
                 else:
                     if site_without_datacenter_field:
@@ -1966,8 +2005,8 @@ def create_instance(mydb, tenant_id, instance_dict):
                 if vnf_instance_desc["datacenter"] not in myvims:
                     d, v = get_datacenter_by_name_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
                     myvims[d] = v
-                    myvim_threads[d] = get_vim_thread(tenant_id, vnf_instance_desc["datacenter"])
-                    datacenter2tenant[d] = v['config']['datacenter_tenant_id']
+                    myvim_threads_id[d],_ = get_vim_thread(mydb, tenant_id, vnf_instance_desc["datacenter"])
+                    tasks_to_launch[myvim_threads_id[d]] = []
                 scenario_vnf["datacenter"] = vnf_instance_desc["datacenter"]
 
     #0.1 parse cloud-config parameters
@@ -2018,11 +2057,11 @@ def create_instance(mydb, tenant_id, instance_dict):
                 if site.get("datacenter"):
                     vim = myvims[ site["datacenter"] ]
                     datacenter_id = site["datacenter"]
-                    myvim_thread = myvim_threads[ site["datacenter"] ]
+                    myvim_thread_id = myvim_threads_id[ site["datacenter"] ]
                 else:
                     vim = myvims[ default_datacenter_id ]
                     datacenter_id = default_datacenter_id
-                    myvim_thread = myvim_threads[default_datacenter_id]
+                    myvim_thread_id = myvim_threads_id[default_datacenter_id]
                 net_type = sce_net['type']
                 lookfor_filter = {'admin_state_up': True, 'status': 'ACTIVE'} #'shared': True
                 if sce_net["external"]:
@@ -2081,8 +2120,9 @@ def create_instance(mydb, tenant_id, instance_dict):
                 if create_network:
                     #if network is not external
                     task = new_task("new-net", (net_vim_name, net_type, sce_net.get('ip_profile',None)))
-                    task_id = myvim_thread.insert_task(task)
+                    task_id = task["id"]
                     instance_tasks[task_id] = task
+                    tasks_to_launch[myvim_thread_id].append(task)
                     #network_id = vim.new_network(net_vim_name, net_type, sce_net.get('ip_profile',None))
                     sce_net["vim_id_sites"][datacenter_id] = task_id
                     auxNetDict['scenario'][sce_net['uuid']][datacenter_id] = task_id
@@ -2096,11 +2136,11 @@ def create_instance(mydb, tenant_id, instance_dict):
                 if sce_vnf.get("datacenter"):
                     vim = myvims[ sce_vnf["datacenter"] ]
                     datacenter_id = sce_vnf["datacenter"]
-                    myvim_thread = myvim_threads[ sce_vnf["datacenter"]]
+                    myvim_thread_id = myvim_threads_id[ sce_vnf["datacenter"]]
                 else:
                     vim = myvims[ default_datacenter_id ]
                     datacenter_id = default_datacenter_id
-                    myvim_thread = myvim_threads[default_datacenter_id]
+                    myvim_thread_id = myvim_threads_id[default_datacenter_id]
                 descriptor_net =  instance_dict.get("vnfs",{}).get(sce_vnf["name"],{})
                 net_name = descriptor_net.get("name")
                 if not net_name:
@@ -2108,8 +2148,9 @@ def create_instance(mydb, tenant_id, instance_dict):
                     net_name = net_name[:255]     #limit length
                 net_type = net['type']
                 task = new_task("new-net", (net_name, net_type, net.get('ip_profile',None)))
-                task_id = myvim_thread.insert_task(task)
+                task_id = task["id"]
                 instance_tasks[task_id] = task
+                tasks_to_launch[myvim_thread_id].append(task)
                 # network_id = vim.new_network(net_name, net_type, net.get('ip_profile',None))
                 net['vim_id'] = task_id
                 if sce_vnf['uuid'] not in auxNetDict:
@@ -2127,11 +2168,11 @@ def create_instance(mydb, tenant_id, instance_dict):
         for sce_vnf in scenarioDict['vnfs']:
             if sce_vnf.get("datacenter"):
                 vim = myvims[ sce_vnf["datacenter"] ]
-                myvim_thread = myvim_threads[ sce_vnf["datacenter"] ]
+                myvim_thread_id = myvim_threads_id[ sce_vnf["datacenter"] ]
                 datacenter_id = sce_vnf["datacenter"]
             else:
                 vim = myvims[ default_datacenter_id ]
-                myvim_thread = myvim_threads[ default_datacenter_id ]
+                myvim_thread_id = myvim_threads_id[ default_datacenter_id ]
                 datacenter_id = default_datacenter_id
             sce_vnf["datacenter_id"] =  datacenter_id
             i = 0
@@ -2223,7 +2264,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                                 break
                     else:
                         netDict['net_id'] = auxNetDict[ sce_vnf['uuid'] ][ iface['net_id'] ]
-                    if is_task_id(netDict['net_id']):
+                    if netDict.get('net_id') and is_task_id(netDict['net_id']):
                         task_depends[netDict['net_id']] = instance_tasks[netDict['net_id']]
                     #skip bridge ifaces not connected to any net
                     #if 'net_id' not in netDict or netDict['net_id']==None:
@@ -2241,9 +2282,9 @@ def create_instance(mydb, tenant_id, instance_dict):
                 task = new_task("new-vm", (myVMDict['name'], myVMDict['description'], myVMDict.get('start', None),
                                            myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'],
                                            cloud_config_vm, myVMDict['disks']), depends=task_depends)
-                vm_id = myvim_thread.insert_task(task)
-                instance_tasks[vm_id] = task
-
+                instance_tasks[task["id"]] = task
+                tasks_to_launch[myvim_thread_id].append(task)
+                vm_id = task["id"]
                 vm['vim_id'] = vm_id
                 rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
                 #put interface uuid back to scenario[vnfs][vms[[interfaces]
@@ -2253,19 +2294,24 @@ def create_instance(mydb, tenant_id, instance_dict):
                             if net["name"]==iface["internal_name"]:
                                 iface["vim_id"]=net["vim_id"]
                                 break
-        scenarioDict["datacenter2tenant"] = datacenter2tenant
+        scenarioDict["datacenter2tenant"] = myvim_threads_id
         logger.debug("create_instance Deployment done scenarioDict: %s",
                     yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False) )
         instance_id = mydb.new_instance_scenario_as_a_whole(tenant_id,instance_name, instance_description, scenarioDict)
-        # Update database with those ended tasks
-        for task in instance_tasks.values():
-            if task["status"] == "ok":
-                if task["name"] == "new-vm":
-                    mydb.update_rows("instance_vms", UPDATE={"vim_vm_id": task["result"]},
-                                     WHERE={"vim_vm_id": task["id"]})
-                elif task["name"] == "new-net":
-                    mydb.update_rows("instance_nets", UPDATE={"vim_net_id": task["result"]},
-                                     WHERE={"vim_net_id": task["id"]})
+        for myvim_thread_id, task_list in tasks_to_launch.items():
+            for task in task_list:
+                vim_threads["running"][myvim_thread_id].insert_task(task)
+
+        global_instance_tasks[instance_id] = instance_tasks
+        # Update database with the instance_tasks that have already finished
+        # for task in instance_tasks.values():
+        #     if task["status"] == "ok":
+        #         if task["name"] == "new-vm":
+        #             mydb.update_rows("instance_vms", UPDATE={"vim_vm_id": task["result"]},
+        #                             WHERE={"vim_vm_id": task["id"]})
+        #         elif task["name"] == "new-net":
+        #             mydb.update_rows("instance_nets", UPDATE={"vim_net_id": task["result"]},
+        #                              WHERE={"vim_net_id": task["id"]})
         return mydb.get_instance_scenario(instance_id)
     except (NfvoException, vimconn.vimconnException,db_base_Exception)  as e:
         message = rollback(mydb, myvims, rollbackList)
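In condensed form, the deferred-launch pattern that create_instance now follows: tasks are collected per vim_thread id and only handed to the threads once the instance row has been persisted, so each task id can serve as a provisional VIM id in the database (a simplified sketch using names from this patch):

    tasks_to_launch = {thread_id: []}                     # one queue per vim_thread
    task = new_task("new-net", ("net0", "bridge", None))  # task["id"] doubles as provisional vim id
    tasks_to_launch[thread_id].append(task)
    instance_id = mydb.new_instance_scenario_as_a_whole(tenant_id, name, description, scenarioDict)
    for thread_id, task_list in tasks_to_launch.items():
        for task in task_list:
            vim_threads["running"][thread_id].insert_task(task)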
@@ -2301,7 +2347,7 @@ def delete_instance(mydb, tenant_id, instance_id):
         datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
         if datacenter_key not in myvims:
             try:
-                myvim_thread = get_vim_thread(tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+                _,myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
             except NfvoException as e:
                 logger.error(str(e))
                 myvim_thread = None
@@ -2324,7 +2370,7 @@ def delete_instance(mydb, tenant_id, instance_id):
                 task=None
                 if is_task_id(vm['vim_vm_id']):
                     task_id = vm['vim_vm_id']
-                    old_task = task_dict.get(task_id)
+                    old_task = global_instance_tasks[instance_id].get(task_id)
                     if not old_task:
                         error_msg += "\n    VM was scheduled for create, but task {} is not found".format(task_id)
                         continue
@@ -2334,11 +2380,11 @@ def delete_instance(mydb, tenant_id, instance_id):
                         elif old_task["status"] == "error":
                             continue
                         elif old_task["status"] == "processing":
-                            task = new_task("del-vm", task_id, depends={task_id: old_task})
+                            task = new_task("del-vm", (task_id, vm["interfaces"]), depends={task_id: old_task})
                         else: #ok
-                            task = new_task("del-vm", old_task["result"])
+                            task = new_task("del-vm", (old_task["result"], vm["interfaces"]))
                 else:
-                    task = new_task("del-vm", vm['vim_vm_id'], store=False)
+                    task = new_task("del-vm", (vm['vim_vm_id'], vm["interfaces"]) )
                 if task:
                     myvim_thread.insert_task(task)
             except vimconn.vimconnNotFoundException as e:
@@ -2358,7 +2404,7 @@ def delete_instance(mydb, tenant_id, instance_id):
         datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
         if datacenter_key not in myvims:
             try:
-                myvim_thread = get_vim_thread(tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+                _,myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
             except NfvoException as e:
                 logger.error(str(e))
                 myvim_thread = None
@@ -2380,7 +2426,7 @@ def delete_instance(mydb, tenant_id, instance_id):
             task = None
             if is_task_id(net['vim_net_id']):
                 task_id = net['vim_net_id']
-                old_task = task_dict.get(task_id)
+                old_task = global_instance_tasks[instance_id].get(task_id)
                 if not old_task:
                     error_msg += "\n    NET was scheduled for create, but task {} is not found".format(task_id)
                     continue
@@ -2394,7 +2440,7 @@ def delete_instance(mydb, tenant_id, instance_id):
                     else:  # ok
                         task = new_task("del-net", old_task["result"])
             else:
-                task = new_task("del-net", net['vim_net_id'], store=False)
+                task = new_task("del-net", (net['vim_net_id'], net['sdn_net_id']))
             if task:
                 myvim_thread.insert_task(task)
         except vimconn.vimconnNotFoundException as e:
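As a reading aid for the delete_instance changes above: delete tasks now carry the extra context the thread needs to tear down SDN state, not just the VIM id (sketch, names from this patch):

    # VM deletion passes the interface list so any sdn_port_id can be removed;
    # net deletion passes the ovim network id alongside the VIM one.
    myvim_thread.insert_task(new_task("del-vm", (vm['vim_vm_id'], vm["interfaces"])))
    myvim_thread.insert_task(new_task("del-net", (net['vim_net_id'], net['sdn_net_id'])))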
@@ -2419,161 +2465,161 @@ def refresh_instance(mydb, nfvo_tenant, instanceDict, datacenter=None, vim_tenan
          - result: <0 if there is any unexpected error, n>=0 if no errors where n is the number of vms and nets that couldn't be updated in the database
          - error_msg
     '''
-    # Assumption: nfvo_tenant and instance_id were checked before entering into this function
-    #print "nfvo.refresh_instance begins"
-    #print json.dumps(instanceDict, indent=4)
-
-    #print "Getting the VIM URL and the VIM tenant_id"
-    myvims={}
-
-    # 1. Getting VIM vm and net list
-    vms_updated = [] #List of VM instance uuids in openmano that were updated
-    vms_notupdated=[]
-    vm_list = {}
-    for sce_vnf in instanceDict['vnfs']:
-        datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
-        if datacenter_key not in vm_list:
-            vm_list[datacenter_key] = []
-        if datacenter_key not in myvims:
-            vims = get_vim(mydb, nfvo_tenant, datacenter_id=sce_vnf["datacenter_id"],
-                           datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
-            if len(vims) == 0:
-                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"]))
-                myvims[datacenter_key] = None
-            else:
-                myvims[datacenter_key] = vims.values()[0]
-        for vm in sce_vnf['vms']:
-            vm_list[datacenter_key].append(vm['vim_vm_id'])
-            vms_notupdated.append(vm["uuid"])
-
-    nets_updated = [] #List of VM instance uuids in openmano that were updated
-    nets_notupdated=[]
-    net_list = {}
-    for net in instanceDict['nets']:
-        datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
-        if datacenter_key not in net_list:
-            net_list[datacenter_key] = []
-        if datacenter_key not in myvims:
-            vims = get_vim(mydb, nfvo_tenant, datacenter_id=net["datacenter_id"],
-                           datacenter_tenant_id=net["datacenter_tenant_id"])
-            if len(vims) == 0:
-                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
-                myvims[datacenter_key] = None
-            else:
-                myvims[datacenter_key] = vims.values()[0]
-
-        net_list[datacenter_key].append(net['vim_net_id'])
-        nets_notupdated.append(net["uuid"])
-
-    # 1. Getting the status of all VMs
-    vm_dict={}
-    for datacenter_key in myvims:
-        if not vm_list.get(datacenter_key):
-            continue
-        failed = True
-        failed_message=""
-        if not myvims[datacenter_key]:
-            failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
-        else:
-            try:
-                vm_dict.update(myvims[datacenter_key].refresh_vms_status(vm_list[datacenter_key]) )
-                failed = False
-            except vimconn.vimconnException as e:
-                logger.error("VIM exception %s %s", type(e).__name__, str(e))
-                failed_message = str(e)
-        if failed:
-            for vm in vm_list[datacenter_key]:
-                vm_dict[vm] = {'status': "VIM_ERROR", 'error_msg': failed_message}
-
-    # 2. Update the status of VMs in the instanceDict, while collects the VMs whose status changed
-    for sce_vnf in instanceDict['vnfs']:
-        for vm in sce_vnf['vms']:
-            vm_id = vm['vim_vm_id']
-            interfaces = vm_dict[vm_id].pop('interfaces', [])
-            #2.0 look if contain manamgement interface, and if not change status from ACTIVE:NoMgmtIP to ACTIVE
-            has_mgmt_iface = False
-            for iface in vm["interfaces"]:
-                if iface["type"]=="mgmt":
-                    has_mgmt_iface = True
-            if vm_dict[vm_id]['status'] == "ACTIVE:NoMgmtIP" and not has_mgmt_iface:
-                vm_dict[vm_id]['status'] = "ACTIVE"
-            if vm_dict[vm_id].get('error_msg') and len(vm_dict[vm_id]['error_msg']) >= 1024:
-                vm_dict[vm_id]['error_msg'] = vm_dict[vm_id]['error_msg'][:516] + " ... " + vm_dict[vm_id]['error_msg'][-500:]
-            if vm['status'] != vm_dict[vm_id]['status'] or vm.get('error_msg')!=vm_dict[vm_id].get('error_msg') or vm.get('vim_info')!=vm_dict[vm_id].get('vim_info'):
-                vm['status']    = vm_dict[vm_id]['status']
-                vm['error_msg'] = vm_dict[vm_id].get('error_msg')
-                vm['vim_info']  = vm_dict[vm_id].get('vim_info')
-                # 2.1. Update in openmano DB the VMs whose status changed
-                try:
-                    updates = mydb.update_rows('instance_vms', UPDATE=vm_dict[vm_id], WHERE={'uuid':vm["uuid"]})
-                    vms_notupdated.remove(vm["uuid"])
-                    if updates>0:
-                        vms_updated.append(vm["uuid"])
-                except db_base_Exception as e:
-                    logger.error("nfvo.refresh_instance error database update: %s", str(e))
-            # 2.2. Update in openmano DB the interface VMs
-            for interface in interfaces:
-                #translate from vim_net_id to instance_net_id
-                network_id_list=[]
-                for net in instanceDict['nets']:
-                    if net["vim_net_id"] == interface["vim_net_id"]:
-                        network_id_list.append(net["uuid"])
-                if not network_id_list:
-                    continue
-                del interface["vim_net_id"]
-                try:
-                    for network_id in network_id_list:
-                        mydb.update_rows('instance_interfaces', UPDATE=interface, WHERE={'instance_vm_id':vm["uuid"], "instance_net_id":network_id})
-                except db_base_Exception as e:
-                    logger.error( "nfvo.refresh_instance error with vm=%s, interface_net_id=%s", vm["uuid"], network_id)
-
-    # 3. Getting the status of all nets
-    net_dict = {}
-    for datacenter_key in myvims:
-        if not net_list.get(datacenter_key):
-            continue
-        failed = True
-        failed_message = ""
-        if not myvims[datacenter_key]:
-            failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
-        else:
-            try:
-                net_dict.update(myvims[datacenter_key].refresh_nets_status(net_list[datacenter_key]) )
-                failed = False
-            except vimconn.vimconnException as e:
-                logger.error("VIM exception %s %s", type(e).__name__, str(e))
-                failed_message = str(e)
-        if failed:
-            for net in net_list[datacenter_key]:
-                net_dict[net] = {'status': "VIM_ERROR", 'error_msg': failed_message}
-
-    # 4. Update the status of nets in the instanceDict, while collects the nets whose status changed
-    # TODO: update nets inside a vnf
-    for net in instanceDict['nets']:
-        net_id = net['vim_net_id']
-        if net_dict[net_id].get('error_msg') and len(net_dict[net_id]['error_msg']) >= 1024:
-            net_dict[net_id]['error_msg'] = net_dict[net_id]['error_msg'][:516] + " ... " + net_dict[vm_id]['error_msg'][-500:]
-        if net['status'] != net_dict[net_id]['status'] or net.get('error_msg')!=net_dict[net_id].get('error_msg') or net.get('vim_info')!=net_dict[net_id].get('vim_info'):
-            net['status']    = net_dict[net_id]['status']
-            net['error_msg'] = net_dict[net_id].get('error_msg')
-            net['vim_info']  = net_dict[net_id].get('vim_info')
-            # 5.1. Update in openmano DB the nets whose status changed
-            try:
-                updated = mydb.update_rows('instance_nets', UPDATE=net_dict[net_id], WHERE={'uuid':net["uuid"]})
-                nets_notupdated.remove(net["uuid"])
-                if updated>0:
-                    nets_updated.append(net["uuid"])
-            except db_base_Exception as e:
-                logger.error("nfvo.refresh_instance error database update: %s", str(e))
-
-    # Returns appropriate output
-    #print "nfvo.refresh_instance finishes"
-    logger.debug("VMs updated in the database: %s; nets updated in the database %s; VMs not updated: %s; nets not updated: %s",
-                str(vms_updated), str(nets_updated), str(vms_notupdated), str(nets_notupdated))
+    # Assumption: nfvo_tenant and instance_id were checked before entering into this function
+    # #print "nfvo.refresh_instance begins"
+    # #print json.dumps(instanceDict, indent=4)
+    #
+    # #print "Getting the VIM URL and the VIM tenant_id"
+    myvims={}
+    #
+    # 1. Getting VIM vm and net list
+    vms_updated = [] #List of VM instance uuids in openmano that were updated
+    vms_notupdated=[]
+    vm_list = {}
+    for sce_vnf in instanceDict['vnfs']:
+        datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+        if datacenter_key not in vm_list:
+            vm_list[datacenter_key] = []
+        if datacenter_key not in myvims:
+            vims = get_vim(mydb, nfvo_tenant, datacenter_id=sce_vnf["datacenter_id"],
+                           datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
+            if len(vims) == 0:
+                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"]))
+                myvims[datacenter_key] = None
+            else:
+                myvims[datacenter_key] = vims.values()[0]
+        for vm in sce_vnf['vms']:
+            vm_list[datacenter_key].append(vm['vim_vm_id'])
+            vms_notupdated.append(vm["uuid"])
+    #
+    nets_updated = [] #List of net instance uuids in openmano that were updated
+    nets_notupdated=[]
+    net_list = {}
+    for net in instanceDict['nets']:
+        datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
+        if datacenter_key not in net_list:
+            net_list[datacenter_key] = []
+        if datacenter_key not in myvims:
+            vims = get_vim(mydb, nfvo_tenant, datacenter_id=net["datacenter_id"],
+                           datacenter_tenant_id=net["datacenter_tenant_id"])
+            if len(vims) == 0:
+                logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
+                myvims[datacenter_key] = None
+            else:
+                myvims[datacenter_key] = vims.values()[0]
+    #
+        net_list[datacenter_key].append(net['vim_net_id'])
+        nets_notupdated.append(net["uuid"])
+    #
+    # 1. Getting the status of all VMs
+    vm_dict={}
+    for datacenter_key in myvims:
+        if not vm_list.get(datacenter_key):
+            continue
+        failed = True
+        failed_message=""
+        if not myvims[datacenter_key]:
+            failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
+        else:
+            try:
+                vm_dict.update(myvims[datacenter_key].refresh_vms_status(vm_list[datacenter_key]) )
+                failed = False
+            except vimconn.vimconnException as e:
+                logger.error("VIM exception %s %s", type(e).__name__, str(e))
+                failed_message = str(e)
+        if failed:
+            for vm in vm_list[datacenter_key]:
+                vm_dict[vm] = {'status': "VIM_ERROR", 'error_msg': failed_message}
+    #
+    # 2. Update the status of VMs in the instanceDict, while collects the VMs whose status changed
+    for sce_vnf in instanceDict['vnfs']:
+        for vm in sce_vnf['vms']:
+            vm_id = vm['vim_vm_id']
+            interfaces = vm_dict[vm_id].pop('interfaces', [])
+            #2.0 look if it contains a management interface, and if not change status from ACTIVE:NoMgmtIP to ACTIVE
+            has_mgmt_iface = False
+            for iface in vm["interfaces"]:
+                if iface["type"]=="mgmt":
+                    has_mgmt_iface = True
+            if vm_dict[vm_id]['status'] == "ACTIVE:NoMgmtIP" and not has_mgmt_iface:
+                vm_dict[vm_id]['status'] = "ACTIVE"
+            if vm_dict[vm_id].get('error_msg') and len(vm_dict[vm_id]['error_msg']) >= 1024:
+                vm_dict[vm_id]['error_msg'] = vm_dict[vm_id]['error_msg'][:516] + " ... " + vm_dict[vm_id]['error_msg'][-500:]
+            if vm['status'] != vm_dict[vm_id]['status'] or vm.get('error_msg')!=vm_dict[vm_id].get('error_msg') or vm.get('vim_info')!=vm_dict[vm_id].get('vim_info'):
+                vm['status']    = vm_dict[vm_id]['status']
+                vm['error_msg'] = vm_dict[vm_id].get('error_msg')
+                vm['vim_info']  = vm_dict[vm_id].get('vim_info')
+                # 2.1. Update in openmano DB the VMs whose status changed
+                try:
+                    updates = mydb.update_rows('instance_vms', UPDATE=vm_dict[vm_id], WHERE={'uuid':vm["uuid"]})
+                    vms_notupdated.remove(vm["uuid"])
+                    if updates>0:
+                        vms_updated.append(vm["uuid"])
+                except db_base_Exception as e:
+                    logger.error("nfvo.refresh_instance error database update: %s", str(e))
+            # 2.2. Update in openmano DB the interface VMs
+            for interface in interfaces:
+                #translate from vim_net_id to instance_net_id
+                network_id_list=[]
+                for net in instanceDict['nets']:
+                    if net["vim_net_id"] == interface["vim_net_id"]:
+                        network_id_list.append(net["uuid"])
+                if not network_id_list:
+                    continue
+                del interface["vim_net_id"]
+                try:
+                    for network_id in network_id_list:
+                        mydb.update_rows('instance_interfaces', UPDATE=interface, WHERE={'instance_vm_id':vm["uuid"], "instance_net_id":network_id})
+                except db_base_Exception as e:
+                    logger.error( "nfvo.refresh_instance error with vm=%s, interface_net_id=%s", vm["uuid"], network_id)
+    #
+    # 3. Getting the status of all nets
+    net_dict = {}
+    for datacenter_key in myvims:
+        if not net_list.get(datacenter_key):
+            continue
+        failed = True
+        failed_message = ""
+        if not myvims[datacenter_key]:
+            failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
+        else:
+            try:
+                net_dict.update(myvims[datacenter_key].refresh_nets_status(net_list[datacenter_key]) )
+                failed = False
+            except vimconn.vimconnException as e:
+                logger.error("VIM exception %s %s", type(e).__name__, str(e))
+                failed_message = str(e)
+        if failed:
+            for net in net_list[datacenter_key]:
+                net_dict[net] = {'status': "VIM_ERROR", 'error_msg': failed_message}
+    #
+    # 4. Update the status of nets in the instanceDict, while collects the nets whose status changed
+    # TODO: update nets inside a vnf
+    for net in instanceDict['nets']:
+        net_id = net['vim_net_id']
+        if net_dict[net_id].get('error_msg') and len(net_dict[net_id]['error_msg']) >= 1024:
+            net_dict[net_id]['error_msg'] = net_dict[net_id]['error_msg'][:516] + " ... " + net_dict[net_id]['error_msg'][-500:]
+        if net['status'] != net_dict[net_id]['status'] or net.get('error_msg')!=net_dict[net_id].get('error_msg') or net.get('vim_info')!=net_dict[net_id].get('vim_info'):
+            net['status']    = net_dict[net_id]['status']
+            net['error_msg'] = net_dict[net_id].get('error_msg')
+            net['vim_info']  = net_dict[net_id].get('vim_info')
+            # 5.1. Update in openmano DB the nets whose status changed
+            try:
+                updated = mydb.update_rows('instance_nets', UPDATE=net_dict[net_id], WHERE={'uuid':net["uuid"]})
+                nets_notupdated.remove(net["uuid"])
+                if updated>0:
+                    nets_updated.append(net["uuid"])
+            except db_base_Exception as e:
+                logger.error("nfvo.refresh_instance error database update: %s", str(e))
+    #
+    # Returns appropriate output
+    # #print "nfvo.refresh_instance finishes"
+    logger.debug("VMs updated in the database: %s; nets updated in the database %s; VMs not updated: %s; nets not updated: %s",
+                str(vms_updated), str(nets_updated), str(vms_notupdated), str(nets_notupdated))
     instance_id = instanceDict['uuid']
-    if len(vms_notupdated)+len(nets_notupdated)>0:
-        error_msg = "VMs not updated: " + str(vms_notupdated) + "; nets not updated: " + str(nets_notupdated)
-        return len(vms_notupdated)+len(nets_notupdated), 'Scenario instance ' + instance_id + ' refreshed but some elements could not be updated in the database: ' + error_msg
+    if len(vms_notupdated)+len(nets_notupdated)>0:
+        error_msg = "VMs not updated: " + str(vms_notupdated) + "; nets not updated: " + str(nets_notupdated)
+        return len(vms_notupdated)+len(nets_notupdated), 'Scenario instance ' + instance_id + ' refreshed but some elements could not be updated in the database: ' + error_msg
 
     return 0, 'Scenario instance ' + instance_id + ' refreshed.'
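The 516/500 slicing used in refresh_instance keeps error_msg within its 1024-character database column by preserving the head and tail of the message; expressed as a hypothetical helper (not part of this patch):

    def truncate_error_msg(msg, limit=1024):
        # 516 + len(" ... ") + 500 = 1021 characters, below the column limit
        if msg and len(msg) >= limit:
            return msg[:516] + " ... " + msg[-500:]
        return msg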
 
@@ -2731,7 +2777,10 @@ def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
                     if new_config_dict[k]==None:
                         to_delete.append(k)
 
-                config_dict = yaml.load(datacenter["config"])
+                config_text = datacenter.get("config")
+                if not config_text:
+                    config_text = '{}'
+                config_dict = yaml.load(config_text)
                 config_dict.update(new_config_dict)
                 #delete null fields
                 for k in to_delete:
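A small sketch of the merge semantics in edit_datacenter above: the stored YAML config is updated with the request, and keys explicitly set to null are removed (illustrative values):

    import yaml
    new_config_dict = {"sdn-controller": "ctrl-uuid", "insecure": None}
    config_dict = yaml.load(config_text or '{}')
    config_dict.update(new_config_dict)
    for k in (k for k, v in new_config_dict.items() if v is None):
        config_dict.pop(k, None)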
@@ -2811,12 +2860,48 @@ def associate_datacenter_to_tenant(mydb, nfvo_tenant, datacenter, vim_tenant_id=
     # create thread
     datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_dict['uuid'], datacenter_id)  # reload data
     thread_name = get_non_used_vim_name(datacenter_name, datacenter_id, tenant_dict['name'], tenant_dict['uuid'])
-    new_thread = vim_thread.vim_thread(myvim, task_lock, thread_name, datacenter_name, db=db, db_lock=db_lock)
+    new_thread = vim_thread.vim_thread(myvim, task_lock, thread_name, datacenter_name, db=db, db_lock=db_lock, ovim=ovim)
     new_thread.start()
-    thread_id = datacenter_id + "." + tenant_dict['uuid']
+    thread_id = datacenter_tenants_dict["uuid"]
     vim_threads["running"][thread_id] = new_thread
     return datacenter_id
 
+def edit_datacenter_to_tenant(mydb, nfvo_tenant, datacenter_id, vim_tenant_id=None, vim_tenant_name=None, vim_username=None, vim_password=None, config=None):
+    #Obtain the data of this datacenter_tenant_id
+    vim_data = mydb.get_rows(
+        SELECT=("datacenter_tenants.vim_tenant_name", "datacenter_tenants.vim_tenant_id", "datacenter_tenants.user",
+                "datacenter_tenants.passwd", "datacenter_tenants.config"),
+        FROM="datacenter_tenants JOIN tenants_datacenters ON datacenter_tenants.uuid=tenants_datacenters.datacenter_tenant_id",
+        WHERE={"tenants_datacenters.nfvo_tenant_id": nfvo_tenant,
+               "tenants_datacenters.datacenter_id": datacenter_id})
+
+    logger.debug(str(vim_data))
+    if len(vim_data) < 1:
+        raise NfvoException("Datacenter {} is not attached for tenant {}".format(datacenter_id, nfvo_tenant), HTTP_Conflict)
+
+    v = vim_data[0]
+    if v['config']:
+        v['config'] = yaml.load(v['config'])
+
+    if vim_tenant_id:
+        v['vim_tenant_id'] = vim_tenant_id
+    if vim_tenant_name:
+        v['vim_tenant_name'] = vim_tenant_name
+    if vim_username:
+        v['user'] = vim_username
+    if vim_password:
+        v['passwd'] = vim_password
+    if config:
+        if not v['config']:
+            v['config'] = {}
+        v['config'].update(config)
+
+    logger.debug(str(v))
+    deassociate_datacenter_to_tenant(mydb, nfvo_tenant, datacenter_id, vim_tenant_id=v['vim_tenant_id'])
+    associate_datacenter_to_tenant(mydb, nfvo_tenant, datacenter_id, vim_tenant_id=v['vim_tenant_id'], vim_tenant_name=v['vim_tenant_name'],
+                                   vim_username=v['user'], vim_password=v['passwd'], config=v['config'])
+
+    return datacenter_id
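A hypothetical call of the new helper above: only the supplied fields override the stored attachment; everything else is reloaded from datacenter_tenants before the deassociate/associate round trip:

    edit_datacenter_to_tenant(mydb, nfvo_tenant="tenant-uuid",
                              datacenter_id="dc-uuid", vim_password="new-secret")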
 
 def deassociate_datacenter_to_tenant(mydb, tenant_id, datacenter, vim_tenant_id=None):
     #get datacenter info
@@ -2857,9 +2942,9 @@ def deassociate_datacenter_to_tenant(mydb, tenant_id, datacenter, vim_tenant_id=
         except db_base_Exception as e:
             logger.error("Cannot delete datacenter_tenants " + str(e))
             pass  # the error will be caused because dependencies, vim_tenant can not be deleted
-        thread_id = datacenter_id + "." + tenant_datacenter_item["nfvo_tenant_id"]
+        thread_id = tenant_datacenter_item["datacenter_tenant_id"]
         thread = vim_threads["running"][thread_id]
-        thread.insert_task(new_task("exit", None, store=False))
+        thread.insert_task(new_task("exit", None))
         vim_threads["deleting"][thread_id] = thread
     return "datacenter {} detached. {}".format(datacenter_id, warning)
 
@@ -3054,3 +3139,122 @@ def vim_action_create(mydb, tenant_id, datacenter, item, descriptor):
         raise NfvoException("Not possible to create {} at VIM: {}".format(item, str(e)), e.http_code)
 
     return vim_action_get(mydb, tenant_id, datacenter, item, content)
+
+def sdn_controller_create(mydb, tenant_id, sdn_controller):
+    data = ovim.new_of_controller(sdn_controller)
+    logger.debug('New SDN controller created with uuid {}'.format(data))
+    return data
+
+def sdn_controller_update(mydb, tenant_id, controller_id, sdn_controller):
+    data = ovim.edit_of_controller(controller_id, sdn_controller)
+    msg = 'SDN controller {} updated'.format(data)
+    logger.debug(msg)
+    return msg
+
+def sdn_controller_list(mydb, tenant_id, controller_id=None):
+    if controller_id is None:
+        data = ovim.get_of_controllers()
+    else:
+        data = ovim.show_of_controller(controller_id)
+
+    msg = 'SDN controller list:\n {}'.format(data)
+    logger.debug(msg)
+    return data
+
+def sdn_controller_delete(mydb, tenant_id, controller_id):
+    select_ = ('uuid', 'config')
+    datacenters = mydb.get_rows(FROM='datacenters', SELECT=select_)
+    for datacenter in datacenters:
+        if datacenter['config']:
+            config = yaml.load(datacenter['config'])
+            if 'sdn-controller' in config and config['sdn-controller'] == controller_id:
+                raise NfvoException("SDN controller {} is in use by datacenter {}".format(controller_id, datacenter['uuid']), HTTP_Conflict)
+
+    data = ovim.delete_of_controller(controller_id)
+    msg = 'SDN controller {} deleted'.format(data)
+    logger.debug(msg)
+    return msg
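A hypothetical invocation of the thin ovim wrappers above; the descriptor fields follow sdn_controller_schema added further down, and all values are made up:

    ctrl_id = sdn_controller_create(mydb, tenant_id, {
        "name": "odl1", "type": "opendaylight",
        "ip": "10.0.0.10", "port": 8081,
        "dpid": "00:01:02:03:04:05:06:07"})
    sdn_controller_update(mydb, tenant_id, ctrl_id, {"user": "admin"})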
+
+def datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, sdn_port_mapping):
+    controller = mydb.get_rows(FROM="datacenters", SELECT=("config",), WHERE={"uuid":datacenter_id})
+    if len(controller) < 1:
+        raise NfvoException("Datacenter {} not present in the database".format(datacenter_id), HTTP_Not_Found)
+
+    try:
+        sdn_controller_id = yaml.load(controller[0]["config"])["sdn-controller"]
+    except Exception:
+        raise NfvoException("The datacenter {} has no SDN controller associated".format(datacenter_id), HTTP_Bad_Request)
+
+    sdn_controller = ovim.show_of_controller(sdn_controller_id)
+    switch_dpid = sdn_controller["dpid"]
+
+    maps = list()
+    for compute_node in sdn_port_mapping:
+        #element = {"ofc_id": sdn_controller_id, "region": datacenter_id, "switch_dpid": switch_dpid}
+        element = dict()
+        element["compute_node"] = compute_node["compute_node"]
+        for port in compute_node["ports"]:
+            element["pci"] = port.get("pci")
+            element["switch_port"] = port.get("switch_port")
+            element["switch_mac"] = port.get("switch_mac")
+            if not element["pci"] or not (element["switch_port"] or element["switch_mac"]):
+                raise NfvoException ("The mapping must contain the 'pci' and at least one of the elements 'switch_port'"
+                                     " or 'switch_mac'", HTTP_Bad_Request)
+            maps.append(dict(element))
+
+    return ovim.set_of_port_mapping(maps, ofc_id=sdn_controller_id, switch_dpid=switch_dpid, region=datacenter_id)
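For reference, the input shape datacenter_sdn_port_mapping_set expects, per the sdn_port_mapping_schema added below (illustrative values):

    sdn_port_mapping = [
        {"compute_node": "compute-1",
         "ports": [{"pci": "0000:81:00.0", "switch_port": "port1/1"},
                   {"pci": "0000:81:00.1", "switch_mac": "52:54:00:12:34:56"}]},
    ]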
+
+def datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id):
+    maps = ovim.get_of_port_mappings(db_filter={"region": datacenter_id})
+
+    result = {
+        "sdn-controller": None,
+        "datacenter-id": datacenter_id,
+        "dpid": None,
+        "ports_mapping": list()
+    }
+
+    datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id)
+    if datacenter['config']:
+        config = yaml.load(datacenter['config'])
+        if 'sdn-controller' in config:
+            controller_id = config['sdn-controller']
+            sdn_controller = sdn_controller_list(mydb, tenant_id, controller_id)
+            result["sdn-controller"] = controller_id
+            result["dpid"] = sdn_controller["dpid"]
+
+    if result["sdn-controller"] == None or result["dpid"] == None:
+        raise NfvoException("Not all SDN controller information for datacenter {} could be found: {}".format(datacenter_id, result),
+                            HTTP_Internal_Server_Error)
+
+    if len(maps) == 0:
+        return result
+
+    ports_correspondence_dict = dict()
+    for link in maps:
+        if result["sdn-controller"] != link["ofc_id"]:
+            raise NfvoException("The sdn-controller specified for different port mappings differ", HTTP_Internal_Server_Error)
+        if result["dpid"] != link["switch_dpid"]:
+            raise NfvoException("The dpid specified for different port mappings differ", HTTP_Internal_Server_Error)
+        element = dict()
+        element["pci"] = link["pci"]
+        if link["switch_port"]:
+            element["switch_port"] = link["switch_port"]
+        if link["switch_mac"]:
+            element["switch_mac"] = link["switch_mac"]
+
+        if not link["compute_node"] in ports_correspondence_dict:
+            content = dict()
+            content["compute_node"] = link["compute_node"]
+            content["ports"] = list()
+            ports_correspondence_dict[link["compute_node"]] = content
+
+        ports_correspondence_dict[link["compute_node"]]["ports"].append(element)
+
+    for key in sorted(ports_correspondence_dict):
+        result["ports_mapping"].append(ports_correspondence_dict[key])
+
+    return result
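The list function above inverts the flat per-port rows stored by ovim back into the same per-compute-node shape, e.g. (illustrative values):

    {"sdn-controller": "ctrl-uuid",
     "datacenter-id": "dc-uuid",
     "dpid": "00:01:02:03:04:05:06:07",
     "ports_mapping": [
         {"compute_node": "compute-1",
          "ports": [{"pci": "0000:81:00.0", "switch_port": "port1/1"}]}]}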
+
+def datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id):
+    return ovim.clear_of_port_mapping(db_filter={"region":datacenter_id})
index b5ec9a0..ea6d339 100644 (file)
@@ -755,6 +755,8 @@ class nfvo_db(db_base.db_base):
                             INSERT_={'vim_net_id': vim_id, 'created': net.get('created', False), 'instance_scenario_id':instance_uuid } #,  'type': net['type']
                             INSERT_['datacenter_id'] = datacenter_site_id 
                             INSERT_['datacenter_tenant_id'] = scenarioDict["datacenter2tenant"][datacenter_site_id]
+                            if not net.get('created', False):
+                                INSERT_['status'] = "ACTIVE"
                             if sce_net_id:
                                 INSERT_['sce_net_id'] = sce_net_id
                             created_time += 0.00001
@@ -892,7 +894,7 @@ class nfvo_db(db_base.db_base):
                             vm_manage_iface_list=[]
                             #instance_interfaces
                             cmd = "SELECT vim_interface_id, instance_net_id, internal_name,external_name, mac_address,"\
-                                  " ii.ip_address as ip_address, vim_info, i.type as type"\
+                                  " ii.ip_address as ip_address, vim_info, i.type as type, sdn_port_id"\
                                   " FROM instance_interfaces as ii join interfaces as i on ii.interface_id=i.uuid"\
                                   " WHERE instance_vm_id='{}' ORDER BY created_at".format(vm['uuid'])
                             self.logger.debug(cmd)
@@ -912,7 +914,7 @@ class nfvo_db(db_base.db_base):
                     #from_text = "instance_nets join instance_scenarios on instance_nets.instance_scenario_id=instance_scenarios.uuid " + \
                     #            "join sce_nets on instance_scenarios.scenario_id=sce_nets.scenario_id"
                     #where_text = "instance_nets.instance_scenario_id='"+ instance_dict['uuid'] + "'"
-                    cmd = "SELECT uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, net_id as vnf_net_id, datacenter_id, datacenter_tenant_id"\
+                    cmd = "SELECT uuid,vim_net_id,status,error_msg,vim_info,created, sce_net_id, net_id as vnf_net_id, datacenter_id, datacenter_tenant_id, sdn_net_id"\
                             " FROM instance_nets" \
                             " WHERE instance_scenario_id='{}' ORDER BY created_at".format(instance_dict['uuid'])
                     self.logger.debug(cmd)
index 1ea64f6..1dfcffe 100644 (file)
@@ -86,6 +86,10 @@ config_schema = {
         "db_user": nameshort_schema,
         "db_passwd": {"type":"string"},
         "db_name": nameshort_schema,
+        "db_ovim_host": nameshort_schema,
+        "db_ovim_user": nameshort_schema,
+        "db_ovim_passwd": {"type":"string"},
+        "db_ovim_name": nameshort_schema,
         # Next fields will disappear once the MANO API includes appropriate primitives
         "vim_url": http_schema,
         "vim_url_admin": http_schema,
@@ -109,16 +113,18 @@ config_schema = {
         "log_level_nfvo": log_level_schema,
         "log_level_http": log_level_schema,
         "log_level_console": log_level_schema,
+        "log_level_ovim": log_level_schema,
         "log_file_db": path_schema,
         "log_file_vim": path_schema,
         "log_file_nfvo": path_schema,
         "log_file_http": path_schema,
         "log_file_console": path_schema,
+        "log_file_ovim": path_schema,
         "log_socket_host": nameshort_schema,
         "log_socket_port": port_schema,
         "log_file": path_schema,
     },
-    "required": ['db_host', 'db_user', 'db_passwd', 'db_name'],
+    "required": ['db_user', 'db_passwd', 'db_name'],
     "additionalProperties": False
 }
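With 'db_host' dropped from the required list above, a configuration that omits it still validates and falls back to the localhost default documented in openmanod.cfg (illustrative):

    minimal_config = {"db_user": "mano", "db_passwd": "manopw", "db_name": "mano_db"}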
 
@@ -1095,3 +1101,75 @@ instance_scenario_action_schema = {
     #"maxProperties": 1,
     "additionalProperties": False
 }
+
+sdn_controller_properties={
+    "name": name_schema,
+    "dpid": {"type":"string", "pattern":"^[0-9a-fA-F][02468aceACE](:[0-9a-fA-F]{2}){7}$"},
+    "ip": ip_schema,
+    "port": port_schema,
+    "type": {"type": "string", "enum": ["opendaylight","floodlight","onos"]},
+    "version": {"type" : "string", "minLength":1, "maxLength":12},
+    "user": nameshort_schema,
+    "password": passwd_schema
+}
+sdn_controller_schema = {
+    "title":"sdn controller information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "sdn_controller":{
+            "type":"object",
+            "properties":sdn_controller_properties,
+            "required": ["name", "port", 'ip', 'dpid', 'type'],
+            "additionalProperties": False
+        }
+    },
+    "required": ["sdn_controller"],
+    "additionalProperties": False
+}
+
+sdn_controller_edit_schema = {
+    "title":"sdn controller update information schema",
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "type":"object",
+    "properties":{
+        "sdn_controller":{
+            "type":"object",
+            "properties":sdn_controller_properties,
+            "additionalProperties": False
+        }
+    },
+    "required": ["sdn_controller"],
+    "additionalProperties": False
+}
+
+sdn_port_mapping_schema  = {
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "title":"sdn port mapping information schema",
+    "type": "object",
+    "properties": {
+        "sdn_port_mapping": {
+            "type": "array",
+            "items": {
+                "type": "object",
+                "properties": {
+                    "compute_node": nameshort_schema,
+                    "ports": {
+                        "type": "array",
+                        "items": {
+                            "type": "object",
+                            "properties": {
+                                "pci": pci_schema,
+                                "switch_port": nameshort_schema,
+                                "switch_mac": mac_schema
+                            },
+                            "required": ["pci"]
+                        }
+                    }
+                },
+                "required": ["compute_node", "ports"]
+            }
+        }
+    },
+    "required": ["sdn_port_mapping"]
+}
\ No newline at end of file
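A hedged sketch of how such a descriptor can be checked against the draft-04 schemas above with the jsonschema package (the RO HTTP server performs the equivalent validation; the mapping value is illustrative):

    from jsonschema import validate
    from osm_ro.openmano_schemas import sdn_port_mapping_schema
    validate({"sdn_port_mapping": [
        {"compute_node": "compute-1",
         "ports": [{"pci": "0000:81:00.0", "switch_mac": "52:54:00:12:34:56"}]}
    ]}, sdn_port_mapping_schema)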
index c11f747..19a430d 100644 (file)
 '''
 openmano python client used to interact with openmano-server  
 '''
-__author__="Alfonso Tierno"
+__author__="Alfonso Tierno, Pablo Montes"
 __date__ ="$09-Mar-2016 09:09:48$"
-__version__="0.0.1-r467"
-version_date="Mar 2016"
+__version__="0.0.2-r468"
+version_date="Feb 2017"
 
 import requests
 import json
index ebcee2b..adc5b62 100644 (file)
@@ -44,6 +44,12 @@ db_host:   localhost          # by default localhost
 db_user:   mano               # DB user
 db_passwd: manopw             # DB password
 db_name:   mano_db            # Name of the MANO DB
+# Database ovim parameters
+db_ovim_host:   localhost          # by default localhost
+db_ovim_user:   mano               # DB user
+db_ovim_passwd: manopw             # DB password
+db_ovim_name:   mano_vim_db        # Name of the OVIM MANO DB
+
 
 #other MANO parameters
 #  Folder where the VNF descriptors will be stored
@@ -71,6 +77,8 @@ log_level:         DEBUG  #general log levels for internal logging
 #log_file_http:     /opt/openmano/logs/openmano_http.log
 #log_level_console: DEBUG  #proxy console log levels
 #log_file_console:  /opt/openmano/logs/openmano_console.log
+#log_level_ovim:    DEBUG  #ovim library log levels
+#log_file_ovim:     /opt/openmano/logs/openmano_ovim.log
 
 #Uncomment to send logs via IP to an external host
 #log_socket_host:   localhost
index 42279a2..3c35d6c 100644 (file)
@@ -25,7 +25,7 @@
 This is thread that interact with the host and the libvirt to manage VM
 One thread will be launched per host 
 '''
-__author__ = "Alfonso Tierno"
+__author__ = "Alfonso Tierno, Pablo Montes"
 __date__ = "$10-feb-2017 12:07:15$"
 
 import threading
@@ -34,6 +34,7 @@ import Queue
 import logging
 import vimconn
 from db_base import db_base_Exception
+from ovim import ovimException
 
 
 # from logging import Logger
@@ -46,7 +47,7 @@ def is_task_id(id):
 
 class vim_thread(threading.Thread):
 
-    def __init__(self, vimconn, task_lock, name=None, datacenter_name=None, datacenter_tenant_id=None, db=None, db_lock=None):
+    def __init__(self, vimconn, task_lock, name=None, datacenter_name=None, datacenter_tenant_id=None, db=None, db_lock=None, ovim=None):
         """Init a thread.
         Arguments:
             'id' number of thead
@@ -64,6 +65,7 @@ class vim_thread(threading.Thread):
         self.vim = vimconn
         self.datacenter_name = datacenter_name
         self.datacenter_tenant_id = datacenter_tenant_id
+        self.ovim = ovim
         if not name:
             self.name = vimconn["id"] + "." + vimconn["config"]["datacenter_tenant_id"]
         else:
@@ -75,6 +77,239 @@ class vim_thread(threading.Thread):
 
         self.task_lock = task_lock
         self.task_queue = Queue.Queue(2000)
+        self.refresh_list = []
+        """Contains time ordered task list for refreshing the status of VIM VMs and nets"""
+
+    def _refres_elements(self):
+        """Call VIM to get VMs and networks status until 10 elements"""
+        now = time.time()
+        vm_to_refresh_list = []
+        net_to_refresh_list = []
+        vm_to_refresh_dict = {}
+        net_to_refresh_dict = {}
+        items_to_refresh = 0
+        while self.refresh_list:
+            task = self.refresh_list[0]
+            with self.task_lock:
+                if task['status'] == 'deleted':
+                    self.refresh_list.pop(0)
+                    continue
+                if task['time'] > now:
+                    break
+                task["status"] = "processing"
+            self.refresh_list.pop(0)
+            if task["name"] == 'get-vm':
+                vm_to_refresh_list.append(task["vim_id"])
+                vm_to_refresh_dict[task["vim_id"]] = task
+            elif task["name"] == 'get-net':
+                net_to_refresh_list.append(task["vim_id"])
+                net_to_refresh_dict[task["vim_id"]] = task
+            else:
+                error_text = "unknown task {}".format(task["name"])
+                self.logger.error(error_text)
+            items_to_refresh += 1
+            if items_to_refresh == 10:
+                break
+
+        if vm_to_refresh_list:
+            try:
+                vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
+                for vim_id, vim_info in vim_dict.items():
+                    #look for task
+                    task = vm_to_refresh_dict[vim_id]
+                    self.logger.debug("get-vm vm_id=%s result=%s", task["vim_id"], str(vim_info))
+
+                    # update database
+                    if vim_info.get("error_msg"):
+                        vim_info["error_msg"] = self._format_vim_error_msg(vim_info["error_msg"])
+                    if task["vim_info"].get("status") != vim_info["status"] or \
+                        task["vim_info"].get("error_msg") != vim_info.get("error_msg") or \
+                        task["vim_info"].get("vim_info") != vim_info["vim_info"]:
+                        with self.db_lock:
+                            temp_dict = {"status": vim_info["status"],
+                                         "error_msg": vim_info.get("error_msg"),
+                                         "vim_info": vim_info["vim_info"]}
+                            self.db.update_rows('instance_vms', UPDATE=temp_dict, WHERE={"vim_vm_id": vim_id})
+                    for interface in vim_info["interfaces"]:
+                        for task_interface in task["vim_info"]["interfaces"]:
+                            if task_interface["vim_net_id"] == interface["vim_net_id"]:
+                                break
+                        else:
+                            task_interface = {"vim_net_id": interface["vim_net_id"]}
+                            task["vim_info"]["interfaces"].append(task_interface)
+                        if task_interface != interface:
+                            #delete old port
+                            if task_interface.get("sdn_port_id"):
+                                try:
+                                    self.ovim.delete_port(task_interface["sdn_port_id"])
+                                    task_interface["sdn_port_id"] = None
+                                except ovimException as e:
+                                    self.logger.error("ovimException deleting external_port={} ".format(
+                                        task_interface["sdn_port_id"]) + str(e), exc_info=True)
+                                    # TODO Set error_msg at instance_nets
+                            vim_net_id = interface.pop("vim_net_id")
+                            sdn_net_id = None
+                            sdn_port_name = None
+                            with self.db_lock:
+                                where_= {'iv.vim_vm_id': vim_id, "ine.vim_net_id": vim_net_id,
+                                            'ine.datacenter_tenant_id': self.datacenter_tenant_id}
+                                # TODO check why vim_interface_id is not present in the database
+                                # if interface.get("vim_interface_id"):
+                                #     where_["vim_interface_id"] = interface["vim_interface_id"]
+                                db_ifaces = self.db.get_rows(
+                                    FROM="instance_interfaces as ii left join instance_nets as ine on "
+                                         "ii.instance_net_id=ine.uuid left join instance_vms as iv on "
+                                         "ii.instance_vm_id=iv.uuid",
+                                    SELECT=("ii.uuid as iface_id", "ine.uuid as net_id", "iv.uuid as vm_id", "sdn_net_id"),
+                                    WHERE=where_)
+                            if len(db_ifaces)>1:
+                                self.logger.critical("Refresing interfaces. "
+                                                  "Found more than one interface at database for '{}'".format(where_))
+                            elif len(db_ifaces)==0:
+                                self.logger.critical("Refresing interfaces. "
+                                                  "Not found any interface at database for '{}'".format(where_))
+                                continue
+                            else:
+                                db_iface = db_ifaces[0]
+                                if db_iface.get("sdn_net_id") and interface.get("compute_node") and interface.get("pci"):
+                                    sdn_net_id = db_iface["sdn_net_id"]
+                                    sdn_port_name = sdn_net_id + "." + db_iface["vm_id"]
+                                    sdn_port_name = sdn_port_name[:63]
+                                    try:
+                                        sdn_port_id = self.ovim.new_external_port(
+                                            {"compute_node": interface["compute_node"],
+                                             "pci": interface["pci"],
+                                             "vlan": interface.get("vlan"),
+                                             "net_id": sdn_net_id,
+                                             "region": self.vim["config"]["datacenter_id"],
+                                             "name": sdn_port_name,
+                                             "mac": interface.get("mac_address")})
+                                        interface["sdn_port_id"] = sdn_port_id
+                                    except (ovimException, Exception) as e:
+                                        self.logger.error(
+                                            "ovimException creating new_external_port compute_node={} " \
+                                            "pci={} vlan={} ".format(
+                                                interface["compute_node"],
+                                                interface["pci"],
+                                                interface.get("vlan")) + str(e),
+                                            exc_info=True)
+                                        # TODO Set error_msg at instance_nets
+                                with self.db_lock:
+                                    self.db.update_rows('instance_interfaces', UPDATE=interface,
+                                                    WHERE={'uuid': db_iface["iface_id"]})
+                                # TODO insert instance_id
+                            interface["vim_net_id"] = vim_net_id
+
+                    task["vim_info"] = vim_info
+                    if task["vim_info"]["status"] == "BUILD":
+                        self._insert_refresh(task, now + 5)    # 5 seconds
+                    else:
+                        self._insert_refresh(task, now + 300)  # 5 minutes
+            except vimconn.vimconnException as e:
+                self.logger.error("vimconnException Exception when trying to refresh vms " + str(e))
+                self._insert_refresh(task, now + 300)  # 5minutes
+
+        if net_to_refresh_list:
+            try:
+                vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
+                for vim_id, vim_info in vim_dict.items():
+                    #look for task
+                    task = net_to_refresh_dict[vim_id]
+                    self.logger.debug("get-net net_id=%s result=%s", task["vim_id"], str(vim_info))
+
+                    #get database info
+                    where_ = {"vim_net_id": vim_id, 'datacenter_tenant_id': self.datacenter_tenant_id}
+                    with self.db_lock:
+                        db_nets = self.db.get_rows(
+                            FROM="instance_nets",
+                            SELECT=("uuid as net_id", "sdn_net_id"),
+                            WHERE=where_)
+                    if len(db_nets) > 1:
+                        self.logger.critical("Refresing networks. "
+                                          "Found more than one instance-networks at database for '{}'".format(where_))
+                    elif len(db_nets) == 0:
+                        self.logger.critical("Refresing networks. "
+                                          "Not found any instance-network at database for '{}'".format(where_))
+                        continue
+                    else:
+                        db_net = db_nets[0]
+                        if db_net.get("sdn_net_id"):
+                            # get ovim status
+                            try:
+                                sdn_net = self.ovim.show_network(db_net["sdn_net_id"])
+                                if sdn_net["status"] == "ERROR":
+                                    if not vim_info.get("error_msg"):
+                                        vim_info["error_msg"] = sdn_net["error_msg"]
+                                    else:
+                                        vim_info["error_msg"] = "VIM_ERROR: {} && SDN_ERROR: {}".format(
+                                            self._format_vim_error_msg(vim_info["error_msg"], 1024//2-14),
+                                            self._format_vim_error_msg(sdn_net["error_msg"], 1024//2-14))
+                                    if vim_info["status"] == "VIM_ERROR":
+                                        vim_info["status"] = "VIM_SDN_ERROR"
+                                    else:
+                                        vim_info["status"] = "SDN_ERROR"
+
+                            except (ovimException, Exception) as e:
+                                self.logger.error(
+                                    "ovimException getting network infor snd_net_id={}".format(db_net["sdn_net_id"]),
+                                    exc_info=True)
+                                # TODO Set error_msg at instance_nets
+
+                    # update database
+                    if vim_info.get("error_msg"):
+                        vim_info["error_msg"] = self._format_vim_error_msg(vim_info["error_msg"])
+                    if task["vim_info"].get("status") != vim_info["status"] or \
+                                    task["vim_info"].get("error_msg") != vim_info.get("error_msg") or \
+                                    task["vim_info"].get("vim_info") != vim_info["vim_info"]:
+                        with self.db_lock:
+                            temp_dict = {"status": vim_info["status"],
+                                         "error_msg": vim_info.get("error_msg"),
+                                         "vim_info": vim_info["vim_info"]}
+                            self.db.update_rows('instance_nets', UPDATE=temp_dict, WHERE={"vim_net_id": vim_id})
+
+                    task["vim_info"] = vim_info
+                    if task["vim_info"]["status"] == "BUILD":
+                        self._insert_refresh(task, now + 5)    # 5 seconds
+                    else:
+                        self._insert_refresh(task, now + 300)  # 5 minutes
+            except vimconn.vimconnException as e:
+                self.logger.error("vimconnException Exception when trying to refresh nets " + str(e))
+                self._insert_refresh(task, now + 300)  # 5minutes
+
+        if not items_to_refresh:
+            time.sleep(1)
+
+    def _insert_refresh(self, task, threshold_time):
+        """Insert a task at list of refreshing elements. The refreshing list is ordered by threshold_time (task['time']
+        It is assumed that this is called inside this thread
+        """
+        task["time"] = threshold_time
+        for index in range(0, len(self.refresh_list)):
+            if self.refresh_list[index]["time"] > threshold_time:
+                self.refresh_list.insert(index, task)
+                break
+        else:
+            index = len(self.refresh_list)
+            self.refresh_list.append(task)
+        self.logger.debug("new refresh task={} name={}, time={} index={}".format(
+            task["id"], task["name"], task["time"], index))
+
+    def _remove_refresh(self, task_name, vim_id):
+        """Remove a task with this name and vim_id from the list of refreshing elements.
+        It is assumed that this is called inside this thread outside _refres_elements method
+        Return True if self.refresh_list is modified, task is found
+        Return False if not found
+        """
+        for index in range(0, len(self.refresh_list)):
+            if self.refresh_list[index]["name"] == task_name and self.refresh_list[index]["vim_id"] == vim_id:
+                del self.refresh_list[index]
+                return True
+        return False
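
Taken together, these two helpers keep self.refresh_list sorted by task["time"], so the run loop can always poll the head of the list for the next element due. A minimal standalone sketch of that invariant (task contents are illustrative, not taken from the source):

    # Illustrative only: the ordered-insert discipline of _insert_refresh.
    refresh_list = []

    def insert_sorted(task, threshold_time):
        task["time"] = threshold_time
        for index in range(len(refresh_list)):
            if refresh_list[index]["time"] > threshold_time:
                refresh_list.insert(index, task)
                return
        refresh_list.append(task)

    insert_sorted({"name": "get-net", "vim_id": "n1"}, 300)
    insert_sorted({"name": "get-vm", "vim_id": "v1"}, 5)
    assert [t["vim_id"] for t in refresh_list] == ["v1", "n1"]  # earliest first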
 
     def insert_task(self, task):
         try:
@@ -97,64 +332,113 @@ class vim_thread(threading.Thread):
         while True:
             #TODO reload service
             while True:
-                if not self.task_queue.empty():
-                    task = self.task_queue.get()
-                    self.task_lock.acquire()
-                    if task["status"] == "deleted":
+                try:
+                    if not self.task_queue.empty():
+                        task = self.task_queue.get()
+                        self.task_lock.acquire()
+                        if task["status"] == "deleted":
+                            self.task_lock.release()
+                            continue
+                        task["status"] = "processing"
                         self.task_lock.release()
+                    else:
+                        self._refres_elements()
                         continue
-                    task["status"] == "processing"
-                    self.task_lock.release()
-                else:
-                    now=time.time()
-                    time.sleep(1)
-                    continue
-                self.logger.debug("processing task id={} name={} params={}".format(task["id"], task["name"],
-                                                                                   str(task["params"])))
-                if task["name"] == 'exit' or task["name"] == 'reload':
-                    result, content = self.terminate(task)
-                elif task["name"] == 'new-vm':
-                    result, content = self.new_vm(task)
-                elif task["name"] == 'del-vm':
-                    result, content = self.del_vm(task)
-                elif task["name"] == 'new-net':
-                    result, content = self.new_net(task)
-                elif task["name"] == 'del-net':
-                    result, content = self.del_net(task)
-                else:
-                    error_text = "unknown task {}".format(task["name"])
-                    self.logger.error(error_text)
-                    result = False
-                    content = error_text
+                    self.logger.debug("processing task id={} name={} params={}".format(task["id"], task["name"],
+                                                                                       str(task["params"])))
+                    if task["name"] == 'exit' or task["name"] == 'reload':
+                        result, content = self.terminate(task)
+                    elif task["name"] == 'new-vm':
+                        result, content = self.new_vm(task)
+                    elif task["name"] == 'del-vm':
+                        result, content = self.del_vm(task)
+                    elif task["name"] == 'new-net':
+                        result, content = self.new_net(task)
+                    elif task["name"] == 'del-net':
+                        result, content = self.del_net(task)
+                    else:
+                        error_text = "unknown task {}".format(task["name"])
+                        self.logger.error(error_text)
+                        result = False
+                        content = error_text
+                    self.logger.debug("task id={} name={} result={}:{} params={}".format(task["id"], task["name"],
+                                                                                        result, content,
+                                                                                        str(task["params"])))
 
-                with self.task_lock:
-                    task["status"] = "done" if result else "error"
-                    task["result"] = content
-                self.task_queue.task_done()
+                    with self.task_lock:
+                        task["status"] = "done" if result else "error"
+                        task["result"] = content
+                    self.task_queue.task_done()
 
-                if task["name"] == 'exit':
-                    return 0
-                elif task["name"] == 'reload':
-                    break
+                    if task["name"] == 'exit':
+                        return 0
+                    elif task["name"] == 'reload':
+                        break
+                except Exception as e:
+                    self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
 
         self.logger.debug("Finishing")
 
     def terminate(self, task):
         return True, None
 
+    def _format_vim_error_msg(self, error_text, max_length=1024):
+        if error_text and len(error_text) >= max_length:
+            return error_text[:max_length//2-3] + " ... " + error_text[-max_length//2+3:]
+        return error_text
+
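
As a quick illustration of the truncation rule above (a sketch with a made-up string): with max_length=20 the helper keeps the first max_length//2 - 3 = 7 characters, inserts " ... ", and keeps the last 7, so the result never exceeds the cap.

    # Illustrative only: truncation behaviour of _format_vim_error_msg.
    text = "0123456789abcdefghijklmnopqrs"       # 29 chars >= max_length
    truncated = text[:7] + " ... " + text[-7:]   # same slicing as above, max_length=20
    assert truncated == "0123456 ... mnopqrs"    # 19 chars <= 20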
     def new_net(self, task):
         try:
             task_id = task["id"]
             params = task["params"]
             net_id = self.vim.new_network(*params)
+
+            net_name = params[0]
+            net_type = params[1]
+
+            network = None
+            sdn_net_id = None
+            sdn_controller = self.vim.config.get('sdn-controller')
+            if sdn_controller and (net_type == "data" or net_type == "ptp"):
+                network = {"name": net_name, "type": net_type}
+
+                vim_net = self.vim.get_network(net_id)
+                if vim_net.get('encapsulation') != 'vlan':
+                    raise vimconn.vimconnException(
+                        "net '{}' defined as type '{}' has not vlan encapsulation '{}'".format(
+                            net_name, net_type, vim_net['encapsulation']))
+                network["vlan"] = vim_net.get('segmentation_id')
+                try:
+                    sdn_net_id = self.ovim.new_network(network)
+                except (ovimException, Exception) as e:
+                    self.logger.error("task=%s cannot create SDN network vim_net_id=%s input='%s' ovimException='%s'",
+                                      str(task_id), net_id, str(network), str(e))
             with self.db_lock:
-                self.db.update_rows("instance_nets", UPDATE={"vim_net_id": net_id}, WHERE={"vim_net_id": task_id})
+                self.db.update_rows("instance_nets", UPDATE={"vim_net_id": net_id, "sdn_net_id": sdn_net_id},
+                                    WHERE={"vim_net_id": task_id})
+            new_refresh_task = {"status": "enqueued",
+                                "id": task_id,
+                                "name": "get-net",
+                                "vim_id": net_id,
+                                "vim_info": {} }
+            self._insert_refresh(new_refresh_task, time.time())
             return True, net_id
         except db_base_Exception as e:
             self.logger.error("Error updating database %s", str(e))
             return True, net_id
         except vimconn.vimconnException as e:
+            self.logger.error("Error creating NET, task=%s: %s", str(task_id), str(e))
+            try:
+                with self.db_lock:
+                    self.db.update_rows("instance_nets",
+                                        UPDATE={"error_msg": self._format_vim_error_msg(str(e)), "status": "VIM_ERROR"},
+                                        WHERE={"vim_net_id": task_id})
+            except db_base_Exception as e:
+                self.logger.error("Error updating database %s", str(e))
             return False, str(e)
+        #except ovimException as e:
+        #    self.logger.error("Error creating NET in ovim, task=%s: %s", str(task_id), str(e))
+        #    return False, str(e)
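
For orientation, the SDN branch above only fires for data/ptp networks and requires vlan encapsulation at the VIM; the dict handed to ovim.new_network then has this shape (hypothetical values, names invented):

    # Hypothetical VIM answer and the resulting ovim request:
    vim_net = {"encapsulation": "vlan", "segmentation_id": 3017}
    network = {"name": "ptp-link", "type": "ptp",
               "vlan": vim_net["segmentation_id"]}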
 
     def new_vm(self, task):
         try:
@@ -162,33 +446,63 @@ class vim_thread(threading.Thread):
             task_id = task["id"]
             depends = task.get("depends")
             net_list = params[5]
+            error_text = ""
             for net in net_list:
-                if is_task_id(net["net_id"]):  # change task_id into network_id
+                if "net_id" in net and is_task_id(net["net_id"]):  # change task_id into network_id
                     try:
                         task_net = depends[net["net_id"]]
                         with self.task_lock:
                             if task_net["status"] == "error":
-                                return False, "Cannot create VM because depends on a network that cannot be created: " + \
+                                error_text = "Cannot create VM because depends on a network that cannot be created: " +\
                                        str(task_net["result"])
+                                break
                             elif task_net["status"] == "enqueued" or task_net["status"] == "processing":
-                                return False, "Cannot create VM because depends on a network still not created"
+                                error_text = "Cannot create VM because depends on a network still not created"
+                                break
                             network_id = task_net["result"]
                         net["net_id"] = network_id
                     except Exception as e:
-                        return False, "Error trying to map from task_id={} to task result: {}".format(net["net_id"],
-                                                                                                      str(e))
-            vm_id = self.vim.new_vminstance(*params)
-            with self.db_lock:
-                self.db.update_rows("instance_vms", UPDATE={"vim_vm_id": vm_id}, WHERE={"vim_vm_id": task_id})
-            return True, vm_id
-        except db_base_Exception as e:
-            self.logger.error("Error updtaing database %s", str(e))
+                        error_text = "Error trying to map from task_id={} to task result: {}".format(
+                            net["net_id"],str(e))
+                        break
+            if not error_text:
+                vm_id = self.vim.new_vminstance(*params)
+            try:
+                with self.db_lock:
+                    if error_text:
+                        update = self.db.update_rows("instance_vms",
+                                                     UPDATE={"status": "VIM_ERROR", "error_msg": error_text},
+                                                     WHERE={"vim_vm_id": task_id})
+                    else:
+                        update = self.db.update_rows("instance_vms", UPDATE={"vim_vm_id": vm_id}, WHERE={"vim_vm_id": task_id})
+                    if not update:
+                        self.logger.error("task id={} name={} database not updated vim_vm_id={}".format(
+                            task["id"], task["name"], vm_id))
+            except db_base_Exception as e:
+                self.logger.error("Error updating database %s", str(e))
+            if error_text:
+                return False, error_text
+            new_refresh_task = {"status": "enqueued",
+                                "id": task_id,
+                                "name": "get-vm",
+                                "vim_id": vm_id,
+                                "vim_info": {"interfaces":[]} }
+            self._insert_refresh(new_refresh_task, time.time())
             return True, vm_id
         except vimconn.vimconnException as e:
+            self.logger.error("Error creating VM, task=%s: %s", str(task_id), str(e))
+            try:
+                with self.db_lock:
+                    self.db.update_rows("instance_vms",
+                                        UPDATE={"error_msg": self._format_vim_error_msg(str(e)), "status": "VIM_ERROR"},
+                                        WHERE={"vim_vm_id": task_id})
+            except db_base_Exception as edb:
+                self.logger.error("Error updating database %s", str(edb))
             return False, str(e)
 
     def del_vm(self, task):
-        vm_id = task["params"]
+        vm_id = task["params"][0]
+        interfaces = task["params"][1]
         if is_task_id(vm_id):
             try:
                 task_create = task["depends"][vm_id]
@@ -196,17 +510,28 @@ class vim_thread(threading.Thread):
                     if task_create["status"] == "error":
                         return True, "VM was not created. It has error: " + str(task_create["result"])
                     elif task_create["status"] == "enqueued" or task_create["status"] == "processing":
-                        return False, "Cannot delete VM because still creating"
+                        return False, "Cannot delete VM vim_id={} because still creating".format(vm_id)
                     vm_id = task_create["result"]
             except Exception as e:
                 return False, "Error trying to get task_id='{}':".format(vm_id, str(e))
         try:
+            self._remove_refresh("get-vm", vm_id)
+            for iface in interfaces:
+                if iface.get("sdn_port_id"):
+                    try:
+                        self.ovim.delete_port(iface["sdn_port_id"])
+                    except ovimException as e:
+                        self.logger.error("ovimException deleting external_port={} at VM vim_id={} deletion ".format(
+                            iface["sdn_port_id"], vm_id) + str(e), exc_info=True)
+                        # TODO Set error_msg at instance_nets
+
             return True, self.vim.delete_vminstance(vm_id)
         except vimconn.vimconnException as e:
             return False, str(e)
 
     def del_net(self, task):
-        net_id = task["params"]
+        net_id = task["params"][0]
+        sdn_net_id = task["params"][1]
         if is_task_id(net_id):
             try:
                 task_create = task["depends"][net_id]
@@ -219,8 +544,16 @@ class vim_thread(threading.Thread):
             except Exception as e:
                 return False, "Error trying to get task_id='{}':".format(net_id, str(e))
         try:
-            return True, self.vim.delete_network(net_id)
+            self._remove_refresh("get-net", net_id)
+            result = self.vim.delete_network(net_id)
+            if sdn_net_id:
+                with self.db_lock:
+                    self.ovim.delete_network(sdn_net_id)
+            return True, result
         except vimconn.vimconnException as e:
             return False, str(e)
+        except ovimException as e:
+            self.logger.error("Error deleting network from ovim. net_id: {}, sdn_net_id: {}".format(net_id, sdn_net_id))
+            return False, str(e)
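
With the new positional params, callers enqueue the deletion tasks roughly like this (hypothetical payloads, uuids invented):

    # del-net: (vim network id, SDN network id or None)
    del_net_task = {"name": "del-net",
                    "params": ("vim-net-uuid", "sdn-net-uuid")}
    # del-vm: (vim vm id, list of interfaces carrying an optional sdn_port_id)
    del_vm_task = {"name": "del-vm",
                   "params": ("vim-vm-uuid", [{"sdn_port_id": "sdn-port-uuid"}])}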
 
 
index a9bd9be..18f4334 100644 (file)
@@ -232,6 +232,8 @@ class vimconnector():
             'id': (mandatory) VIM network id
             'name': (mandatory) VIM network name
             'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+            'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
+            'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
             'error_msg': (optional) text that explains the ERROR status
             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
         List can be empty if no network matches the filter_dict. Raise an exception only upon VIM connectivity,
@@ -430,9 +432,9 @@ class vimconnector():
                         vim_net_id:       #network id where this interface is connected, if provided at creation
                         vim_interface_id: #interface/port VIM id
                         ip_address:       #null, or text with IPv4, IPv6 address
-                        physical_compute: #identification of compute node where PF,VF interface is allocated
-                        physical_pci:     #PCI address of the NIC that hosts the PF,VF
-                        physical_vlan:    #physical VLAN used for VF
+                        compute_node:     #identification of compute node where PF,VF interface is allocated
+                        pci:              #PCI address of the NIC that hosts the PF,VF
+                        vlan:             #physical VLAN used for VF
         """
         raise vimconnNotImplemented( "Should have implemented this" )
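
A hypothetical interface entry matching the renamed fields (all values invented for illustration):

    interface = {
        "vim_net_id": "b9c1e3d0-net",       # network the port is attached to
        "vim_interface_id": "7f2e41aa-port",  # interface/port VIM id
        "ip_address": "10.0.0.5",
        "compute_node": "compute-03",       # was physical_compute
        "pci": "0000:5e:00.1",              # was physical_pci
        "vlan": 3017,                       # was physical_vlan
    }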
     
index b501d9d..35cffae 100644 (file)
@@ -389,6 +389,8 @@ class vimconnector(vimconn.vimconnector):
                 subnet = {"id": subnet_id, "fault": str(e)}
             subnets.append(subnet)
         net["subnets"] = subnets
+        net["encapsulation"] = net.get('provider:network_type')
+        net["segmentation_id"] = net.get('provider:segmentation_id')
         return net
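
The two added keys simply mirror Neutron's provider attributes; e.g., given a hypothetical Neutron answer:

    # Hypothetical Neutron network dict; the enrichment above yields:
    net = {"provider:network_type": "vlan", "provider:segmentation_id": 3017}
    net["encapsulation"] = net.get('provider:network_type')       # "vlan"
    net["segmentation_id"] = net.get('provider:segmentation_id')  # 3017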
 
     def delete_network(self, net_id):
@@ -550,10 +552,10 @@ class vimconnector(vimconn.vimconnector):
                             elif 'threads' in numa:
                                 vcpus = numa['threads']
                                 numa_properties["hw:cpu_policy"] = "isolated"
-                            for interface in numa.get("interfaces",() ):
-                                if interface["dedicated"]=="yes":
-                                    raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
-                                #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
+                            for interface in numa.get("interfaces",() ):
+                                if interface["dedicated"]=="yes":
+                                    raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
+                                #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
                                 
                 #create flavor                 
                 new_flavor=self.nova.flavors.create(name, 
@@ -731,35 +733,39 @@ class vimconnector(vimconn.vimconnector):
             for net in net_list:
                 if not net.get("net_id"): #skip non connected iface
                     continue
-                if net["type"]=="virtual" or net["type"]=="VF":
-                    port_dict={
-                        "network_id": net["net_id"],
-                        "name": net.get("name"),
-                        "admin_state_up": True
-                    }    
-                    if net["type"]=="virtual":
-                        if "vpci" in net:
-                            metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
-                    else: # for VF
-                        if "vpci" in net:
-                            if "VF" not in metadata_vpci:
-                                metadata_vpci["VF"]=[]
-                            metadata_vpci["VF"].append([ net["vpci"], "" ])
-                        port_dict["binding:vnic_type"]="direct"
-                    if not port_dict["name"]:
-                        port_dict["name"]=name
-                    if net.get("mac_address"):
-                        port_dict["mac_address"]=net["mac_address"]
-                    if net.get("port_security") == False:
-                        port_dict["port_security_enabled"]=net["port_security"]
-                    new_port = self.neutron.create_port({"port": port_dict })
-                    net["mac_adress"] = new_port["port"]["mac_address"]
-                    net["vim_id"] = new_port["port"]["id"]
-                    net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
-                    net_list_vim.append({"port-id": new_port["port"]["id"]})
-                else:   # for PF
-                    self.logger.warn("new_vminstance: Warning, can not connect a passthrough interface ")
-                    #TODO insert this when openstack consider passthrough ports as openstack neutron ports
+
+                port_dict={
+                    "network_id": net["net_id"],
+                    "name": net.get("name"),
+                    "admin_state_up": True
+                }
+                if net["type"]=="virtual":
+                    if "vpci" in net:
+                        metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
+                elif net["type"]=="VF": # for VF
+                    if "vpci" in net:
+                        if "VF" not in metadata_vpci:
+                            metadata_vpci["VF"]=[]
+                        metadata_vpci["VF"].append([ net["vpci"], "" ])
+                    port_dict["binding:vnic_type"]="direct"
+                else: #For PT
+                    if "vpci" in net:
+                        if "PF" not in metadata_vpci:
+                            metadata_vpci["PF"]=[]
+                        metadata_vpci["PF"].append([ net["vpci"], "" ])
+                    port_dict["binding:vnic_type"]="direct-physical"
+                if not port_dict["name"]:
+                    port_dict["name"]=name
+                if net.get("mac_address"):
+                    port_dict["mac_address"]=net["mac_address"]
+                if net.get("port_security") == False:
+                    port_dict["port_security_enabled"]=net["port_security"]
+                new_port = self.neutron.create_port({"port": port_dict })
+                net["mac_adress"] = new_port["port"]["mac_address"]
+                net["vim_id"] = new_port["port"]["id"]
+                net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
+                net_list_vim.append({"port-id": new_port["port"]["id"]})
+
                 if net.get('floating_ip', False):
                     net['exit_on_floating_ip_error'] = True
                     external_network.append(net)
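
The unified branch above routes all three interface types through a single create_port call; the mapping it implements, restated as a small helper (a sketch for reference only, not part of the connector):

    def vnic_type_for(iface_type):
        # "virtual" keeps Neutron's default vnic_type ("normal").
        if iface_type == "virtual":
            return None
        if iface_type == "VF":
            return "direct"           # SR-IOV virtual function
        return "direct-physical"      # PCI passthrough (PF)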
@@ -1092,6 +1098,9 @@ class vimconnector(vimconn.vimconnector):
                         vim_net_id:       #network id where this interface is connected
                         vim_interface_id: #interface/port VIM id
                         ip_address:       #null, or text with IPv4, IPv6 address
+                        compute_node:     #identification of compute node where PF,VF interface is allocated
+                        pci:              #PCI address of the NIC that hosts the PF,VF
+                        vlan:             #physical VLAN used for VF
         '''
         vm_dict={}
         self.logger.debug("refresh_vms status: Getting tenant VM instance information from VIM")
@@ -1124,6 +1133,21 @@ class vimconnector(vimconn.vimconnector):
                         interface["mac_address"] = port.get("mac_address")
                         interface["vim_net_id"] = port["network_id"]
                         interface["vim_interface_id"] = port["id"]
+                        interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
+                        interface["pci"] = None
+                        if port['binding:profile'].get('pci_slot'):
+                            # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00
+                            # TODO: This is just a workaround valid for niantinc. Find a better way to do so
+                            #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
+                            pci = port['binding:profile']['pci_slot']
+                            # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
+                            interface["pci"] = pci
+                        interface["vlan"] = None
+                        #if network is of type vlan and port is of type direct (sr-iov) then set vlan id
+                        network = self.neutron.show_network(port["network_id"])
+                        if network['network'].get('provider:network_type') == 'vlan' and \
+                            port.get("binding:vnic_type") == "direct":
+                            interface["vlan"] = network['network'].get('provider:segmentation_id')
                         ips=[]
                         #look for floating ip address
                         floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"])
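
For the niantic workaround noted in the TODO above, the commented-out line would map a VF address to its PF like this (addresses invented; assumes two ports per NIC):

    pci = "0000:5e:10.3"                               # hypothetical VF PCI address
    pf_pci = pci[:-4] + "00." + str(int(pci[-1]) % 2)  # slot -> 0x00, function -> F % 2
    assert pf_pci == "0000:5e:00.1"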
index a224255..5b04458 100644 (file)
@@ -63,6 +63,7 @@ import hashlib
 import socket
 import struct
 import netaddr
+import random
 
 # global variable for vcd connector type
 STANDALONE = 'standalone'
@@ -71,13 +72,9 @@ STANDALONE = 'standalone'
 FLAVOR_RAM_KEY = 'ram'
 FLAVOR_VCPUS_KEY = 'vcpus'
 FLAVOR_DISK_KEY = 'disk'
-DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1",
-                      'dhcp_count':50,
-                      'subnet_address':"192.168.1.0/24",
+DEFAULT_IP_PROFILE = {'dhcp_count':50,
                       'dhcp_enabled':True,
-                      'dhcp_start_address':"192.168.1.3",
-                      'ip_version':"IPv4",
-                      'dns_address':"192.168.1.2"
+                      'ip_version':"IPv4"
                       }
 # global variable for wait time
 INTERVAL_TIME = 5
@@ -181,10 +178,6 @@ class vimconnector(vimconn.vimconnector):
         self.nsx_manager = None
         self.nsx_user = None
         self.nsx_password = None
-        self.vcenter_ip = None
-        self.vcenter_port = None
-        self.vcenter_user = None
-        self.vcenter_password = None
 
         if tenant_name is not None:
             orgnameandtenant = tenant_name.split(":")
@@ -217,6 +210,14 @@ class vimconnector(vimconn.vimconnector):
         self.vcenter_user = config.get("vcenter_user", None)
         self.vcenter_password = config.get("vcenter_password", None)
 
+# ############# Stub code for SRIOV #################
+#         try:
+#             self.dvs_name = config['dv_switch_name']
+#         except KeyError:
+#             raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
+#
+#         self.vlanID_range = config.get("vlanID_range", None)
+
         self.org_uuid = None
         self.vca = None
 
@@ -473,6 +474,12 @@ class vimconnector(vimconn.vimconnector):
         if shared:
             isshared = 'true'
 
+# ############# Stub code for SRIOV #################
+#         if net_type == "data" or net_type == "ptp":
+#             if self.config.get('dv_switch_name') == None:
+#                  raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
+#             network_uuid = self.create_dvPort_group(net_name)
+
         network_uuid = self.create_network(network_name=net_name, net_type=net_type,
                                            ip_profile=ip_profile, isshared=isshared)
         if network_uuid is not None:
@@ -558,11 +565,11 @@ class vimconnector(vimconn.vimconnector):
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
 
-        vdcid = vdc.get_id().split(":")[3]
-        networks = vca.get_networks(vdc.get_name())
-        network_list = []
-
         try:
+            vdcid = vdc.get_id().split(":")[3]
+            networks = vca.get_networks(vdc.get_name())
+            network_list = []
+
             for network in networks:
                 filter_entry = {}
                 net_uuid = network.get_id().split(":")
@@ -610,13 +617,13 @@ class vimconnector(vimconn.vimconnector):
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed")
 
-        vdc = vca.get_vdc(self.tenant_name)
-        vdc_id = vdc.get_id().split(":")[3]
+        try:
+            vdc = vca.get_vdc(self.tenant_name)
+            vdc_id = vdc.get_id().split(":")[3]
 
-        networks = vca.get_networks(vdc.get_name())
-        filter_dict = {}
+            networks = vca.get_networks(vdc.get_name())
+            filter_dict = {}
 
-        try:
             for network in networks:
                 vdc_network_id = network.get_id().split(":")
                 if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
@@ -649,6 +656,18 @@ class vimconnector(vimconn.vimconnector):
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() for tenant {} is failed.".format(self.tenant_name))
 
+        # ############# Stub code for SRIOV #################
+#         dvport_group = self.get_dvport_group(net_id)
+#         if dvport_group:
+#             #delete portgroup
+#             status = self.destroy_dvport_group(net_id)
+#             if status:
+#                 # Remove vlanID from persistent info
+#                 if net_id in self.persistent_info["used_vlanIDs"]:
+#                     del self.persistent_info["used_vlanIDs"][net_id]
+#
+#                 return net_id
+
         vcd_network = self.get_vcd_network(network_uuid=net_id)
         if vcd_network is not None and vcd_network:
             if self.delete_network_action(network_uuid=net_id):
@@ -844,117 +863,124 @@ class vimconnector(vimconn.vimconnector):
        #  create vApp Template and check the status; if vCD is able to read the OVF it will respond with an
        #  appropriate status change.
        #  if vCD can parse the OVF we upload the VMDK file
-        for catalog in vca.get_catalogs():
-            if catalog_name != catalog.name:
-                continue
-            link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
-                                       link.get_rel() == 'add', catalog.get_Link())
-            assert len(link) == 1
-            data = """
-            <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
-            """ % (escape(catalog_name), escape(description))
-            headers = vca.vcloud_session.get_vcloud_headers()
-            headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
-            response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
-            if response.status_code == requests.codes.created:
-                catalogItem = XmlElementTree.fromstring(response.content)
-                entity = [child for child in catalogItem if
-                          child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
-                href = entity.get('href')
-                template = href
-                response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify, logger=self.logger)
-
-                if response.status_code == requests.codes.ok:
-                    media = mediaType.parseString(response.content, True)
-                    link = filter(lambda link: link.get_rel() == 'upload:default',
-                                  media.get_Files().get_File()[0].get_Link())[0]
-                    headers = vca.vcloud_session.get_vcloud_headers()
-                    headers['Content-Type'] = 'Content-Type text/xml'
-                    response = Http.put(link.get_href(),
-                                        data=open(media_file_name, 'rb'),
-                                        headers=headers,
+        try:
+            for catalog in vca.get_catalogs():
+                if catalog_name != catalog.name:
+                    continue
+                link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
+                                           link.get_rel() == 'add', catalog.get_Link())
+                assert len(link) == 1
+                data = """
+                <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
+                """ % (escape(catalog_name), escape(description))
+                headers = vca.vcloud_session.get_vcloud_headers()
+                headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
+                response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
+                if response.status_code == requests.codes.created:
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if
+                              child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    href = entity.get('href')
+                    template = href
+                    response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
                                         verify=vca.verify, logger=self.logger)
-                    if response.status_code != requests.codes.ok:
-                        self.logger.debug(
-                            "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
-                                                                                                  media_file_name))
-                        return False
 
-                # TODO fix this with aync block
-                time.sleep(5)
-
-                self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
-
-                # uploading VMDK file
-                # check status of OVF upload and upload remaining files.
-                response = Http.get(template,
-                                    headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify,
-                                    logger=self.logger)
+                    if response.status_code == requests.codes.ok:
+                        media = mediaType.parseString(response.content, True)
+                        link = filter(lambda link: link.get_rel() == 'upload:default',
+                                      media.get_Files().get_File()[0].get_Link())[0]
+                        headers = vca.vcloud_session.get_vcloud_headers()
+                        headers['Content-Type'] = 'Content-Type text/xml'
+                        response = Http.put(link.get_href(),
+                                            data=open(media_file_name, 'rb'),
+                                            headers=headers,
+                                            verify=vca.verify, logger=self.logger)
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug(
+                                "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
+                                                                                                      media_file_name))
+                            return False
+
+                    # TODO fix this with async block
+                    time.sleep(5)
+
+                    self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
+
+                    # uploading VMDK file
+                    # check status of OVF upload and upload remaining files.
+                    response = Http.get(template,
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=self.logger)
 
-                if response.status_code == requests.codes.ok:
-                    media = mediaType.parseString(response.content, True)
-                    number_of_files = len(media.get_Files().get_File())
-                    for index in xrange(0, number_of_files):
-                        links_list = filter(lambda link: link.get_rel() == 'upload:default',
-                                            media.get_Files().get_File()[index].get_Link())
-                        for link in links_list:
-                            # we skip ovf since it already uploaded.
-                            if 'ovf' in link.get_href():
-                                continue
-                            # The OVF file and VMDK must be in a same directory
-                            head, tail = os.path.split(media_file_name)
-                            file_vmdk = head + '/' + link.get_href().split("/")[-1]
-                            if not os.path.isfile(file_vmdk):
-                                return False
-                            statinfo = os.stat(file_vmdk)
-                            if statinfo.st_size == 0:
-                                return False
-                            hrefvmdk = link.get_href()
-
-                            if progress:
-                                print("Uploading file: {}".format(file_vmdk))
-                            if progress:
-                                widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
-                                           FileTransferSpeed()]
-                                progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
-
-                            bytes_transferred = 0
-                            f = open(file_vmdk, 'rb')
-                            while bytes_transferred < statinfo.st_size:
-                                my_bytes = f.read(chunk_bytes)
-                                if len(my_bytes) <= chunk_bytes:
-                                    headers = vca.vcloud_session.get_vcloud_headers()
-                                    headers['Content-Range'] = 'bytes %s-%s/%s' % (
-                                        bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
-                                    headers['Content-Length'] = str(len(my_bytes))
-                                    response = Http.put(hrefvmdk,
-                                                        headers=headers,
-                                                        data=my_bytes,
-                                                        verify=vca.verify,
-                                                        logger=None)
-
-                                    if response.status_code == requests.codes.ok:
-                                        bytes_transferred += len(my_bytes)
-                                        if progress:
-                                            progress_bar.update(bytes_transferred)
-                                    else:
-                                        self.logger.debug(
-                                            'file upload failed with error: [%s] %s' % (response.status_code,
-                                                                                        response.content))
-
-                                        f.close()
-                                        return False
-                            f.close()
-                            if progress:
-                                progress_bar.finish()
-                            time.sleep(10)
-                    return True
-                else:
-                    self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
-                                      format(catalog_name, media_file_name))
-                    return False
+                    if response.status_code == requests.codes.ok:
+                        media = mediaType.parseString(response.content, True)
+                        number_of_files = len(media.get_Files().get_File())
+                        for index in xrange(0, number_of_files):
+                            links_list = filter(lambda link: link.get_rel() == 'upload:default',
+                                                media.get_Files().get_File()[index].get_Link())
+                            for link in links_list:
+                                # we skip ovf since it is already uploaded.
+                                if 'ovf' in link.get_href():
+                                    continue
+                                # The OVF file and VMDK must be in the same directory
+                                head, tail = os.path.split(media_file_name)
+                                file_vmdk = head + '/' + link.get_href().split("/")[-1]
+                                if not os.path.isfile(file_vmdk):
+                                    return False
+                                statinfo = os.stat(file_vmdk)
+                                if statinfo.st_size == 0:
+                                    return False
+                                hrefvmdk = link.get_href()
+
+                                if progress:
+                                    print("Uploading file: {}".format(file_vmdk))
+                                if progress:
+                                    widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
+                                               FileTransferSpeed()]
+                                    progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
+
+                                bytes_transferred = 0
+                                f = open(file_vmdk, 'rb')
+                                while bytes_transferred < statinfo.st_size:
+                                    my_bytes = f.read(chunk_bytes)
+                                    if len(my_bytes) <= chunk_bytes:
+                                        headers = vca.vcloud_session.get_vcloud_headers()
+                                        headers['Content-Range'] = 'bytes %s-%s/%s' % (
+                                            bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
+                                        headers['Content-Length'] = str(len(my_bytes))
+                                        response = Http.put(hrefvmdk,
+                                                            headers=headers,
+                                                            data=my_bytes,
+                                                            verify=vca.verify,
+                                                            logger=None)
+
+                                        if response.status_code == requests.codes.ok:
+                                            bytes_transferred += len(my_bytes)
+                                            if progress:
+                                                progress_bar.update(bytes_transferred)
+                                        else:
+                                            self.logger.debug(
+                                                'file upload failed with error: [%s] %s' % (response.status_code,
+                                                                                            response.content))
+
+                                            f.close()
+                                            return False
+                                f.close()
+                                if progress:
+                                    progress_bar.finish()
+                                time.sleep(10)
+                        return True
+                    else:
+                        self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
+                                          format(catalog_name, media_file_name))
+                        return False
+        except Exception as exp:
+            self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                .format(catalog_name,media_file_name, exp))
+            raise vimconn.vimconnException(
+                "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                .format(catalog_name,media_file_name, exp))
 
         self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
         return False
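
Stripped of the vCloud plumbing, the VMDK transfer loop above follows the standard chunked PUT pattern with Content-Range headers. A minimal sketch, with a hypothetical put_chunk(data, headers) callback standing in for Http.put (note the sketch uses the absolute end offset sent + len(data) - 1, where the code above writes len(my_bytes) - 1):

    import os

    def upload_in_chunks(path, chunk_bytes, put_chunk):
        # put_chunk(data, headers) -> bool is a hypothetical stand-in for Http.put().
        size = os.stat(path).st_size
        sent = 0
        with open(path, "rb") as f:
            while sent < size:
                data = f.read(chunk_bytes)
                headers = {
                    "Content-Range": "bytes %s-%s/%s" % (sent, sent + len(data) - 1, size),
                    "Content-Length": str(len(data)),
                }
                if not put_chunk(data, headers):
                    return False      # caller logs and aborts, as above
                sent += len(data)
        return True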
@@ -1012,6 +1038,25 @@ class vimconnector(vimconn.vimconnector):
                 return catalog.name
         return None
 
+    def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
+        """  Method check catalog and return catalog name lookup done by catalog UUID.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs name or None
+        """
+
+        if not self.validate_uuid4(uuid_string=catalog_uuid):
+            return None
+
+        for catalog in catalogs:
+            catalog_id = catalog.get_id().split(":")[3]
+            if catalog_id == catalog_uuid:
+                return catalog
+        return None
+
     def get_image_id_from_path(self, path=None, progress=False):
         """  Method upload OVF image to vCloud director.
 
@@ -1058,7 +1103,12 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("File name {} Catalog Name {} file path {} "
                           "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
 
-        catalogs = vca.get_catalogs()
+        try:
+            catalogs = vca.get_catalogs()
+        except Exception as exp:
+            self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
+            raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
+
         if len(catalogs) == 0:
             self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
             result = self.create_vimcatalog(vca, catalog_md5_name)
@@ -1232,8 +1282,8 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.info("Creating new instance for entry {}".format(name))
-        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".format(
-                                    description, start, image_id, flavor_id, net_list, cloud_config))
+        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
+                                    description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1266,11 +1316,10 @@ class vimconnector(vimconn.vimconnector):
 
 
         # Set vCPU and Memory based on flavor.
-        #
         vm_cpus = None
         vm_memory = None
         vm_disk = None
-        pci_devices_info = []
+
         if flavor_id is not None:
             if flavor_id not in vimconnector.flavorlist:
                 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
@@ -1285,11 +1334,7 @@ class vimconnector(vimconn.vimconnector):
                     extended = flavor.get("extended", None)
                     if extended:
                         numas=extended.get("numas", None)
-                        if numas:
-                            for numa in numas:
-                                for interface in numa.get("interfaces",() ):
-                                    if interface["dedicated"].strip()=="yes":
-                                        pci_devices_info.append(interface)
+
                 except Exception as exp:
                     raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
 
@@ -1325,31 +1370,52 @@ class vimconnector(vimconn.vimconnector):
 
         # use: 'data', 'bridge', 'mgmt'
         # create vApp.  Set vcpu and ram based on flavor id.
-        vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
-                                   self.get_catalogbyid(image_id, catalogs),
-                                   network_name=None,  # None while creating vapp
-                                   network_mode=network_mode,
-                                   vm_name=vmname_andid,
-                                   vm_cpus=vm_cpus,  # can be None if flavor is None
-                                   vm_memory=vm_memory)  # can be None if flavor is None
-
-        if vapptask is None or vapptask is False:
-            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): failed deploy vApp {}".format(vmname_andid))
-        if type(vapptask) is VappTask:
-            vca.block_until_completed(vapptask)
+        try:
+            vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
+                                       self.get_catalogbyid(image_id, catalogs),
+                                       network_name=None,  # None while creating vapp
+                                       network_mode=network_mode,
+                                       vm_name=vmname_andid,
+                                       vm_cpus=vm_cpus,  # can be None if flavor is None
+                                       vm_memory=vm_memory)  # can be None if flavor is None
+
+            if vapptask is None or vapptask is False:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+            if type(vapptask) is VappTask:
+                vca.block_until_completed(vapptask)
+
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
 
         # we should have now vapp in undeployed state.
-        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
-        vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+        try:
+            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+            vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                    .format(vmname_andid, exp))
+
         if vapp is None:
             raise vimconn.vimconnUnexpectedResponse(
-                "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format(
+                "new_vminstance(): Failed to retrieve vApp {} after creation".format(
                                                                             vmname_andid))
 
-        #Add PCI passthrough configrations
-        PCI_devices_status = False
+        #Add PCI passthrough/SRIOV configurations
         vm_obj = None
-        si = None
+        pci_devices_info = []
+        sriov_net_info = []
+        reserve_memory = False
+
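+        # Classify the requested interfaces: 'PF' means PCI passthrough,
+        # 'VF'/'VFnotShared' mean SR-IOV (only counted when a net_id is present)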
+        for net in net_list:
+            if net["type"] == "PF":
+                pci_devices_info.append(net)
+            elif net["type"] in ("VF", "VFnotShared") and 'net_id' in net:
+                sriov_net_info.append(net)
+
+        #Add PCI
         if len(pci_devices_info) > 0:
             self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
                                                                         vmname_andid ))
@@ -1361,18 +1427,47 @@ class vimconnector(vimconn.vimconnector):
                                                             pci_devices_info,
                                                             vmname_andid)
                                  )
+                reserve_memory = True
             else:
                 self.logger.info("Fail to add PCI devives {} to VM {}".format(
                                                             pci_devices_info,
                                                             vmname_andid)
                                  )
-        # add vm disk
+        # Modify vm disk
         if vm_disk:
             #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
             result = self.modify_vm_disk(vapp_uuid, vm_disk)
             if result :
                 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
 
+        #Add new or existing disks to vApp
+        if disk_list:
+            added_existing_disk = False
+            for disk in disk_list:
+                if "image_id" in disk and disk["image_id"] is not None:
+                    self.logger.debug("Adding existing disk from image {} to vm {} ".format(
+                                                                    disk["image_id"] , vapp_uuid))
+                    self.add_existing_disk(catalogs=catalogs,
+                                           image_id=disk["image_id"],
+                                           size = disk["size"],
+                                           template_name=templateName,
+                                           vapp_uuid=vapp_uuid
+                                           )
+                    added_existing_disk = True
+                else:
+                    #Wait till the previously added existing disk is reflected in the vCD database/API
+                    if added_existing_disk:
+                        time.sleep(5)
+                        added_existing_disk = False
+                    self.add_new_disk(vca, vapp_uuid, disk['size'])
+
+        if numas:
+            # Assign numa affinity for each numa node that defines
+            # paired-threads-id (see set_numa_affinity below)
+            for numa in numas:
+                if 'paired-threads-id' in numa:
+                    paired_threads_id = numa['paired-threads-id']
+                    self.set_numa_affinity(vapp_uuid, paired_threads_id)
+
         # add NICs & connect to networks in netlist
         try:
             self.logger.info("Request to connect VM to a network: {}".format(net_list))
@@ -1407,49 +1502,93 @@ class vimconnector(vimconn.vimconnector):
                         if type(task) is GenericTask:
                             vca.block_until_completed(task)
                         # connect network to VM - with all DHCP by default
-                        self.logger.info("new_vminstance(): Connecting VM to a network {}".format(nets[0].name))
-                        task = vapp.connect_vms(nets[0].name,
-                                                connection_index=nicIndex,
-                                                connections_primary_index=primary_nic_index,
-                                                ip_allocation_mode='DHCP')
-                        if type(task) is GenericTask:
-                            vca.block_until_completed(task)
+
+                        type_list = ['PF','VF','VFnotShared']
+                        if 'type' in net and net['type'] not in type_list:
+                            # fetching nic type from vnf
+                            if 'model' in net:
+                                nic_type = net['model']
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                          "to a network {}".format(nets[0].name))
+                                self.add_network_adapter_to_vms(vapp, nets[0].name,
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net,
+                                                                nic_type=nic_type)
+                            else:
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                         "to a network {}".format(nets[0].name))
+                                self.add_network_adapter_to_vms(vapp, nets[0].name,
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net)
                 nicIndex += 1
-        except KeyError:
-            # it might be a case if specific mandatory entry in dict is empty
-            self.logger.debug("Key error {}".format(KeyError.message))
-            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
 
-        # deploy and power on vm
-        self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
-        deploytask = vapp.deploy(powerOn=False)
-        if type(deploytask) is GenericTask:
-            vca.block_until_completed(deploytask)
-
-        # If VM has PCI devices reserve memory for VM
-        if PCI_devices_status and vm_obj and vcenter_conect:
-            memReserve = vm_obj.config.hardware.memoryMB
-            spec = vim.vm.ConfigSpec()
-            spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
-            task = vm_obj.ReconfigVM_Task(spec=spec)
-            if task:
-                result = self.wait_for_vcenter_task(task, vcenter_conect)
-                self.logger.info("Reserved memmoery {} MB for "\
-                                 "VM VM status: {}".format(str(memReserve),result))
-            else:
-                self.logger.info("Fail to reserved memmoery {} to VM {}".format(
-                                                            str(memReserve),str(vm_obj)))
+            # cloud-init for ssh-key injection
+            if cloud_config:
+                self.cloud_init(vapp,cloud_config)
+
+            # deploy and power on vm
+            self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
+            deploytask = vapp.deploy(powerOn=False)
+            if type(deploytask) is GenericTask:
+                vca.block_until_completed(deploytask)
+
+        # ############# Stub code for SRIOV #################
+        #Add SRIOV
+#         if len(sriov_net_info) > 0:
+#             self.logger.info("Need to add SRIOV adapters {} into VM {}".format(sriov_net_info,
+#                                                                         vmname_andid ))
+#             sriov_status, vm_obj, vcenter_conect = self.add_sriov(vapp_uuid,
+#                                                                   sriov_net_info,
+#                                                                   vmname_andid)
+#             if sriov_status:
+#                 self.logger.info("Added SRIOV {} to VM {}".format(
+#                                                             sriov_net_info,
+#                                                             vmname_andid)
+#                                  )
+#                 reserve_memory = True
+#             else:
+#                 self.logger.info("Fail to add SRIOV {} to VM {}".format(
+#                                                             sriov_net_info,
+#                                                             vmname_andid)
+#                                  )
+
+            # If the VM has PCI devices or SR-IOV interfaces, reserve memory for it
+            if reserve_memory:
+                memReserve = vm_obj.config.hardware.memoryMB
+                spec = vim.vm.ConfigSpec()
+                spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
+                task = vm_obj.ReconfigVM_Task(spec=spec)
+                if task:
+                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                    self.logger.info("Reserved memmoery {} MB for "\
+                                     "VM VM status: {}".format(str(memReserve),result))
+                else:
+                    self.logger.info("Fail to reserved memmoery {} to VM {}".format(
+                                                                str(memReserve),str(vm_obj)))
 
-        self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
-        poweron_task = vapp.poweron()
-        if type(poweron_task) is GenericTask:
-            vca.block_until_completed(poweron_task)
+            self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
+            poweron_task = vapp.poweron()
+            if type(poweron_task) is GenericTask:
+                vca.block_until_completed(poweron_task)
+
+        except Exception as exp:
+            # raised when a mandatory entry in the dict is empty, or on any other pyVcloud exception
+            self.logger.debug("new_vminstance(): Failed to create new vm instance {}: {}".format(name, exp))
+            raise vimconn.vimconnException("new_vminstance(): Failed to create new vm instance {}: {}".format(name, exp))
 
         # check if vApp deployed and if that the case return vApp UUID otherwise -1
         wait_time = 0
         vapp_uuid = None
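+        # Poll vCD until the vApp reports itself as deployed, up to MAX_WAIT_TIME seconds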
         while wait_time <= MAX_WAIT_TIME:
-            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+            try:
+                vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+            except Exception as exp:
+                raise vimconn.vimconnUnexpectedResponse(
+                        "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                        .format(vmname_andid, exp))
+
             if vapp and vapp.me.deployed:
                 vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
                 break
@@ -1673,39 +1812,6 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
 
-        mac_ip_addr={}
-        rheaders = {'Content-Type': 'application/xml'}
-        iso_edges = ['edge-2','edge-3','edge-6','edge-7','edge-8','edge-9','edge-10']
-
-        try:
-            for edge in iso_edges:
-                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
-                self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url))
-
-                resp = requests.get(self.nsx_manager + nsx_api_url,
-                                    auth = (self.nsx_user, self.nsx_password),
-                                    verify = False, headers = rheaders)
-
-                if resp.status_code == requests.codes.ok:
-                    dhcp_leases = XmlElementTree.fromstring(resp.text)
-                    for child in dhcp_leases:
-                        if child.tag == 'dhcpLeaseInfo':
-                            dhcpLeaseInfo = child
-                            for leaseInfo in dhcpLeaseInfo:
-                                for elem in leaseInfo:
-                                    if (elem.tag)=='macAddress':
-                                        mac_addr = elem.text
-                                    if (elem.tag)=='ipAddress':
-                                        ip_addr = elem.text
-                                if (mac_addr) is not None:
-                                    mac_ip_addr[mac_addr]= ip_addr
-                    self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr))
-                else:
-                    self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content))
-        except KeyError:
-            self.logger.debug("Error in response from NSX Manager {}".format(KeyError.message))
-            self.logger.debug(traceback.format_exc())
-
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1715,44 +1821,156 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
         vms_dict = {}
+        nsx_edge_list = []
         for vmuuid in vm_list:
             vmname = self.get_namebyvappid(vca, vdc, vmuuid)
             if vmname is not None:
 
-                the_vapp = vca.get_vapp(vdc, vmname)
-                vm_info = the_vapp.get_vms_details()
-                vm_status = vm_info[0]['status']
-                vm_pci_details = self.get_vm_pci_details(vmuuid)
-                vm_info[0].update(vm_pci_details)
+                try:
+                    the_vapp = vca.get_vapp(vdc, vmname)
+                    vm_info = the_vapp.get_vms_details()
+                    vm_status = vm_info[0]['status']
+                    vm_pci_details = self.get_vm_pci_details(vmuuid)
+                    vm_info[0].update(vm_pci_details)
 
-                vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+                    vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
+                               'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
+                               'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
 
-                # get networks
-                try:
+                    # get networks
                     vm_app_networks = the_vapp.get_vms_network_info()
                     for vapp_network in vm_app_networks:
                         for vm_network in vapp_network:
                             if vm_network['name'] == vmname:
                                 #Assign IP Address based on MAC Address in NSX DHCP lease info
-                                for mac_adres,ip_adres in mac_ip_addr.iteritems():
-                                    if mac_adres == vm_network['mac']:
-                                        vm_network['ip']=ip_adres
+                                if vm_network['ip'] is None:
+                                    if not nsx_edge_list:
+                                        nsx_edge_list = self.get_edge_details()
+                                        if nsx_edge_list is None:
+                                            raise vimconn.vimconnException("refresh_vms_status: "\
+                                                                           "Failed to get edge details from NSX Manager")
+                                    if vm_network['mac'] is not None:
+                                        vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
+
+                                vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                                 interface = {"mac_address": vm_network['mac'],
-                                             "vim_net_id": self.get_network_id_by_name(vm_network['network_name']),
-                                             "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']),
+                                             "vim_net_id": vm_net_id,
+                                             "vim_interface_id": vm_net_id,
                                              'ip_address': vm_network['ip']}
                                 # interface['vim_info'] = yaml.safe_dump(vm_network)
                                 vm_dict["interfaces"].append(interface)
                     # add a vm to vm dict
                     vms_dict.setdefault(vmuuid, vm_dict)
-                except KeyError:
-                    self.logger.debug("Error in respond {}".format(KeyError.message))
+                except Exception as exp:
+                    self.logger.debug("Error in response {}".format(exp))
                     self.logger.debug(traceback.format_exc())
 
         return vms_dict
 
+
+    def get_edge_details(self):
+        """Get the NSX edge list from NSX Manager
+           Returns list of NSX edges
+        """
+        edge_list = []
+        rheaders = {'Content-Type': 'application/xml'}
+        nsx_api_url = '/api/4.0/edges'
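+        # NSX Manager answers with a pagedEdgeList XML document; walk each
+        # edgePage and collect the id of every edgeSummary entry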
+
+        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
+
+        try:
+            resp = requests.get(self.nsx_manager + nsx_api_url,
+                                auth = (self.nsx_user, self.nsx_password),
+                                verify = False, headers = rheaders)
+            if resp.status_code == requests.codes.ok:
+                paged_Edge_List = XmlElementTree.fromstring(resp.text)
+                for edge_pages in paged_Edge_List:
+                    if edge_pages.tag == 'edgePage':
+                        for edge_summary in edge_pages:
+                            if edge_summary.tag == 'pagingInfo':
+                                for element in edge_summary:
+                                    if element.tag == 'totalCount' and element.text == '0':
+                                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                                       .format(self.nsx_manager))
+
+                            if edge_summary.tag == 'edgeSummary':
+                                for element in edge_summary:
+                                    if element.tag == 'id':
+                                        edge_list.append(element.text)
+                    else:
+                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                       .format(self.nsx_manager))
+
+                if not edge_list:
+                    raise vimconn.vimconnException("get_edge_details: "\
+                                                   "No NSX edge details found: {}"
+                                                   .format(self.nsx_manager))
+                else:
+                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
+                    return edge_list
+            else:
+                self.logger.debug("get_edge_details: "
+                                  "Failed to get NSX edge details from NSX Manager: {}"
+                                  .format(resp.content))
+                return None
+
+        except Exception as exp:
+            self.logger.debug("get_edge_details: "\
+                              "Failed to get NSX edge details from NSX Manager: {}"
+                              .format(exp))
+            raise vimconn.vimconnException("get_edge_details: "\
+                                           "Failed to get NSX edge details from NSX Manager: {}"
+                                           .format(exp))
+
+
+    def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
+        """Get IP address details from NSX edges, using the MAC address
+           PARAMS: nsx_edges : List of NSX edges
+                   mac_address : Find IP address corresponding to this MAC address
+           Returns: IP address corresponding to the provided MAC address
+        """
+
+        ip_addr = None
+        rheaders = {'Content-Type': 'application/xml'}
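+        # Each NSX edge exposes its DHCP bindings at /api/4.0/edges/<id>/dhcp/leaseInfo;
+        # scan every edge until a lease matching the given MAC address is found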
+
+        self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
+
+        try:
+            for edge in nsx_edges:
+                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
+
+                resp = requests.get(self.nsx_manager + nsx_api_url,
+                                    auth = (self.nsx_user, self.nsx_password),
+                                    verify = False, headers = rheaders)
+
+                if resp.status_code == requests.codes.ok:
+                    dhcp_leases = XmlElementTree.fromstring(resp.text)
+                    for child in dhcp_leases:
+                        if child.tag == 'dhcpLeaseInfo':
+                            dhcpLeaseInfo = child
+                            for leaseInfo in dhcpLeaseInfo:
+                                # reset per lease so a MAC from a previous entry
+                                # is never matched against a stale value
+                                edge_mac_addr = None
+                                for elem in leaseInfo:
+                                    if elem.tag == 'macAddress':
+                                        edge_mac_addr = elem.text
+                                    if elem.tag == 'ipAddress':
+                                        ip_addr = elem.text
+                                if edge_mac_addr is not None and edge_mac_addr == mac_address:
+                                    self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
+                                                      .format(ip_addr, mac_address, edge))
+                                    return ip_addr
+                else:
+                    self.logger.debug("get_ipaddr_from_NSXedge: "\
+                                      "Error occurred while getting DHCP lease info from NSX Manager: {}"
+                                      .format(resp.content))
+
+            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+            return None
+
+        except XmlElementTree.ParseError as Err:
+            self.logger.debug("ParseError in response from NSX Manager: {}".format(Err), exc_info=True)
+
+
     def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
         """Send and action over a VM instance from VIM
         Returns the vm_id if the action was successfully sent to the VIM"""
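+        # Supported action_dict keys: start, rebuild, pause, resume,
+        # shutoff/shutdown, forceOff and reboot (one action per call)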
@@ -1782,54 +2000,53 @@ class vimconnector(vimconn.vimconnector):
             if "start" in action_dict:
                 vm_info = the_vapp.get_vms_details()
                 vm_status = vm_info[0]['status']
-                self.logger.info("Power on vApp: vm_status:{} {}".format(type(vm_status),vm_status))
+                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
                 if vm_status == "Suspended" or vm_status == "Powered off":
                     power_on_task = the_vapp.poweron()
-                    if power_on_task is not None and type(power_on_task) is GenericTask:
-                        result = vca.block_until_completed(power_on_task)
-                        if result:
-                            self.logger.info("action_vminstance: Powered on vApp: {}".format(vapp_name))
-                        else:
-                            self.logger.info("action_vminstance: Failed to power on vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Wait for vApp {} to power on".format(vapp_name))
-            elif "rebuild" in action_dict:
-                self.logger.info("action_vminstance: Rebuilding vApp: {}".format(vapp_name))
-                power_on_task = the_vapp.deploy(powerOn=True)
-                if type(power_on_task) is GenericTask:
                     result = vca.block_until_completed(power_on_task)
-                    if result:
-                        self.logger.info("action_vminstance: Rebuilt vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Failed to rebuild vApp: {}".format(vapp_name))
-                else:
-                    self.logger.info("action_vminstance: Wait for vApp rebuild {} to power on".format(vapp_name))
+                    self.instance_actions_result("start", result, vapp_name)
+            elif "rebuild" in action_dict:
+                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
+                rebuild_task = the_vapp.deploy(powerOn=True)
+                result = vca.block_until_completed(rebuild_task)
+                self.instance_actions_result("rebuild", result, vapp_name)
             elif "pause" in action_dict:
-                pass
-                ## server.pause()
+                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
+                pause_task = the_vapp.undeploy(action='suspend')
+                result = vca.block_until_completed(pause_task)
+                self.instance_actions_result("pause", result, vapp_name)
             elif "resume" in action_dict:
-                pass
-                ## server.resume()
+                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
+                power_task = the_vapp.poweron()
+                result = vca.block_until_completed(power_task)
+                self.instance_actions_result("resume", result, vapp_name)
             elif "shutoff" in action_dict or "shutdown" in action_dict:
+                action_name, value = action_dict.items()[0]
+                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
                 power_off_task = the_vapp.undeploy(action='powerOff')
-                if type(power_off_task) is GenericTask:
-                    result = vca.block_until_completed(power_off_task)
-                    if result:
-                        self.logger.info("action_vminstance: Powered off vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Failed to power off vApp: {}".format(vapp_name))
+                result = vca.block_until_completed(power_off_task)
+                if action_name == "shutdown":
+                    self.instance_actions_result("shutdown", result, vapp_name)
                 else:
-                    self.logger.info("action_vminstance: Wait for vApp {} to power off".format(vapp_name))
+                    self.instance_actions_result("shutoff", result, vapp_name)
             elif "forceOff" in action_dict:
-                the_vapp.reset()
-            elif "terminate" in action_dict:
-                the_vapp.delete()
-            # elif "createImage" in action_dict:
-            #     server.create_image()
+                result = the_vapp.undeploy(action='force')
+                self.instance_actions_result("forceOff", result, vapp_name)
+            elif "reboot" in action_dict:
+                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
+                reboot_task = the_vapp.reboot()
+                if type(reboot_task) is GenericTask:
+                    vca.block_until_completed(reboot_task)
             else:
-                pass
-        except:
-            pass
+                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
+            return vm__vim_uuid
+        except Exception as exp:
+            self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
+
+    def instance_actions_result(self, action, result, vapp_name):
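+        """Log whether an action_vminstance() operation succeeded on the given vApp."""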
+        if result:
+            self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
+        else:
+            self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
 
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
@@ -2303,8 +2520,8 @@ class vimconnector(vimconn.vimconnector):
         if network_uuid is None:
             return network_uuid
 
-        content = self.get_network_action(network_uuid=network_uuid)
         try:
+            content = self.get_network_action(network_uuid=network_uuid)
             vm_list_xmlroot = XmlElementTree.fromstring(content)
 
             network_configuration['status'] = vm_list_xmlroot.get("status")
@@ -2320,8 +2537,9 @@ class vimconnector(vimconn.vimconnector):
                         if tagKey != "":
                             network_configuration[tagKey] = configuration.text.strip()
             return network_configuration
-        except:
-            pass
+        except Exception as exp:
+            self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
 
         return network_configuration
 
@@ -2387,7 +2605,7 @@ class vimconnector(vimconn.vimconnector):
             vm_list_xmlroot = XmlElementTree.fromstring(content)
             vcd_uuid = vm_list_xmlroot.get('id').split(":")
             if len(vcd_uuid) == 4:
-                self.logger.info("Create new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+                self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
                 return vcd_uuid[3]
         except:
             self.logger.debug("Failed create network {}".format(network_name))
@@ -2473,26 +2691,50 @@ class vimconnector(vimconn.vimconnector):
                 except:
                     return None
 
-            #Configure IP profile of the network
-            ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
-
-            gateway_address=ip_profile['gateway_address']
-            dhcp_count=int(ip_profile['dhcp_count'])
-            subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+            try:
+                #Configure IP profile of the network
+                ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
 
-            if ip_profile['dhcp_enabled']==True:
-                dhcp_enabled='true'
-            else:
-                dhcp_enabled='false'
-            dhcp_start_address=ip_profile['dhcp_start_address']
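+                # Fill in any missing ip_profile fields: default to a random
+                # 192.168.x.0/24 subnet with gateway at .1, DNS at .2 and a
+                # DHCP pool starting at .3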
+                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
+                    subnet_rand = random.randint(0, 255)
+                    ip_base = "192.168.{}.".format(subnet_rand)
+                    ip_profile['subnet_address'] = ip_base + "0/24"
+                else:
+                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
+
+                if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
+                    ip_profile['gateway_address']=ip_base + "1"
+                if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
+                    ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
+                if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
+                    ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
+                if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
+                    ip_profile['dhcp_start_address']=ip_base + "3"
+                if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
+                    ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
+                if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
+                    ip_profile['dns_address']=ip_base + "2"
+
+                gateway_address=ip_profile['gateway_address']
+                dhcp_count=int(ip_profile['dhcp_count'])
+                subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+
+                if ip_profile['dhcp_enabled']==True:
+                    dhcp_enabled='true'
+                else:
+                    dhcp_enabled='false'
+                dhcp_start_address=ip_profile['dhcp_start_address']
 
-            #derive dhcp_end_address from dhcp_start_address & dhcp_count
-            end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
-            end_ip_int += dhcp_count - 1
-            dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
+                #derive dhcp_end_address from dhcp_start_address & dhcp_count
+                end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
+                end_ip_int += dhcp_count - 1
+                dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
 
-            ip_version=ip_profile['ip_version']
-            dns_address=ip_profile['dns_address']
+                ip_version=ip_profile['ip_version']
+                dns_address=ip_profile['dns_address']
+            except KeyError as exp:
+                self.logger.debug("Create Network REST: Key error {}".format(exp))
+                raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
 
             # either use client provided UUID or search for a first available
             #  if both are not defined we return none
@@ -2500,64 +2742,36 @@ class vimconnector(vimconn.vimconnector):
                 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
                 add_vdc_rest_url = ''.join(url_list)
 
-            if net_type=='ptp':
-                fence_mode="isolated"
-                isshared='false'
-                is_inherited='false'
-                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
-                                <Description>Openmano created</Description>
-                                        <Configuration>
-                                            <IpScopes>
-                                                <IpScope>
-                                                    <IsInherited>{1:s}</IsInherited>
-                                                    <Gateway>{2:s}</Gateway>
-                                                    <Netmask>{3:s}</Netmask>
-                                                    <Dns1>{4:s}</Dns1>
-                                                    <IsEnabled>{5:s}</IsEnabled>
-                                                    <IpRanges>
-                                                        <IpRange>
-                                                            <StartAddress>{6:s}</StartAddress>
-                                                            <EndAddress>{7:s}</EndAddress>
-                                                        </IpRange>
-                                                    </IpRanges>
-                                                </IpScope>
-                                            </IpScopes>
-                                            <FenceMode>{8:s}</FenceMode>
-                                        </Configuration>
-                                        <IsShared>{9:s}</IsShared>
-                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
-                                                        subnet_address, dns_address, dhcp_enabled,
-                                                        dhcp_start_address, dhcp_end_address, fence_mode, isshared)
-
-            else:
-                fence_mode="bridged"
-                is_inherited='false'
-                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
-                                <Description>Openmano created</Description>
-                                        <Configuration>
-                                            <IpScopes>
-                                                <IpScope>
-                                                    <IsInherited>{1:s}</IsInherited>
-                                                    <Gateway>{2:s}</Gateway>
-                                                    <Netmask>{3:s}</Netmask>
-                                                    <Dns1>{4:s}</Dns1>
-                                                    <IsEnabled>{5:s}</IsEnabled>
-                                                    <IpRanges>
-                                                        <IpRange>
-                                                            <StartAddress>{6:s}</StartAddress>
-                                                            <EndAddress>{7:s}</EndAddress>
-                                                        </IpRange>
-                                                    </IpRanges>
-                                                </IpScope>
-                                            </IpScopes>
-                                            <ParentNetwork href="{8:s}"/>
-                                            <FenceMode>{9:s}</FenceMode>
-                                        </Configuration>
-                                        <IsShared>{10:s}</IsShared>
-                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
-                                                        subnet_address, dns_address, dhcp_enabled,
-                                                        dhcp_start_address, dhcp_end_address, available_networks,
-                                                        fence_mode, isshared)
+            #Create all networks as Direct Org VDC type networks;
+            #the Org VDC network is not used when the interface is an underlay (data/ptp) one.
+            fence_mode="bridged"
+            is_inherited='false'
+            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                            <Description>Openmano created</Description>
+                                    <Configuration>
+                                        <IpScopes>
+                                            <IpScope>
+                                                <IsInherited>{1:s}</IsInherited>
+                                                <Gateway>{2:s}</Gateway>
+                                                <Netmask>{3:s}</Netmask>
+                                                <Dns1>{4:s}</Dns1>
+                                                <IsEnabled>{5:s}</IsEnabled>
+                                                <IpRanges>
+                                                    <IpRange>
+                                                        <StartAddress>{6:s}</StartAddress>
+                                                        <EndAddress>{7:s}</EndAddress>
+                                                    </IpRange>
+                                                </IpRanges>
+                                            </IpScope>
+                                        </IpScopes>
+                                        <ParentNetwork href="{8:s}"/>
+                                        <FenceMode>{9:s}</FenceMode>
+                                    </Configuration>
+                                    <IsShared>{10:s}</IsShared>
+                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
+                                                    subnet_address, dns_address, dhcp_enabled,
+                                                    dhcp_start_address, dhcp_end_address, available_networks,
+                                                    fence_mode, isshared)
 
             headers = vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
@@ -2569,8 +2783,8 @@ class vimconnector(vimconn.vimconnector):
                                      logger=vca.logger)
 
                 if response.status_code != 201:
-                    self.logger.debug("Create Network POST REST API call failed. Return status code {}"
-                                      .format(response.status_code))
+                    self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
+                                      .format(response.status_code,response.content))
                 else:
                     network = networkType.parseString(response.content, True)
                     create_nw_task = network.get_Tasks().get_Task()[0]
@@ -2578,7 +2792,7 @@ class vimconnector(vimconn.vimconnector):
                     # if we all ok we respond with content after network creation completes
                     # otherwise by default return None
                     if create_nw_task is not None:
-                        self.logger.debug("Create Network REST : Waiting for Nw creation complete")
+                        self.logger.debug("Create Network REST : Waiting for Network creation complete")
                         status = vca.block_until_completed(create_nw_task)
                         if status:
                             return response.content
@@ -3081,33 +3295,16 @@ class vimconnector(vimconn.vimconnector):
                 vcenter_conect object
         """
         vm_obj = None
-        vcenter_conect = None
         self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
-        try:
-            vm_vcenter_info = self.get_vm_vcenter_info(vapp_uuid)
-        except Exception as exp:
-            self.logger.error("Error occurred while getting vCenter infromationn"\
-                             " for VM : {}".format(exp))
-            raise vimconn.vimconnException(message=exp)
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
 
-        if vm_vcenter_info["vm_moref_id"]:
-            context = None
-            if hasattr(ssl, '_create_unverified_context'):
-                context = ssl._create_unverified_context()
+        if vm_moref_id:
             try:
                 no_of_pci_devices = len(pci_devices)
                 if no_of_pci_devices > 0:
-                    vcenter_conect = SmartConnect(
-                                            host=vm_vcenter_info["vm_vcenter_ip"],
-                                            user=vm_vcenter_info["vm_vcenter_user"],
-                                            pwd=vm_vcenter_info["vm_vcenter_password"],
-                                            port=int(vm_vcenter_info["vm_vcenter_port"]),
-                                            sslContext=context)
-                    atexit.register(Disconnect, vcenter_conect)
-                    content = vcenter_conect.RetrieveContent()
-
                     #Get VM and its host
-                    host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                     self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
                     if host_obj and vm_obj:
                         #get PCI devies from host on which vapp is currently installed
@@ -3376,7 +3573,7 @@ class vimconnector(vimconn.vimconnector):
                                                                              exp))
         return task
 
-    def get_vm_vcenter_info(self , vapp_uuid):
+    def get_vm_vcenter_info(self):
         """
         Method to get details of vCenter and vm
 
@@ -3409,16 +3606,8 @@ class vimconnector(vimconn.vimconnector):
         else:
             raise vimconn.vimconnException(message="vCenter user password is not provided."\
                                            " Please provide vCenter user password while attaching datacenter to tenant in --config")
-        try:
-            vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
-            if vm_details and "vm_vcenter_info" in vm_details:
-                vm_vcenter_info["vm_moref_id"] = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
-
-            return vm_vcenter_info
 
-        except Exception as exp:
-            self.logger.error("Error occurred while getting vCenter infromationn"\
-                             " for VM : {}".format(exp))
+        return vm_vcenter_info
 
 
     def get_vm_pci_details(self, vmuuid):
@@ -3434,23 +3623,12 @@ class vimconnector(vimconn.vimconnector):
         """
         vm_pci_devices_info = {}
         try:
-            vm_vcenter_info = self.get_vm_vcenter_info(vmuuid)
-            if vm_vcenter_info["vm_moref_id"]:
-                context = None
-                if hasattr(ssl, '_create_unverified_context'):
-                    context = ssl._create_unverified_context()
-                vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"],
-                                        user=vm_vcenter_info["vm_vcenter_user"],
-                                        pwd=vm_vcenter_info["vm_vcenter_password"],
-                                        port=int(vm_vcenter_info["vm_vcenter_port"]),
-                                        sslContext=context
-                                    )
-                atexit.register(Disconnect, vcenter_conect)
-                content = vcenter_conect.RetrieveContent()
-
+            vcenter_conect, content = self.get_vcenter_content()
+            vm_moref_id = self.get_vm_moref_id(vmuuid)
+            if vm_moref_id:
                 #Get VM and its host
                 if content:
-                    host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                     if host_obj and vm_obj:
                         vm_pci_devices_info["host_name"]= host_obj.name
                         vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
@@ -3469,3 +3647,1144 @@ class vimconnector(vimconn.vimconnector):
                              " for VM : {}".format(exp))
             raise vimconn.vimconnException(message=exp)
 
+    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
+        """
+            Method to add a network adapter to the VM(s) of a vApp
+            Args :
+                vapp - vApp whose VMs get the new network adapter
+                network_name - name of network
+                primary_nic_index - int value for primary nic index
+                nicIndex - int value for nic index
+                net - network dict; may carry 'ip_address' and 'floating_ip'
+                nic_type - specify model name to which add to vm
+            Returns:
+                None
+        """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
+
+        try:
+            ip_address = None
+            floating_ip = False
+            if 'floating_ip' in net:
+                floating_ip = net['floating_ip']
+
+            # Stub for ip_address feature
+            if 'ip_address' in net:
+                ip_address = net['ip_address']
+
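+            # Choose the vCloud IP allocation mode: POOL for floating IPs,
+            # MANUAL when a fixed address was requested, DHCP otherwise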
+            if floating_ip:
+                allocation_mode = "POOL"
+            elif ip_address:
+                allocation_mode = "MANUAL"
+            else:
+                allocation_mode = "DHCP"
+
+            if not nic_type:
+                for vms in vapp._get_vms():
+                    vm_id = (vms.id).split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+
+                    response = Http.get(url=url_rest_call,
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=vca.logger)
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                             "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                         "network connection section")
+
+                    data = response.content
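+                    # Patch the NetworkConnectionSection XML in place: create the
+                    # section (with a PrimaryNetworkConnectionIndex) on first use,
+                    # otherwise append one more NetworkConnection element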
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                                         allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+                    else:
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                          allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+
+                    headers = vca.vcloud_session.get_vcloud_headers()
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+                    response = Http.put(url=url_rest_call, headers=headers, data=data,
+                                                                   verify=vca.verify,
+                                                                   logger=vca.logger)
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {} ".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                            "network connection section")
+                    else:
+                        nic_task = taskType.parseString(response.content, True)
+                        if isinstance(nic_task, GenericTask):
+                            vca.block_until_completed(nic_task)
+                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
+                                                               "default NIC type".format(vm_id))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
+                                                              "connect NIC type".format(vm_id))
+            else:
+                for vms in vapp._get_vms():
+                    vm_id = (vms.id).split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+
+                    response = Http.get(url=url_rest_call,
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=vca.logger)
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {}".format(url_rest_call,
+                                                                   response.content,
+                                                              response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                        "network connection section")
+                    data = response.content
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                <NetworkAdapterType>{}</NetworkAdapterType>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                               allocation_mode, nic_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+                    else:
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    <NetworkAdapterType>{}</NetworkAdapterType>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                allocation_mode, nic_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+
+                    headers = vca.vcloud_session.get_vcloud_headers()
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+                    response = Http.put(url=url_rest_call, headers=headers, data=data,
+                                                                   verify=vca.verify,
+                                                                   logger=vca.logger)
+
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {}".format(url_rest_call,
+                                                                   response.content,
+                                                              response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                           "network connection section")
+                    else:
+                        nic_task = taskType.parseString(response.content, True)
+                        if isinstance(nic_task, GenericTask):
+                            vca.block_until_completed(nic_task)
+                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
+                                               "connected to NIC type {}".format(vm_id, nic_type))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
+                                               "failed to connect NIC type {}".format(vm_id, nic_type))
+        except Exception as exp:
+            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
+                                               "while adding Network adapter")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def set_numa_affinity(self, vmuuid, paired_threads_id):
+        """
+            Method to assign numa affinity in vm configuration parameters
+            Args :
+                vmuuid - vm uuid
+                paired_threads_id - one or more virtual processor
+                                    numbers
+            Returns:
+                None
+        """
+        try:
+            vm_moref_id, vm_vcenter_host, vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
+            if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
+                context = None
+                if hasattr(ssl, '_create_unverified_context'):
+                    context = ssl._create_unverified_context()
+                    vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
+                                  pwd=self.passwd, port=int(vm_vcenter_port),
+                                  sslContext=context)
+                    atexit.register(Disconnect, vcenter_conect)
+                    content = vcenter_conect.RetrieveContent()
+
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                    if vm_obj:
+                        config_spec = vim.vm.ConfigSpec()
+                        config_spec.extraConfig = []
+                        opt = vim.option.OptionValue()
+                        opt.key = 'numa.nodeAffinity'
+                        opt.value = str(paired_threads_id)
+                        config_spec.extraConfig.append(opt)
+                        task = vm_obj.ReconfigVM_Task(config_spec)
+                        if task:
+                            result = self.wait_for_vcenter_task(task, vcenter_conect)
+                            extra_config = vm_obj.config.extraConfig
+                            flag = False
+                            for opts in extra_config:
+                                if 'numa.nodeAffinity' in opts.key:
+                                    flag = True
+                                    self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
+                                                             "value {} for vm {}".format(opt.value, vm_obj))
+                            if flag:
+                                return
+                    else:
+                        self.logger.error("set_numa_affinity: Failed to assign numa affinity")
+        except Exception as exp:
+            self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
+                                                       "for VM {} : {}".format(vmuuid, exp))
+            raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
+                                                                           "affinity".format(exp))
+
+
+
+    def cloud_init(self, vapp, cloud_config):
+        """
+        Method to inject ssh-key
+        vapp - vapp object
+        cloud_config a dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) string is a text script to be passed directly to cloud-init
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+        """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect to vCloud director")
+
+        try:
+            if isinstance(cloud_config, dict):
+                key_pairs = []
+                userdata = []
+                if "key-pairs" in cloud_config:
+                    key_pairs = cloud_config["key-pairs"]
+
+                if "users" in cloud_config:
+                    userdata = cloud_config["users"]
+
+            for key in key_pairs:
+                for user in userdata:
+                    if 'name' in user: user_name = user['name']
+                    if 'key-pairs' in user and len(user['key-pairs']) > 0:
+                        for user_key in user['key-pairs']:
+                            customize_script = """
+                        #!/bin/bash
+                        echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+                        if [ "$1" = "precustomization" ];then
+                            echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+                            if [ ! -d /root/.ssh ];then
+                                mkdir /root/.ssh
+                                chown root:root /root/.ssh
+                                chmod 700 /root/.ssh
+                                touch /root/.ssh/authorized_keys
+                                chown root:root /root/.ssh/authorized_keys
+                                chmod 600 /root/.ssh/authorized_keys
+                                # make centos with selinux happy
+                                which restorecon && restorecon -Rv /root/.ssh
+                                echo '{key}' >> /root/.ssh/authorized_keys
+                            else
+                                touch /root/.ssh/authorized_keys
+                                chown root:root /root/.ssh/authorized_keys
+                                chmod 600 /root/.ssh/authorized_keys
+                                echo '{key}' >> /root/.ssh/authorized_keys
+                            fi
+                            if [ -d /home/{user_name} ];then
+                                if [ ! -d /home/{user_name}/.ssh ];then
+                                    mkdir /home/{user_name}/.ssh
+                                    chown {user_name}:{user_name} /home/{user_name}/.ssh
+                                    chmod 700 /home/{user_name}/.ssh
+                                    touch /home/{user_name}/.ssh/authorized_keys
+                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
+                                    # make centos with selinux happy
+                                    which restorecon && restorecon -Rv /home/{user_name}/.ssh
+                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                                else
+                                    touch /home/{user_name}/.ssh/authorized_keys
+                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
+                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                                fi
+                            fi
+                        fi""".format(key=key, user_name=user_name, user_key=user_key)
+
+                            for vm in vapp._get_vms():
+                                vm_name = vm.name
+                                task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
+                                if isinstance(task, GenericTask):
+                                    vca.block_until_completed(task)
+                                    self.logger.info("cloud_init : customized guest os task "\
+                                                        "completed for VM {}".format(vm_name))
+                                else:
+                                    self.logger.error("cloud_init : task for customized guest os "\
+                                                               "failed for VM {}".format(vm_name))
+        except Exception as exp:
+            self.logger.error("cloud_init : exception occurred while injecting "\
+                                                                       "ssh-key")
+            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
+                                                               "ssh-key".format(exp))
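For reference, a minimal sketch of the cloud_config dictionary that cloud_init() consumes, following the field descriptions in the docstring above; the key names come from the docstring, while the user name and key material are illustrative:

    # Illustrative cloud_config for cloud_init(); keys follow the docstring
    # above, values are made up for the example.
    cloud_config = {
        "key-pairs": ["ssh-rsa AAAAB3... operator@example"],      # default-user keys
        "users": [
            {"name": "osmuser",                                   # mandatory
             "key-pairs": ["ssh-rsa AAAAC3... osmuser@example"]}  # per-user keys
        ],
    }
    # 'vapp' would come from pyvcloud (e.g. vca.get_vapp(vdc, vapp_name));
    # hypothetical call:
    # self.cloud_init(vapp, cloud_config)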
+
+
+    def add_new_disk(self, vca, vapp_uuid, disk_size):
+        """
+            Method to create an empty vm disk
+
+            Args:
+                vca - vCloud director connection object
+                vapp_uuid - vApp identifier
+                disk_size - size of disk to be created in GB
+
+            Returns:
+                None
+        """
+        status = False
+        vm_details = None
+        try:
+            #Disk size in GB, convert it into MB
+            if disk_size is not None:
+                disk_size_mb = int(disk_size) * 1024
+                vm_details = self.get_vapp_details_rest(vapp_uuid)
+
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+                disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+                status = self.add_new_disk_rest(vca, disk_href, disk_size_mb)
+
+        except Exception as exp:
+            msg = "Error occurred while creating new disk {}.".format(exp)
+            self.rollback_newvm(vapp_uuid, msg)
+
+        if status:
+            self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+        else:
+            #If failed to add disk, delete VM
+            msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
+            self.rollback_newvm(vapp_uuid, msg)
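A minimal usage sketch, assuming self is a connected vimconnector and that vca/vapp_uuid come from an earlier VM-creation flow; the 40 GB size is illustrative:

    # Hypothetical call: attach an empty 40 GB disk to the vApp's VM.
    # disk_size is given in GB and converted to MB before the REST call;
    # on failure the helper rolls the new VM back via rollback_newvm().
    self.add_new_disk(vca, vapp_uuid, disk_size=40)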
+
+
+    def add_new_disk_rest(self, vca, disk_href, disk_size_mb):
+        """
+        Retrieves vApp Disk section & adds a new empty disk
+
+        Args:
+            vca: vCloud director connection object
+            disk_href: Disk section href to add the disk to
+            disk_size_mb: Disk size in MB
+
+            Returns: Status of add new disk task
+        """
+        status = False
+        if vca.vcloud_session and vca.vcloud_session.organization:
+            response = Http.get(url=disk_href,
+                                headers=vca.vcloud_session.get_vcloud_headers(),
+                                verify=vca.verify,
+                                logger=vca.logger)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
+                              .format(disk_href, response.status_code))
+            return status
+        try:
+            #Find bus type & max of instance IDs assigned to disks
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+            instance_id = 0
+            for item in lxmlroot_respond.iterfind('xmlns:Item', namespaces):
+                if item.find("rasd:Description", namespaces).text == "Hard disk":
+                    inst_id = int(item.find("rasd:InstanceID", namespaces).text)
+                    if inst_id > instance_id:
+                        instance_id = inst_id
+                        disk_item = item.find("rasd:HostResource", namespaces)
+                        bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
+                        bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
+
+            instance_id = instance_id + 1
+            new_item =   """<Item>
+                                <rasd:Description>Hard disk</rasd:Description>
+                                <rasd:ElementName>New disk</rasd:ElementName>
+                                <rasd:HostResource
+                                    xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                                    vcloud:capacity="{}"
+                                    vcloud:busSubType="{}"
+                                    vcloud:busType="{}"></rasd:HostResource>
+                                <rasd:InstanceID>{}</rasd:InstanceID>
+                                <rasd:ResourceType>17</rasd:ResourceType>
+                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
+
+            new_data = response.content
+            #Add new item at the bottom
+            new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
+
+            # Send PUT request to modify virtual hardware section with new disk
+            headers = vca.vcloud_session.get_vcloud_headers()
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = Http.put(url=disk_href,
+                                data=new_data,
+                                headers=headers,
+                                verify=vca.verify, logger=self.logger)
+
+            if response.status_code != 202:
+                self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
+                                  .format(disk_href, response.status_code, response.content))
+            else:
+                add_disk_task = taskType.parseString(response.content, True)
+                if type(add_disk_task) is GenericTask:
+                    status = vca.block_until_completed(add_disk_task)
+                    if not status:
+                        self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
+
+        except Exception as exp:
+            self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
+
+        return status
+
+
+    def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
+        """
+            Method to add existing disk to vm
+            Args :
+                catalogs - List of VDC catalogs
+                image_id - Catalog ID
+                size - size of disk in GB (optional)
+                template_name - Name of template in catalog
+                vapp_uuid - UUID of vApp
+            Returns:
+                None
+        """
+        disk_info = None
+        vcenter_conect, content = self.get_vcenter_content()
+        #find moref-id of vm in image
+        catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
+                                                         image_id=image_id,
+                                                        )
+
+        if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
+            if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
+                catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
+                if catalog_vm_moref_id:
+                    self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
+                    host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
+                    if catalog_vm_obj:
+                        #find existing disk
+                        disk_info = self.find_disk(catalog_vm_obj)
+                    else:
+                        exp_msg = "No VM with image id {} found".format(image_id)
+                        self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+        else:
+            exp_msg = "No Image found with image ID {} ".format(image_id)
+            self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+
+        if disk_info:
+            self.logger.info("Existing disk_info : {}".format(disk_info))
+            #get VM
+            vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+            host, vm_obj = self.get_vm_obj(content, vm_moref_id)
+            if vm_obj:
+                status = self.add_disk(vcenter_conect=vcenter_conect,
+                                       vm=vm_obj,
+                                       disk_info=disk_info,
+                                       size=size,
+                                       vapp_uuid=vapp_uuid
+                                       )
+            if status:
+                self.logger.info("Disk from image id {} added to {}".format(image_id,
+                                                                            vm_obj.config.name)
+                                 )
+        else:
+            msg = "No disk found with image id {} to add in VM {}".format(
+                                                            image_id,
+                                                            vapp_uuid)
+            self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
+
+
+    def find_disk(self, vm_obj):
+        """
+         Method to find details of existing disk in VM
+         Method to find details of existing disk in VM
+            Args :
+                vm_obj - vCenter object of VM
+            Returns:
+                disk_info : dict of disk details
+        """
+        disk_info = {}
+        if vm_obj:
+            try:
+                devices = vm_obj.config.hardware.device
+                for device in devices:
+                    if type(device) is vim.vm.device.VirtualDisk:
+                        if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
+                            disk_info["full_path"] = device.backing.fileName
+                            disk_info["datastore"] = device.backing.datastore
+                            disk_info["capacityKB"] = device.capacityInKB
+                            break
+            except Exception as exp:
+                self.logger.error("find_disk() : exception occurred while "\
+                                  "getting existing disk details :{}".format(exp))
+        return disk_info
+
+
+    def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
+        """
+         Method to add existing disk in VM
+            Args :
+                vcenter_conect - vCenter connection object
+                vm - vCenter vm object
+                size - size of disk in GB (optional)
+                vapp_uuid - vApp UUID, used for rollback on failure
+                disk_info : dict of disk details
+            Returns:
+                status : status of add disk task
+        """
+        datastore = disk_info["datastore"] if "datastore" in disk_info else None
+        fullpath = disk_info["full_path"] if "full_path" in disk_info else None
+        capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
+        if size is not None:
+            #Convert size from GB to KB
+            sizeKB = int(size) * 1024 * 1024
+            #compare the size of the existing disk with the user-given size and assign whichever is greater
+            self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
+                                                                    sizeKB, capacityKB))
+            if sizeKB > capacityKB:
+                capacityKB = sizeKB
+
+        if datastore and fullpath and capacityKB:
+            try:
+                spec = vim.vm.ConfigSpec()
+                # get all disks on a VM, set unit_number to the next available
+                unit_number = 0
+                for dev in vm.config.hardware.device:
+                    if hasattr(dev.backing, 'fileName'):
+                        unit_number = int(dev.unitNumber) + 1
+                        # unit_number 7 reserved for scsi controller
+                        if unit_number == 7:
+                            unit_number += 1
+                    if isinstance(dev, vim.vm.device.VirtualDisk):
+                        #vim.vm.device.VirtualSCSIController
+                        controller_key = dev.controllerKey
+
+                self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
+                                                                    unit_number, controller_key))
+                # add disk here
+                dev_changes = []
+                disk_spec = vim.vm.device.VirtualDeviceSpec()
+                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+                disk_spec.device = vim.vm.device.VirtualDisk()
+                disk_spec.device.backing = \
+                    vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+                disk_spec.device.backing.thinProvisioned = True
+                disk_spec.device.backing.diskMode = 'persistent'
+                disk_spec.device.backing.datastore = datastore
+                disk_spec.device.backing.fileName = fullpath
+
+                disk_spec.device.unitNumber = unit_number
+                disk_spec.device.capacityInKB = capacityKB
+                disk_spec.device.controllerKey = controller_key
+                dev_changes.append(disk_spec)
+                spec.deviceChange = dev_changes
+                task = vm.ReconfigVM_Task(spec=spec)
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+                return status
+            except Exception as exp:
+                exp_msg = "add_disk() : exception {} occurred while adding disk "\
+                          "{} to vm {}".format(exp,
+                                               fullpath,
+                                               vm.config.name)
+                self.rollback_newvm(vapp_uuid, exp_msg)
+        else:
+            msg = "add_disk() : Cannot add disk to VM with disk info {}".format(disk_info)
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def get_vcenter_content(self):
+        """
+         Get the vsphere content object
+        """
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info()
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter information"\
+                             " for VM : {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
+
+        context = None
+        if hasattr(ssl, '_create_unverified_context'):
+            context = ssl._create_unverified_context()
+
+        vcenter_conect = SmartConnect(
+                    host=vm_vcenter_info["vm_vcenter_ip"],
+                    user=vm_vcenter_info["vm_vcenter_user"],
+                    pwd=vm_vcenter_info["vm_vcenter_password"],
+                    port=int(vm_vcenter_info["vm_vcenter_port"]),
+                    sslContext=context
+                )
+        atexit.register(Disconnect, vcenter_conect)
+        content = vcenter_conect.RetrieveContent()
+        return vcenter_conect, content
+
+
+    def get_vm_moref_id(self, vapp_uuid):
+        """
+        Get the moref_id of given VM
+        """
+        try:
+            vm_moref_id = None
+            if vapp_uuid:
+                vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+                if vm_details and "vm_vcenter_info" in vm_details:
+                    vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+
+            return vm_moref_id
+
+        except Exception as exp:
+            self.logger.error("Error occurred while getting VM moref ID "\
+                              "for VM : {}".format(exp))
+            return None
+
+
+    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
+        """
+            Method to get vApp template details
+                Args :
+                    catalogs - list of VDC catalogs
+                    image_id - Catalog ID to find
+                    template_name : template name in catalog
+                Returns:
+                    parsed_response : dict of vApp template details
+        """
+        parsed_response = {}
+
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect to vCloud director as admin")
+
+        try:
+            catalog = self.get_catalog_obj(image_id, catalogs)
+            if catalog:
+                template_name = self.get_catalogbyid(image_id, catalogs)
+                catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
+                if len(catalog_items) == 1:
+                    response = Http.get(catalog_items[0].get_href(),
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=vca.logger)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_tempalte_href = entity.get("href")
+                    #get vapp details and parse moref id
+
+                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                                  'vmw': 'http://www.vmware.com/schema/ovf',
+                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
+                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
+                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
+                                }
+
+                    if vca.vcloud_session and vca.vcloud_session.organization:
+                        response = Http.get(url=vapp_tempalte_href,
+                                            headers=vca.vcloud_session.get_vcloud_headers(),
+                                            verify=vca.verify,
+                                            logger=vca.logger
+                                            )
+
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug("REST API call {} failed. Return status code {}".format(
+                                                vapp_tempalte_href, response.status_code))
+
+                        else:
+                            xmlroot_respond = XmlElementTree.fromstring(response.content)
+                            vCloud_extension_section = None
+                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                            if children_section is not None:
+                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                            if vCloud_extension_section is not None:
+                                vm_vcenter_info = {}
+                                vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                                vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                                if vmext is not None:
+                                    vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                                parsed_response["vm_vcenter_info"]= vm_vcenter_info
+
+        except Exception as exp :
+            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+
+        return parsed_response
+
+
+    def rollback_newvm(self, vapp_uuid, msg, exp_type="Generic"):
+        """
+            Method to delete vApp
+                Args :
+                    vapp_uuid - vApp UUID
+                    msg - Error message to be logged
+                    exp_type : Exception type
+                Returns:
+                    None
+        """
+        if vapp_uuid:
+            status = self.delete_vminstance(vapp_uuid)
+        else:
+            msg = "No vApp ID"
+        self.logger.error(msg)
+        if exp_type == "Generic":
+            raise vimconn.vimconnException(msg)
+        elif exp_type == "NotFound":
+            raise vimconn.vimconnNotFoundException(message=msg)
+
+    def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
+        """
+            Method to attach SRIOV adapters to VM
+
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                sriov_nets - SRIOV devices information as specified in VNFD (flavor)
+                vmname_andid - vmname
+
+            Returns:
+                The status of add SRIOV adapter task, vm object and
+                vcenter_conect object
+        """
+        vm_obj = None
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+
+        if vm_moref_id:
+            try:
+                no_of_sriov_devices = len(sriov_nets)
+                if no_of_sriov_devices > 0:
+                    #Get VM and its host
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                    if host_obj and vm_obj:
+                        #get SRIOV devices from host on which vapp is currently installed
+                        avilable_sriov_devices = self.get_sriov_devices(host_obj,
+                                                                no_of_sriov_devices,
+                                                                )
+
+                        if len(avilable_sriov_devices) == 0:
+                            #find other hosts with active pci devices
+                            new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
+                                                                content,
+                                                                no_of_sriov_devices,
+                                                                )
+
+                            if new_host_obj is not None and len(avilable_sriov_devices)> 0:
+                                #Migrate vm to the host where SRIOV devices are available
+                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
+                                                                                    new_host_obj))
+                                task = self.relocate_vm(new_host_obj, vm_obj)
+                                if task is not None:
+                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    self.logger.info("Migrate VM status: {}".format(result))
+                                    host_obj = new_host_obj
+                                else:
+                                    self.logger.error("Failed to migrate VM {}".format(vmname_andid))
+                                    raise vimconn.vimconnNotFoundException(
+                                    "Fail to migrate VM : {} to host {}".format(
+                                                    vmname_andid,
+                                                    new_host_obj)
+                                        )
+
+                        if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
+                            #Add SRIOV devices one by one
+                            for sriov_net in sriov_nets:
+                                network_name = sriov_net.get('net_id')
+                                dvs_portgr_name = self.create_dvPort_group(network_name)
+                                if sriov_net.get('type') == "VF":
+                                    #add vlan ID, modify portgroup for vlan ID
+                                    self.configure_vlanID(content, vcenter_conect, network_name)
+
+                                task = self.add_sriov_to_vm(content,
+                                                            vm_obj,
+                                                            host_obj,
+                                                            network_name,
+                                                            avilable_sriov_devices[0]
+                                                            )
+                                if task:
+                                    status= self.wait_for_vcenter_task(task, vcenter_conect)
+                                    if status:
+                                        self.logger.info("Added SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                else:
+                                    self.logger.error("Failed to add SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                    raise vimconn.vimconnUnexpectedResponse(
+                                    "Failed to add SRIOV adapter to VM {}".format(str(vm_obj))
+                                        )
+                            return True, vm_obj, vcenter_conect
+                        else:
+                            self.logger.error("Currently there is no host with"\
+                                              " {} number of available SRIOV "\
+                                              "VFs required for VM {}".format(
+                                                                no_of_sriov_devices,
+                                                                vmname_andid)
+                                              )
+                            raise vimconn.vimconnNotFoundException(
+                                    "Currently there is no host with {} "\
+                                    "number of available SRIOV devices required for VM {}".format(
+                                                                            no_of_sriov_devices,
+                                                                            vmname_andid))
+                else:
+                    self.logger.debug("No information about SRIOV devices {}".format(sriov_nets))
+
+            except vmodl.MethodFault as error:
+                self.logger.error("Error occurred while adding SRIOV: {}".format(error))
+        return None, vm_obj, vcenter_conect
+
+
+    def get_sriov_devices(self, host, no_of_vfs):
+        """
+            Method to get the details of SRIOV devices on given host
+             Args:
+                host - vSphere host object
+                no_of_vfs - number of VFs needed on host
+
+             Returns:
+                array of SRIOV devices
+        """
+        sriovInfo = []
+        if host:
+            for device in host.config.pciPassthruInfo:
+                if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
+                    if device.numVirtualFunction >= no_of_vfs:
+                        sriovInfo.append(device)
+                        break
+        return sriovInfo
+
+
+    def get_host_and_sriov_devices(self, content, no_of_vfs):
+        """
+         Method to get the details of SRIOV devices on all hosts
+
+            Args:
+                content - vCenter content object
+                no_of_vfs - number of pci VFs needed on host
+
+            Returns:
+                 array of SRIOV devices and host object
+        """
+        host_obj = None
+        sriov_device_objs = None
+        try:
+            if content:
+                container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                            [vim.HostSystem], True)
+                for host in container.view:
+                    devices = self.get_sriov_devices(host, no_of_vfs)
+                    if devices:
+                        host_obj = host
+                        sriov_device_objs = devices
+                        break
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
+
+        return host_obj, sriov_device_objs
+
+
+    def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
+        """
+         Method to add SRIOV adapter to vm
+
+            Args:
+                host_obj - vSphere host object
+                vm_obj - vSphere vm object
+                content - vCenter content object
+                network_name - name of distributed virtual portgroup
+                sriov_device - SRIOV device info
+
+            Returns:
+                 task object
+        """
+        devices = []
+        vnic_label = "sriov nic"
+        try:
+            dvs_portgr = self.get_dvport_group(network_name)
+            network_name = dvs_portgr.name
+            nic = vim.vm.device.VirtualDeviceSpec()
+            # VM device
+            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+            nic.device = vim.vm.device.VirtualSriovEthernetCard()
+            nic.device.addressType = 'assigned'
+            #nic.device.key = 13016
+            nic.device.deviceInfo = vim.Description()
+            nic.device.deviceInfo.label = vnic_label
+            nic.device.deviceInfo.summary = network_name
+            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+
+            nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
+            nic.device.backing.deviceName = network_name
+            nic.device.backing.useAutoDetect = False
+            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+            nic.device.connectable.startConnected = True
+            nic.device.connectable.allowGuestControl = True
+
+            nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
+
+            devices.append(nic)
+            vmconf = vim.vm.ConfigSpec(deviceChange=devices)
+            task = vm_obj.ReconfigVM_Task(vmconf)
+            return task
+        except Exception as exp:
+            self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
+            return None
+
+
+    def create_dvPort_group(self, network_name):
+        """
+         Method to create distributed virtual portgroup
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                portgroup key
+        """
+        try:
+            new_network_name = [network_name, '-', str(uuid.uuid4())]
+            network_name = ''.join(new_network_name)
+            vcenter_conect, content = self.get_vcenter_content()
+
+            dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
+            if dv_switch:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.name = network_name
+
+                dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
+
+                task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
+                self.wait_for_vcenter_task(task, vcenter_conect)
+
+                dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
+                if dvPort_group:
+                    self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
+                    return dvPort_group.key
+            else:
+                self.logger.debug("No distributed virtual switch found with name {}".format(self.dvs_name))
+
+        except Exception as exp:
+            self.logger.error("Error occurred while creating distributed virtual port group {}"\
+                             " : {}".format(network_name, exp))
+        return None
+
+    def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
+        """
+         Method to reconfigure distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of distributed virtual portgroup
+                content - vCenter content object
+                config_info - distributed virtual portgroup configuration
+
+            Returns:
+                task object
+        """
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.configVersion = dvPort_group.config.configVersion
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                if "vlanID" in config_info:
+                    dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+                    dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
+
+                task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
+                return task
+            else:
+                return None
+        except Exception as exp:
+            self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
+                             " : {}".format(dvPort_group_name, exp))
+            return None
+
+
+    def destroy_dvport_group(self, dvPort_group_name):
+        """
+         Method to destroy distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of distributed virtual portgroup
+
+            Returns:
+                True if the portgroup was successfully deleted, else False
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        try:
+            status = None
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                task = dvPort_group.Destroy_Task()
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+            return status
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
+                                                                    exp, dvPort_group_name))
+            return None
+
+
+    def get_dvport_group(self, dvPort_group_name):
+        """
+        Method to get distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of distributed virtual portgroup
+
+            Returns:
+                portgroup object
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        dvPort_group = None
+        try:
+            container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
+            for item in container.view:
+                if item.key == dvPort_group_name:
+                    dvPort_group = item
+                    break
+            return dvPort_group
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
+                                                                            exp, dvPort_group_name))
+            return None
+
+    def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
+        """
+         Method to get distributed virtual portgroup vlanID
+
+            Args:
+                dvPort_group_name - name of distributed virtual portgroup
+
+            Returns:
+                vlan ID
+        """
+        vlanId = None
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
+                                                                            exp, dvPort_group_name))
+        return vlanId
+
+
+    def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
+        """
+         Method to configure vlanID in distributed virtual portgroup
+
+            Args:
+                content - vCenter content object
+                vcenter_conect - vCenter connection object
+                dvPort_group_name - name of distributed virtual portgroup
+
+            Returns:
+                None
+        """
+        vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
+        if vlanID == 0:
+            #configure vlanID
+            vlanID = self.genrate_vlanID(dvPort_group_name)
+            config = {"vlanID":vlanID}
+            task = self.reconfig_portgroup(content, dvPort_group_name,
+                                    config_info=config)
+            if task:
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+                if status:
+                    self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
+                                                        dvPort_group_name, vlanID))
+            else:
+                self.logger.error("Failed to reconfigure portgroup {} for vlan ID {}".format(
+                                        dvPort_group_name, vlanID))
+
+
+    def genrate_vlanID(self, network_name):
+        """
+         Method to get unused vlanID
+            Args:
+                network_name - name of network/portgroup
+            Returns:
+                vlanID
+        """
+        vlan_id = None
+        used_ids = []
+        if self.config.get('vlanID_range') is None:
+            raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
+                        "config value before creating an sriov network with a vlan tag")
+        if "used_vlanIDs" not in self.persistent_info:
+            self.persistent_info["used_vlanIDs"] = {}
+        else:
+            used_ids = self.persistent_info["used_vlanIDs"].values()
+
+        for vlanID_range in self.config.get('vlanID_range'):
+            start_vlanid, end_vlanid = vlanID_range.split("-")
+            if int(start_vlanid) > int(end_vlanid):
+                raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
+                                                                        vlanID_range))
+
+            for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
+                if id not in used_ids:
+                    vlan_id = id
+                    self.persistent_info["used_vlanIDs"][network_name] = vlan_id
+                    return vlan_id
+        if vlan_id is None:
+            raise vimconn.vimconnConflictException("All Vlan IDs are in use")
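As a standalone sketch of the allocation scheme above: ranges such as "3000-3100" come from the 'vlanID_range' config list, used IDs are tracked per network, and the bounds are cast to int before comparing. The function and variable names here are hypothetical:

    def allocate_vlan(vlan_ranges, used_by_net, network_name):
        # vlan_ranges: list like ["3000-3100"]; used_by_net: dict net -> vlan ID
        used_ids = used_by_net.values()
        for vlan_range in vlan_ranges:
            start, end = (int(x) for x in vlan_range.split("-"))
            if start > end:
                raise ValueError("Invalid vlan ID range {}".format(vlan_range))
            for vlan_id in range(start, end + 1):
                if vlan_id not in used_ids:
                    used_by_net[network_name] = vlan_id
                    return vlan_id
        raise ValueError("All Vlan IDs are in use")

    taken = {"net-a": 3000}
    print(allocate_vlan(["3000-3100"], taken, "net-b"))  # -> 3001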
+
+
+    def get_obj(self, content, vimtype, name):
+        """
+         Get the vsphere object associated with a given text name
+        """
+        obj = None
+        container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
+        for item in container.view:
+            if item.name == name:
+                obj = item
+                break
+        return obj
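Putting the SR-IOV helpers together, a hedged usage sketch; vim_conn stands for a connected vimconnector instance, and the net list mirrors the sriov_nets structure add_sriov() reads ('net_id' and 'type' keys):

    # Hypothetical end-to-end call: attach one SR-IOV VF to a deployed vApp.
    sriov_nets = [{"net_id": "dataplane-net", "type": "VF"}]  # "VF" triggers VLAN tagging
    status, vm_obj, vcenter_conn = vim_conn.add_sriov(vapp_uuid, sriov_nets, "myvm-andid")
    if status:
        # add_sriov() created a dvPort group, picked a VLAN from 'vlanID_range'
        # and reconfigured the VM with a VirtualSriovEthernetCard
        print("SR-IOV adapter attached to {}".format(vm_obj))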
+
index a198148..9eda047 100755 (executable)
@@ -61,6 +61,7 @@ function install_packages(){
 }
 
 GIT_URL=https://osm.etsi.org/gerrit/osm/RO.git
+GIT_OVIM_URL=https://osm.etsi.org/gerrit/osm/openvim.git
 DBUSER="root"
 DBPASSWD=""
 DBPASSWD_PARAM=""
@@ -200,9 +201,13 @@ echo '
 #################################################################
 #####               INSTALL REQUIRED PACKAGES               #####
 #################################################################'
-[ "$_DISTRO" == "Ubuntu" ] && install_packages "git screen wget mysql-client"
-[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git screen wget mariadb-client"
+[ "$_DISTRO" == "Ubuntu" ] && install_packages "git make screen wget mysql-client"
+[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git make screen wget mariadb-client"
 
+
+
+if [[ -z "$NO_PACKAGES" ]]
+then
 echo '
 #################################################################
 #####        INSTALL PYTHON PACKAGES                        #####
@@ -235,9 +240,22 @@ if [[ -z $NOCLONE ]]; then
 #################################################################'
     su $SUDO_USER -c "git clone ${GIT_URL} ${OPENMANO_BASEFOLDER}"
     su $SUDO_USER -c "cp ${OPENMANO_BASEFOLDER}/.gitignore-common ${OPENMANO_BASEFOLDER}/.gitignore"
-    [[ -z $DEVELOP ]] && su $SUDO_USER -c "git -C  ${OPENMANO_BASEFOLDER} checkout tags/v1.0.2"
+    [[ -z $DEVELOP ]] && su $SUDO_USER -c "git -C ${OPENMANO_BASEFOLDER} checkout tags/v1.1.0"
 fi
 
+echo '
+#################################################################
+#####        INSTALLING OVIM LIBRARY                        #####
+#################################################################'
+su $SUDO_USER -c "git -C ${OPENMANO_BASEFOLDER} clone ${GIT_OVIM_URL} openvim"
+[[ -z $DEVELOP ]] && su $SUDO_USER -c "git -C ${OPENMANO_BASEFOLDER}/openvim checkout master"
+# Install debian dependencies before setup.py
+#[ "$_DISTRO" == "Ubuntu" ] && install_packages "git"
+#[ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ] && install_packages "git"
+make -C ${OPENMANO_BASEFOLDER}/openvim lite
+
+
+
 if [ "$_DISTRO" == "CentOS" -o "$_DISTRO" == "Red" ]
 then
     echo '
@@ -323,7 +341,14 @@ echo '
     if [ -n "$FORCEDB" ]; then
         DB_FORCE='--forcedb'
     fi
-    ${OPENMANO_BASEFOLDER}/scripts/install-db-server.sh -u $DBUSER $DBPASSWD_PARAM $DB_QUIET $DB_FORCE || exit 1
+    ${OPENMANO_BASEFOLDER}/database_utils/install-db-server.sh -u $DBUSER $DBPASSWD_PARAM $DB_QUIET $DB_FORCE || exit 1
+echo '
+#################################################################
+#####        CREATE AND INIT MANO_VIM DATABASE              #####
+#################################################################'
+# Install mano_vim_db after setup
+    ${OPENMANO_BASEFOLDER}/openvim/database_utils/install-db-server.sh -U $DBUSER ${DBPASSWD_PARAM/p/P} -u mano -p manopw -d mano_vim_db || exit 1
+
 fi
 
 if [[ -n "$INSTALL_AS_A_SERVICE"  ]]
diff --git a/sdn/sdn_port_mapping.yaml b/sdn/sdn_port_mapping.yaml
new file mode 100644 (file)
index 0000000..47da6e0
--- /dev/null
@@ -0,0 +1,44 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+#The mapping is composed of a list of compute nodes. Each compute node has two elements:
+#"compute_node": name to identify the compute node within the datacenter
+#"ports": list of ports mapped to a switch for that compute node.
+#The information to identify the SDN controller and the dataplane switch is obtained from the datacenter information
+- compute_node:    "compute node 1"
+  ports:
+    #Each mapped port contains the following information:
+    #"pci": pci address of the port in the compute node. This is a mandatory parameter
+    #"switch_mac": MAC address of the corresponding port in the dataplane switch.
+    #"switch_port": Openflow name of the port in the dataplane switch.
+    #At least one of "switch_mac" or "switch_port" must be specified; both may be specified
+    - pci:         "0000:81:00.0"
+      switch_port: "port-2/1"
+    - pci:         "0000:81:00.1"
+      switch_mac:  "52:54:00:94:21:22"
+- compute_node:    "compute node 2"
+  ports:
+    - pci:         "0000:81:00.0"
+      switch_port: "port-2/3"
+      switch_mac:  "52:54:00:94:22:21"
+    - pci:         "0000:81:00.1"
+      switch_port: "port-2/4"
+      switch_mac:  "52:54:00:94:22:22"
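A minimal sketch, assuming PyYAML is available and the path is illustrative, of how a client could load and walk this mapping; the 'compute_node', 'ports', 'pci', 'switch_port' and 'switch_mac' fields follow the comments above:

    import yaml

    # Hypothetical reader for the SDN port-mapping file.
    with open("sdn/sdn_port_mapping.yaml") as f:
        mapping = yaml.safe_load(f)

    for node in mapping:
        print("compute node: {}".format(node["compute_node"]))
        for port in node["ports"]:
            # 'pci' is mandatory; 'switch_port' and 'switch_mac' are alternatives
            print("  pci={} switch_port={} switch_mac={}".format(
                port["pci"], port.get("switch_port"), port.get("switch_mac")))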
diff --git a/test/RO_tests/passthrough/scenario_p2p_passthrough.yaml b/test/RO_tests/passthrough/scenario_p2p_passthrough.yaml
new file mode 100644 (file)
index 0000000..ed6b53a
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          p2p_passthrough
+  description:   Network scenario consisting of two machines with a passthrough interface interconnected between them
+  vnfs: 
+    passthrough1:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    passthrough2:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - passthrough1:  eth0       # Node and its interface
+      - passthrough2:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - passthrough1:  xe0       # Node and its interface
+      - passthrough2:  xe0       # Node and its interface
+
diff --git a/test/RO_tests/passthrough/vnfd_1passthrough.yaml b/test/RO_tests/passthrough/vnfd_1passthrough.yaml
new file mode 100644 (file)
index 0000000..94784d5
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        passthrough
+    description: Machine with EPA and a passthrough interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              passthrough-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              passthrough-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        passthrough-VM
+        description: Machine with EPA and a passthrough interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "yes"         # "yes" (passthrough), "no" (SR-IOV with VLAN tags), "yes:sriov" (SR-IOV, but exclusive and without VLAN tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
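The "dedicated" comment in the VNFC interface above encodes three dataplane modes. As an illustrative sketch of that enumeration (the names below are hypothetical, not identifiers used by the RO):

# Hypothetical mapping of the VNFD "dedicated" values to interface modes,
# per the comment above; not RO code, only a reading aid.
DEDICATED_MODES = {
    "yes": "PCI passthrough",
    "no": "SR-IOV with VLAN tags",
    "yes:sriov": "SR-IOV, exclusive, without VLAN tag",
}

def interface_mode(dedicated):
    # Reject values the descriptor schema would not accept either.
    try:
        return DEDICATED_MODES[dedicated]
    except KeyError:
        raise ValueError("unknown 'dedicated' value: %s" % dedicated)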
diff --git a/test/RO_tests/pmp_passthrough/scenario_pmp_passthrough.yaml b/test/RO_tests/pmp_passthrough/scenario_pmp_passthrough.yaml
new file mode 100644 (file)
index 0000000..369907c
--- /dev/null
@@ -0,0 +1,50 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          pmp_passthrough
+  description:   Network scenario consisting of four machines interconnected through passthrough interfaces
+  vnfs: 
+    passthrough1:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    passthrough2:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    passthrough3:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    passthrough4:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - passthrough1:  eth0       # Node and its interface
+      - passthrough2:  eth0       # Node and its interface
+      - passthrough3:  eth0       # Node and its interface
+      - passthrough4:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - passthrough1:  xe0       # Node and its interface
+      - passthrough2:  xe0       # Node and its interface
+      - passthrough3:  xe0       # Node and its interface
+      - passthrough4:  xe0       # Node and its interface
+
diff --git a/test/RO_tests/pmp_passthrough/vnfd_1passthrough.yaml b/test/RO_tests/pmp_passthrough/vnfd_1passthrough.yaml
new file mode 100644 (file)
index 0000000..94784d5
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        passthrough
+    description: Machine with EPA and a passthrough interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              passthrough-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              passthrough-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        passthrough-VM
+        description: Machine with EPA and a passthrough interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "yes"         # "yes" (passthrough), "no" (SR-IOV with VLAN tags), "yes:sriov" (SR-IOV, but exclusive and without VLAN tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/test/RO_tests/pmp_sriov/scenario_pmp_sriov.yaml b/test/RO_tests/pmp_sriov/scenario_pmp_sriov.yaml
new file mode 100644 (file)
index 0000000..05dc91b
--- /dev/null
@@ -0,0 +1,50 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          pmp_sriov
+  description:   Network scenario consisting of four machines interconnected through SR-IOV interfaces
+  vnfs: 
+    sriov1:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    sriov2:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    sriov3:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    sriov4:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - sriov1:  eth0       # Node and its interface
+      - sriov2:  eth0       # Node and its interface
+      - sriov3:  eth0       # Node and its interface
+      - sriov4:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - sriov1:  xe0       # Node and its interface
+      - sriov2:  xe0       # Node and its interface
+      - sriov3:  xe0       # Node and its interface
+      - sriov4:  xe0       # Node and its interface
+
diff --git a/test/RO_tests/pmp_sriov/vnfd_1sriov.yaml b/test/RO_tests/pmp_sriov/vnfd_1sriov.yaml
new file mode 100644 (file)
index 0000000..e424b02
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        sriov
+    description: Machine with EPA and an SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              sriov-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              sriov-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        sriov-VM
+        description: Machine with EPA and an SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "no"         # "yes" (passthrough), "no" (SR-IOV with VLAN tags), "yes:sriov" (SR-IOV, but exclusive and without VLAN tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/test/RO_tests/pmp_sriov_passthrough/scenario_pmp_sriov_passthrough.yaml b/test/RO_tests/pmp_sriov_passthrough/scenario_pmp_sriov_passthrough.yaml
new file mode 100644 (file)
index 0000000..4b917d9
--- /dev/null
@@ -0,0 +1,50 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          pmp_sriov_passthrough
+  description:   Network scenario consisting of four machines, two with SR-IOV and two with passthrough interfaces, interconnected through a dataplane network
+  vnfs: 
+    sriov1:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    passthrough1:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+    sriov2:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    passthrough2:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - sriov1:  eth0       # Node and its interface
+      - passthrough1:  eth0       # Node and its interface
+      - sriov2:  eth0       # Node and its interface
+      - passthrough2:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - sriov1:  xe0       # Node and its interface
+      - passthrough1:  xe0       # Node and its interface
+      - sriov2:  xe0       # Node and its interface
+      - passthrough2:  xe0       # Node and its interface
+
diff --git a/test/RO_tests/pmp_sriov_passthrough/vnfd_1passthrough.yaml b/test/RO_tests/pmp_sriov_passthrough/vnfd_1passthrough.yaml
new file mode 100644 (file)
index 0000000..94784d5
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        passthrough
+    description: Machine with EPA and a passthrough interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              passthrough-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              passthrough-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        passthrough-VM
+        description: Machine with EPA and a passthrough interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "yes"         # "yes" (passthrough), "no" (SR-IOV with VLAN tags), "yes:sriov" (SR-IOV, but exclusive and without VLAN tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/test/RO_tests/pmp_sriov_passthrough/vnfd_1sriov.yaml b/test/RO_tests/pmp_sriov_passthrough/vnfd_1sriov.yaml
new file mode 100644 (file)
index 0000000..e424b02
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        sriov
+    description: Machine with EPA and an SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              sriov-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              sriov-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        sriov-VM
+        description: Machine with EPA and an SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "no"         # "yes" (passthrough), "no" (SR-IOV with VLAN tags), "yes:sriov" (SR-IOV, but exclusive and without VLAN tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/test/RO_tests/simple_2_vnf/scenario_simple_2_vnf.yaml b/test/RO_tests/simple_2_vnf/scenario_simple_2_vnf.yaml
new file mode 100644 (file)
index 0000000..95b338b
--- /dev/null
@@ -0,0 +1,37 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          simple
+  description:   Simple network scenario consisting of two VNFs connected to an external network
+  vnfs: 
+    linux1:                   # vnf/net name in the scenario
+      vnf_name:  linux        # VNF name as introduced in OPENMANO DB
+    linux2:                   # vnf/net name in the scenario
+      vnf_name:  linux        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - linux1:  eth0       # Node and its interface
+      - linux2:  eth0       # Node and its interface
+
diff --git a/test/RO_tests/simple_2_vnf/vnfd_linux.yaml b/test/RO_tests/simple_2_vnf/vnfd_linux.yaml
new file mode 100644 (file)
index 0000000..47c8498
--- /dev/null
@@ -0,0 +1,42 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        linux
+    description: Single-VM VNF with a traditional cloud VM based on a generic Linux OS
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              linux-VM
+        local_iface_name:  eth0
+        description:       General purpose interface
+    VNFC:
+    -   name:        linux-VM
+        description: Generic Linux Virtual Machine
+        #Copy the image to a compute path and edit this path
+        image name:  image_name.qcow2
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1024         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 10
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:11.0"
+        numas: []
diff --git a/test/RO_tests/sr_iov/scenario_p2p_sriov.yaml b/test/RO_tests/sr_iov/scenario_p2p_sriov.yaml
new file mode 100644 (file)
index 0000000..57cb2c8
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          p2p_sriov
+  description:   Network scenario consisting of two machines interconnected through SR-IOV interfaces
+  vnfs: 
+    sriov1:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    sriov2:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - sriov1:  eth0       # Node and its interface
+      - sriov2:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - sriov1:  xe0       # Node and its interface
+      - sriov2:  xe0       # Node and its interface
+
diff --git a/test/RO_tests/sr_iov/vnfd_1sriov.yaml b/test/RO_tests/sr_iov/vnfd_1sriov.yaml
new file mode 100644 (file)
index 0000000..e424b02
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        sriov
+    description: Machine with EPA and an SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              sriov-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              sriov-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        sriov-VM
+        description: Machine with EPA and an SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "no"         # "yes" (passthrough), "no" (SR-IOV with VLAN tags), "yes:sriov" (SR-IOV, but exclusive and without VLAN tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/test/RO_tests/sriov_passthrough/scenario_p2p_sriov_passthrough.yaml b/test/RO_tests/sriov_passthrough/scenario_p2p_sriov_passthrough.yaml
new file mode 100644 (file)
index 0000000..49d1a2b
--- /dev/null
@@ -0,0 +1,41 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          p2p_sriov_passthrough
+  description:   Network scenario consisting of two machines, one with an SR-IOV and one with a passthrough interface, interconnected through a dataplane network
+  vnfs: 
+    sriov:                   # vnf/net name in the scenario
+      vnf_name:  sriov        # VNF name as introduced in OPENMANO DB
+    passthrough:                   # vnf/net name in the scenario
+      vnf_name:  passthrough        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - sriov:  eth0       # Node and its interface
+      - passthrough:  eth0       # Node and its interface
+    dataplane:                   # provide a name for this net or connection
+      interfaces: 
+      - sriov:  xe0       # Node and its interface
+      - passthrough:  xe0       # Node and its interface
+
diff --git a/test/RO_tests/sriov_passthrough/vnfd_1passthrough.yaml b/test/RO_tests/sriov_passthrough/vnfd_1passthrough.yaml
new file mode 100644 (file)
index 0000000..94784d5
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        passthrough
+    description: Machine with EPA and a passthrough interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              passthrough-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              passthrough-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        passthrough-VM
+        description: Machine with EPA and a passthrough interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "yes"         # "yes" (passthrough), "no" (SR-IOV with VLAN tags), "yes:sriov" (SR-IOV, but exclusive and without VLAN tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
diff --git a/test/RO_tests/sriov_passthrough/vnfd_1sriov.yaml b/test/RO_tests/sriov_passthrough/vnfd_1sriov.yaml
new file mode 100644 (file)
index 0000000..e424b02
--- /dev/null
@@ -0,0 +1,53 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+vnf:
+    name:        sriov
+    description: Machine with EPA and an SR-IOV interface
+    external-connections:
+    -   name:              eth0
+        type:              bridge
+        VNFC:              sriov-VM
+        local_iface_name:  eth0
+        description:       management interface
+    -   name:              xe0
+        type:              data
+        VNFC:              sriov-VM
+        local_iface_name:  xe0
+        description:       Dataplane interface
+    VNFC:
+    -   name:        sriov-VM
+        description: Machine with EPA and an SR-IOV interface
+        image name:  centos
+        disk: 20
+        numas: 
+        -   threads: 1          # "cores", "paired-threads", "threads"
+            memory: 1                 # GBytes
+            interfaces:
+            -   name:      xe0
+                vpci:      "0000:00:11.0"
+                dedicated: "no"         # "yes" (passthrough), "no" (SR-IOV with VLAN tags), "yes:sriov" (SR-IOV, but exclusive and without VLAN tag)
+                bandwidth: 1 Gbps
+
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:0a.0"
+
index d666755..f5194ac 100755 (executable)
@@ -49,6 +49,23 @@ global test_directory
 global scenario_test_folder
 global test_image_name
 global management_network
+global manual
+
+def check_instance_scenario_active(uuid):
+    """Return (True, None) when every net and VM of the instance scenario is
+    ACTIVE; otherwise return (False, <first non-ACTIVE status found>)."""
+    instance = client.get_instance(uuid=uuid)
+
+    for net in instance['nets']:
+        status = net['status']
+        if status != 'ACTIVE':
+            return (False, status)
+
+    for vnf in instance['vnfs']:
+        for vm in vnf['vms']:
+            status = vm['status']
+            if status != 'ACTIVE':
+                return (False, status)
+
+    return (True, None)
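+# Usage sketch (illustrative only; 'instance_uuid' is a placeholder and
+# 'client' is the global openmano client configured in __main__):
+#     active, status = check_instance_scenario_active(instance_uuid)
+#     if not active:
+#         logger.debug("instance not ready yet, last status: %s", status)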
 
 '''
 IMPORTANT NOTE
@@ -429,11 +446,41 @@ class descriptor_based_scenario_test(unittest.TestCase):
         self.__class__.test_index += 1
 
         instance = client.create_instance(scenario_id=self.__class__.scenario_uuid, name=self.__class__.test_text)
+        self.__class__.instance_scenario_uuid = instance['uuid']
         logger.debug(instance)
         self.__class__.to_delete_list.insert(0, {"item": "instance", "function": client.delete_instance,
                                   "params": {"uuid": instance['uuid']}})
 
-    def test_020_clean_deployment(self):
+    def test_020_check_deployment(self):
+        self.__class__.test_text = "{}.{}. TEST {} {}".format(test_number, self.__class__.test_index,
+                                                           inspect.currentframe().f_code.co_name,
+                                                           scenario_test_folder)
+        self.__class__.test_index += 1
+
+        if manual:
+            raw_input('Scenario has been deployed. Perform manual checks and press Enter to resume')
+            return
+
+        keep_waiting = 50
+        while keep_waiting:
+            active, status = check_instance_scenario_active(self.__class__.instance_scenario_uuid)
+            if active:
+                break
+            elif 'ERROR' in status:
+                msg = 'Got error while waiting for the instance to become active: ' + status
+                logger.error(msg)
+                raise Exception(msg)
+
+            keep_waiting -= 1
+            time.sleep(5)
+
+        if keep_waiting == 0:
+            msg = 'Timeout reached while waiting for the instance scenario to become active'
+            logger.error(msg)
+            raise Exception(msg)
+
+    def test_030_clean_deployment(self):
         self.__class__.test_text = "{}.{}. TEST {} {}".format(test_number, self.__class__.test_index,
                                                               inspect.currentframe().f_code.co_name,
                                                               scenario_test_folder)
@@ -476,6 +523,7 @@ if __name__=="__main__":
                       default=default_logger_file)
     parser.add_option('--list-tests', help='List all available tests', dest='list-tests', action="store_true",
                       default=False)
+    parser.add_option('-m', '--manual-check', help='Pause execution once deployed to allow manual checking of the deployed instance scenario', dest='manual', action="store_true", default=False)
     parser.add_option('--test', '--tests', help='Specify the tests to run', dest='tests', default=None)
 
     #Mandatory arguments
@@ -551,6 +599,7 @@ if __name__=="__main__":
     # set test image name and management network
     test_image_name = options.__dict__['image-name']
     management_network = options.__dict__['mgmt-net']
+    manual = options.__dict__['manual']
 
     #Create the list of tests to be run
     descriptor_based_tests = []