Merge branch 'vio' into v2.0 25/2125/1
author kate <akate@vmware.com>
Wed, 6 Sep 2017 06:26:28 +0000 (23:26 -0700)
committer kate <akate@vmware.com>
Wed, 6 Sep 2017 06:26:28 +0000 (23:26 -0700)
Change-Id: I380d554f9e9f03b0c4425fd998123fda300b81ff
Signed-off-by: kate <akate@vmware.com>
27 files changed:
Dockerfile
Jenkinsfile
database_utils/migrate_mano_db.sh
devops-stages/stage-archive.sh [new file with mode: 0755]
devops-stages/stage-build.sh [new file with mode: 0755]
devops-stages/stage-test.sh [new file with mode: 0755]
openmano
openmanod
osm_ro/db_base.py
osm_ro/httpserver.py
osm_ro/nfvo.py
osm_ro/nfvo_db.py
osm_ro/openmano_schemas.py
osm_ro/vim_thread.py
osm_ro/vimconn.py
osm_ro/vimconn_aws.py
osm_ro/vimconn_openstack.py
osm_ro/vimconn_openvim.py
osm_ro/vimconn_vmware.py
scripts/install-openmano.sh
test/RO_tests/afiinity_vnf/scenario_simple_2_vnf_afinnity.yaml [new file with mode: 0644]
test/RO_tests/afiinity_vnf/vnfd_linux_2_vnfc_affinity.yaml [new file with mode: 0644]
test/RO_tests/simple_count3/scenario_linux_count3.yaml [new file with mode: 0644]
test/RO_tests/simple_count3/vnfd_count3.yaml [new file with mode: 0644]
test/test_RO.py
test/test_openmanocli.sh [new file with mode: 0755]
vnfs/vnf-template.yaml

index 82c3ca9..254027c 100644 (file)
@@ -1,7 +1,7 @@
 FROM ubuntu:16.04
 
 RUN  apt-get update && \
-  DEBIAN_FRONTEND=noninteractive apt-get -y install git build-essential && \
+  DEBIAN_FRONTEND=noninteractive apt-get -y install git build-essential apt-utils && \
   DEBIAN_FRONTEND=noninteractive apt-get -y install python python-dev python-all python-stdeb fakeroot pypi2deb && \
   DEBIAN_FRONTEND=noninteractive apt-get -y install python-pip libmysqlclient-dev libssl-dev libffi-dev && \
   DEBIAN_FRONTEND=noninteractive pip install --upgrade pip && \
index 13a85d0..bc6c2d0 100644 (file)
@@ -1,36 +1,30 @@
-pipeline {
-       agent any
-       stages {
-               stage("Build") {
-                       agent {
-                               dockerfile true
-                       }
-                       steps {
-                               sh 'make package'
-                               stash name: "deb-files", includes: ".build/*.deb"
-                       }
-               }
-               stage("Unittest") {
-                       agent {
-                               dockerfile true
-                       }
-                       steps {
-                               sh 'echo "UNITTEST"'
-                       }
-               }
-               stage("Repo Component") {
-                       agent any
-                       steps {
-                               unstash "deb-files"
-                               sh '''
-                                       mkdir -p pool/RO
-                                       mv .build/*.deb pool/RO/
-                                       mkdir -p dists/ReleaseOne/unstable/RO/binary-amd64/
-                                       apt-ftparchive packages pool/RO > dists/ReleaseOne/unstable/RO/binary-amd64/Packages
-                                       gzip -9fk dists/ReleaseOne/unstable/RO/binary-amd64/Packages
-                                       '''
-                               archiveArtifacts artifacts: "dists/**,pool/RO/*.deb"
-                       }
-               }
-       }
+properties([
+    parameters([
+        string(defaultValue: env.BRANCH_NAME, description: '', name: 'GERRIT_BRANCH'),
+        string(defaultValue: 'osm/RO', description: '', name: 'GERRIT_PROJECT'),
+        string(defaultValue: env.GERRIT_REFSPEC, description: '', name: 'GERRIT_REFSPEC'),
+        string(defaultValue: env.GERRIT_PATCHSET_REVISION, description: '', name: 'GERRIT_PATCHSET_REVISION'),
+        string(defaultValue: 'https://osm.etsi.org/gerrit', description: '', name: 'PROJECT_URL_PREFIX'),
+        booleanParam(defaultValue: false, description: '', name: 'TEST_INSTALL'),
+    ])
+])
+
+def devops_checkout() {
+    dir('devops') {
+        git url: "${PROJECT_URL_PREFIX}/osm/devops", branch: params.GERRIT_BRANCH
+    }
+}
+
+node {
+    checkout scm
+    devops_checkout()
+
+    ci_helper = load "devops/jenkins/ci-pipelines/ci_stage_2.groovy"
+    ci_helper.ci_pipeline( 'RO',
+                           params.PROJECT_URL_PREFIX,
+                           params.GERRIT_PROJECT,
+                           params.GERRIT_BRANCH,
+                           params.GERRIT_REFSPEC,
+                           params.GERRIT_PATCHSET_REVISION,
+                           params.TEST_INSTALL)
 }
index d57194e..d99203e 100755 (executable)
 
 DBUSER="mano"
 DBPASS=""
-DEFAULT_DBPASS="maopw"
+DEFAULT_DBPASS="manopw"
 DBHOST=""
 DBPORT="3306"
 DBNAME="mano_db"
 QUIET_MODE=""
 #TODO update it with the last database version
-LAST_DB_VERSION=21
+LAST_DB_VERSION=24
  
 # Detect paths
 MYSQL=$(which mysql)
@@ -189,6 +189,9 @@ fi
 #[ $OPENMANO_VER_NUM -ge 5005 ] && DB_VERSION=19  #0.5.5 =>  19
 #[ $OPENMANO_VER_NUM -ge 5009 ] && DB_VERSION=20  #0.5.9 =>  20
 #[ $OPENMANO_VER_NUM -ge 5015 ] && DB_VERSION=21  #0.5.15 =>  21
+#[ $OPENMANO_VER_NUM -ge 5016 ] && DB_VERSION=22  #0.5.16 =>  22
+#[ $OPENMANO_VER_NUM -ge 5020 ] && DB_VERSION=23  #0.5.20 =>  23
+#[ $OPENMANO_VER_NUM -ge 5021 ] && DB_VERSION=24  #0.5.21 =>  24
 #TODO ... put next versions here
 
 function upgrade_to_1(){
@@ -772,6 +775,48 @@ function downgrade_from_21(){
     echo "DELETE FROM schema_version WHERE version_int='21';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
 }
 
+function upgrade_to_22(){
+    # echo "    upgrade database from version 0.21 to version 0.22"
+    echo "      Changed type of ram in 'flavors' from SMALLINT to MEDIUMINT"
+    echo "ALTER TABLE flavors CHANGE COLUMN ram ram MEDIUMINT(7) UNSIGNED NULL DEFAULT NULL AFTER disk;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (22, '0.22', '0.5.16', 'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-06-02');" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+function downgrade_from_22(){
+    # echo "    downgrade database from version 0.22 to version 0.21"
+    echo "      Changed type of ram in 'flavors' from MEDIUMINT to SMALLINT"
+    echo "ALTER TABLE flavors CHANGE COLUMN ram ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL AFTER disk;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "DELETE FROM schema_version WHERE version_int='22';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+
+function upgrade_to_23(){
+    # echo "    upgrade database from version 0.22 to version 0.23"
+    echo "      add column 'availability_zone' at table 'vms'"
+    echo "ALTER TABLE mano_db.vms ADD COLUMN availability_zone VARCHAR(255) NULL AFTER modified_at;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (23, '0.23', '0.5.20', 'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-08-29');" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+function downgrade_from_23(){
+    # echo "    downgrade database from version 0.23 to version 0.22"
+    echo "      remove column 'availability_zone' from table 'vms'"
+    echo "ALTER TABLE mano_db.vms DROP COLUMN availability_zone;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "DELETE FROM schema_version WHERE version_int='23';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+
+function upgrade_to_24(){
+    # echo "    upgrade database from version 0.23 to version 0.24"
+    echo "      Add 'count' to table 'vms'"
+    echo "ALTER TABLE vms ADD COLUMN count SMALLINT NOT NULL DEFAULT '1' AFTER vnf_id;" | $DBCMD ||
+        ! echo "ERROR. Aborted!" || exit -1
+    echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+         "VALUES (24, '0.24', '0.5.21', 'Added vnfd fields', '2017-08-29');" | $DBCMD ||
+         ! echo "ERROR. Aborted!" || exit -1
+}
+function downgrade_from_24(){
+    # echo "    downgrade database from version 0.24 to version 0.23"
+    echo "      Remove 'count' from table 'vms'"
+    echo "ALTER TABLE vms DROP COLUMN count;" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+    echo "DELETE FROM schema_version WHERE version_int='24';" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
+}
+
 function upgrade_to_X(){
     echo "      change 'datacenter_nets'"
     echo "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);" | $DBCMD || ! echo "ERROR. Aborted!" || exit -1
diff --git a/devops-stages/stage-archive.sh b/devops-stages/stage-archive.sh
new file mode 100755 (executable)
index 0000000..cc1cfc0
--- /dev/null
@@ -0,0 +1,8 @@
+#!/bin/sh
+rm -rf pool
+rm -rf dists
+mkdir -p pool/RO
+mv .build/*.deb pool/RO/
+mkdir -p dists/unstable/RO/binary-amd64/
+apt-ftparchive packages pool/RO > dists/unstable/RO/binary-amd64/Packages
+gzip -9fk dists/unstable/RO/binary-amd64/Packages
diff --git a/devops-stages/stage-build.sh b/devops-stages/stage-build.sh
new file mode 100755 (executable)
index 0000000..8505499
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/sh
+make package
diff --git a/devops-stages/stage-test.sh b/devops-stages/stage-test.sh
new file mode 100755 (executable)
index 0000000..49296c7
--- /dev/null
@@ -0,0 +1,2 @@
+#!/bin/sh
+echo "UNITTEST"
index 6c0d15b..ec588a9 100755 (executable)
--- a/openmano
+++ b/openmano
 # contact with: nfvlabs@tid.es
 ##
 
-'''
+"""
 openmano client used to interact with openmano-server (openmanod) 
-'''
+"""
 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ = "$09-oct-2014 09:09:48$"
-__version__ = "0.4.14-r521"
-version_date = "May 2017"
+__version__ = "0.4.15-r525"
+version_date = "Jul 2017"
 
 from argcomplete.completers import FilesCompleter
 import os
@@ -1033,6 +1033,7 @@ def datacenter_delete(args):
         print content['error']['description']
     return result
 
+
 def datacenter_list(args):
     #print "datacenter-list",args
     tenant='any' if args.all else _get_tenant()
@@ -1050,6 +1051,7 @@ def datacenter_list(args):
         args.verbose += 1
     return _print_verbose(mano_response, args.verbose)
 
+
 def datacenter_sdn_port_mapping_set(args):
     tenant = _get_tenant()
     datacenter = _get_datacenter(args.name, tenant)
@@ -1058,30 +1060,37 @@ def datacenter_sdn_port_mapping_set(args):
     if not args.file:
         raise OpenmanoCLIError(
             "No yaml/json has been provided specifying the SDN port mapping")
+    sdn_port_mapping = _load_file_or_yaml(args.file)
+    payload_req = json.dumps({"sdn_port_mapping": sdn_port_mapping})
 
-    port_mapping = yaml.load(datacenter_sdn_port_mapping_list(args))
-    if 'error' in port_mapping:
+    # read
+    URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
+    mano_response = requests.get(URLrequest)
+    logger.debug("openmano response: %s", mano_response.text)
+    port_mapping = mano_response.json()
+    if mano_response.status_code != 200:
+        str(mano_response.json())
         raise OpenmanoCLIError("openmano client error: {}".format(port_mapping['error']['description']))
     if len(port_mapping["sdn_port_mapping"]["ports_mapping"]) > 0:
         if not args.force:
             r = raw_input("Datacenter %s already contains a port mapping. Overwrite? (y/N)? " % (datacenter))
             if not (len(r) > 0 and r[0].lower() == "y"):
                 return 0
-        args.force = True
-        print datacenter_sdn_port_mapping_clear(args)
 
-    sdn_port_mapping = _load_file_or_yaml(args.file)
-    payload_req = json.dumps({"sdn_port_mapping": sdn_port_mapping})
+        # clear
+        URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
+        mano_response = requests.delete(URLrequest)
+        logger.debug("openmano response: %s", mano_response.text)
+        if mano_response.status_code != 200:
+            return _print_verbose(mano_response, args.verbose)
 
+    # set
     URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
     logger.debug("openmano request: %s", payload_req)
     mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
     logger.debug("openmano response: %s", mano_response.text)
+    return _print_verbose(mano_response, args.verbose)
 
-    if mano_response.status_code == 200:
-        return yaml.safe_dump(mano_response.json())
-    else:
-        return mano_response.content
 
 def datacenter_sdn_port_mapping_list(args):
     tenant = _get_tenant()
@@ -1091,10 +1100,8 @@ def datacenter_sdn_port_mapping_list(args):
     mano_response = requests.get(URLrequest)
     logger.debug("openmano response: %s", mano_response.text)
 
-    if mano_response.status_code != 200:
-        return mano_response.content
+    return _print_verbose(mano_response, 4)
 
-    return yaml.safe_dump(mano_response.json())
 
 def datacenter_sdn_port_mapping_clear(args):
     tenant = _get_tenant()
@@ -1102,26 +1109,27 @@ def datacenter_sdn_port_mapping_clear(args):
 
     if not args.force:
         r = raw_input("Clean SDN port mapping for datacenter %s (y/N)? " %(datacenter))
-        if  not (len(r)>0  and r[0].lower()=="y"):
+        if not (len(r) > 0 and r[0].lower() == "y"):
             return 0
 
     URLrequest = "http://%s:%s/openmano/%s/datacenters/%s/sdn_mapping" % (mano_host, mano_port, tenant, datacenter)
     mano_response = requests.delete(URLrequest)
     logger.debug("openmano response: %s", mano_response.text)
 
-    if mano_response.status_code != 200:
-        if "No port mapping for datacenter" in mano_response.content:
-            return "No port mapping for datacenter " + datacenter + " has been found"
-        return mano_response.content
+    return _print_verbose(mano_response, args.verbose)
 
-    return yaml.safe_dump(mano_response.json())
 
 def sdn_controller_create(args):
     tenant = _get_tenant()
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
 
-    if not (args.ip and args.port and args.dpid and args.type):
-        raise OpenmanoCLIError("The following arguments are required: ip, port, dpid, type")
+    error_msg=[]
+    if not args.ip: error_msg.append("'ip'")
+    if not args.port: error_msg.append("'port'")
+    if not args.dpid: error_msg.append("'dpid'")
+    if not args.type: error_msg.append("'type'")
+    if error_msg:
+        raise OpenmanoCLIError("The following arguments are required: " + ",".join(error_msg))
 
     controller_dict = {}
     controller_dict['name'] = args.name
@@ -1145,42 +1153,41 @@ def sdn_controller_create(args):
     mano_response = requests.post(URLrequest, headers=headers_req, data=payload_req)
     logger.debug("openmano response: %s", mano_response.text)
     result = _print_verbose(mano_response, args.verbose)
-
     return result
 
+
 def sdn_controller_edit(args):
     tenant = _get_tenant()
     controller_uuid = _get_item_uuid("sdn_controllers", args.name, tenant)
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
 
-    if not (args.new_name or args.ip or args.port or args.dpid or args.type):
-        raise OpenmanoCLIError("At least one parameter must be editd")
-
-    if not args.force:
-        r = raw_input("Update SDN controller %s (y/N)? " %(args.name))
-        if  not (len(r)>0  and r[0].lower()=="y"):
-            return 0
-
     controller_dict = {}
-    if args.new_name != None:
+    if args.new_name:
         controller_dict['name'] = args.new_name
-    if args.ip != None:
+    if args.ip:
         controller_dict['ip'] = args.ip
-    if args.port != None:
+    if args.port:
         controller_dict['port'] = int(args.port)
-    if args.dpid != None:
+    if args.dpid:
         controller_dict['dpid'] = args.dpid
-    if args.type != None:
+    if args.type:
         controller_dict['type'] = args.type
-    if args.description != None:
+    if args.description:
         controller_dict['description'] = args.description
-    if args.user != None:
+    if args.user:
         controller_dict['user'] = args.user
-    if args.password != None:
+    if args.password:
         controller_dict['password'] = args.password
 
-    payload_req = json.dumps({"sdn_controller": controller_dict})
+    if not controller_dict:
+        raise OpenmanoCLIError("At least one parameter must be edited")
 
+    if not args.force:
+        r = raw_input("Update SDN controller {} (y/N)? ".format(args.name))
+        if not (len(r) > 0 and r[0].lower() == "y"):
+            return 0
+
+    payload_req = json.dumps({"sdn_controller": controller_dict})
     # print payload_req
 
     URLrequest = "http://%s:%s/openmano/%s/sdn_controllers/%s" % (mano_host, mano_port, tenant, controller_uuid)
@@ -1188,9 +1195,9 @@ def sdn_controller_edit(args):
     mano_response = requests.put(URLrequest, headers=headers_req, data=payload_req)
     logger.debug("openmano response: %s", mano_response.text)
     result = _print_verbose(mano_response, args.verbose)
-
     return result
 
+
 def sdn_controller_list(args):
     tenant = _get_tenant()
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
@@ -1208,8 +1215,9 @@ def sdn_controller_list(args):
     if args.name!=None:
         args.verbose += 1
 
-    result = json.dumps(mano_response.json(), indent=4)
-    return result
+    # json.dumps(mano_response.json(), indent=4)
+    return _print_verbose(mano_response, args.verbose)
+
 
 def sdn_controller_delete(args):
     tenant = _get_tenant()
@@ -1223,9 +1231,7 @@ def sdn_controller_delete(args):
     URLrequest = "http://%s:%s/openmano/%s/sdn_controllers/%s" % (mano_host, mano_port, tenant, controller_uuid)
     mano_response = requests.delete(URLrequest)
     logger.debug("openmano response: %s", mano_response.text)
-    result = _print_verbose(mano_response, args.verbose)
-
-    return result
+    return _print_verbose(mano_response, args.verbose)
 
 def vim_action(args):
     #print "datacenter-net-action",args
@@ -1284,6 +1290,7 @@ def vim_action(args):
             args.verbose=0
         return _print_verbose(mano_response, args.verbose)
 
+
 def _get_items(item, item_name_id=None, datacenter=None, tenant=None):
     URLrequest = "http://%s:%s/openmano" %(mano_host, mano_port)
     if tenant:
@@ -1299,6 +1306,7 @@ def _get_items(item, item_name_id=None, datacenter=None, tenant=None):
 
     return mano_response
 
+
 def vim_net_sdn_attach(args):
     #Verify the network exists in the vim
     tenant = _get_tenant()
@@ -1324,8 +1332,7 @@ def vim_net_sdn_attach(args):
     mano_response = requests.post(URLrequest, headers=headers_req, data=json.dumps(payload_req))
     logger.debug("openmano response: %s", mano_response.text)
     result = _print_verbose(mano_response, args.verbose)
-
-    return
+    return result
 
 
 def vim_net_sdn_detach(args):
@@ -1358,8 +1365,8 @@ def vim_net_sdn_detach(args):
     mano_response = requests.delete(URLrequest)
     logger.debug("openmano response: %s", mano_response.text)
     result = _print_verbose(mano_response, args.verbose)
+    return result
 
-    return
 
 def datacenter_net_action(args):
     if args.action == "net-update":
@@ -1471,6 +1478,7 @@ def datacenter_netmap_action(args):
     logger.debug("openmano response: %s", mano_response.text )
     return _print_verbose(mano_response, args.verbose)
 
+
 def element_edit(args):
     element = _get_item_uuid(args.element, args.name)
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
@@ -1544,6 +1552,7 @@ def datacenter_edit(args):
         args.verbose += 1
     return _print_verbose(mano_response, args.verbose)
 
+
 def version(args):
     headers_req = {'Accept': 'application/json', 'content-type': 'application/json'}
     URLrequest = "http://%s:%s/openmano/version" % (mano_host, mano_port)
index 8f8c3f1..94924bd 100755 (executable)
--- a/openmanod
+++ b/openmanod
@@ -48,13 +48,14 @@ import osm_ro
 
 __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes"
 __date__ = "$26-aug-2014 11:09:29$"
-__version__ = "0.5.15-r524"
-version_date = "Jun 2017"
-database_version = 21      #expected database schema version
+__version__ = "0.5.21-r531"
+version_date = "Aug 2017"
+database_version = 24      # expected database schema version
 
 global global_config
 global logger
 
+
 class LoadConfigurationException(Exception):
     pass
 
index 4a87721..26e4c00 100644 (file)
@@ -336,7 +336,31 @@ class db_base():
         self.logger.debug(cmd)
         self.cur.execute(cmd) 
         return self.cur.rowcount
-    
+
+    def _new_uuid(self, root_uuid=None, used_table=None, created_time=0):
+        """
+        Generate a new uuid. It DOES NOT begin or end the transaction, so self.con.cursor must be created
+        :param root_uuid: master uuid of the transaction
+        :param used_table: the table this uuid is intended for
+        :param created_time: time of creation
+        :return: the created uuid
+        """
+
+        uuid = str(myUuid.uuid1())
+        # defining root_uuid if not provided
+        if root_uuid is None:
+            root_uuid = uuid
+        if created_time:
+            created_at = created_time
+        else:
+            created_at = time.time()
+        # inserting new uuid
+        cmd = "INSERT INTO uuids (uuid, root_uuid, used_at, created_at) VALUES ('{:s}','{:s}','{:s}', {:f})".format(
+            uuid, root_uuid, used_table, created_at)
+        self.logger.debug(cmd)
+        self.cur.execute(cmd)
+        return uuid
+
     def _new_row_internal(self, table, INSERT, add_uuid=False, root_uuid=None, created_time=0):
         ''' Add one row into a table. It DOES NOT begin or end the transaction, so self.con.cursor must be created
         Attribute 
index 94544f6..f9cbb9b 100644 (file)
@@ -1376,10 +1376,18 @@ def http_get_instance_id(tenant_id, instance_id):
             nfvo.refresh_instance(mydb, tenant_id, instance_dict)
         except (nfvo.NfvoException, db_base_Exception) as e:
             logger.warn("nfvo.refresh_instance couldn't refresh the status of the instance: %s" % str(e))
-        #obtain data with results upated
+        # obtain data with results upated
         instance = mydb.get_instance_scenario(instance_id, tenant_id)
+        # Workaround to SO, convert vnfs:vms:interfaces:ip_address from ";" separated list to report the first value
+        for vnf in instance.get("vnfs", ()):
+            for vm in vnf.get("vms", ()):
+                for iface in vm.get("interfaces", ()):
+                    if iface.get("ip_address"):
+                        index = iface["ip_address"].find(";")
+                        if index >= 0:
+                            iface["ip_address"] = iface["ip_address"][:index]
         convert_datetime2str(instance)
-        #print json.dumps(instance, indent=4)
+        # print json.dumps(instance, indent=4)
         return format_out(instance)
     except (nfvo.NfvoException, db_base_Exception) as e:
         logger.error("http_get_instance_id error {}: {}".format(e.http_code, str(e)))
index d45a3c7..fa87205 100644 (file)
@@ -38,6 +38,7 @@ import console_proxy_thread as cli
 import vimconn
 import logging
 import collections
+from uuid import uuid4
 from db_base import db_base_Exception
 
 import nfvo_db
@@ -334,7 +335,9 @@ def rollback(mydb,  vims, rollback_list):
         if item["where"]=="vim":
             if item["vim_id"] not in vims:
                 continue
-            vim=vims[ item["vim_id"] ]
+            if is_task_id(item["uuid"]):
+                continue
+            vim = vims[item["vim_id"]]
             try:
                 if item["what"]=="image":
                     vim.delete_image(item["uuid"])
@@ -392,12 +395,12 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
             name_dict[ interface["name"] ] = "overlay"
         vnfc_interfaces[ vnfc["name"] ] = name_dict
         # check bood-data info
-        if "boot-data" in vnfc:
-            # check that user-data is incompatible with users and config-files
-            if (vnfc["boot-data"].get("users") or vnfc["boot-data"].get("config-files")) and vnfc["boot-data"].get("user-data"):
-                raise NfvoException(
-                    "Error at vnf:VNFC:boot-data, fields 'users' and 'config-files' are not compatible with 'user-data'",
-                    HTTP_Bad_Request)
+        if "boot-data" in vnfc:
+            # check that user-data is incompatible with users and config-files
+            if (vnfc["boot-data"].get("users") or vnfc["boot-data"].get("config-files")) and vnfc["boot-data"].get("user-data"):
+                raise NfvoException(
+                    "Error at vnf:VNFC:boot-data, fields 'users' and 'config-files' are not compatible with 'user-data'",
+                    HTTP_Bad_Request)
 
     #check if the info in external_connections matches with the one in the vnfcs
     name_list=[]
@@ -481,7 +484,7 @@ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
                     HTTP_Bad_Request)
 
 
-def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error = None):
+def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=None):
     #look if image exist
     if only_create_at_vim:
         image_mano_id = image_dict['uuid']
@@ -757,6 +760,7 @@ def new_vnf(mydb, tenant_id, vnf_descriptor):
         for vnfc in vnf_descriptor['vnf']['VNFC']:
             VNFCitem={}
             VNFCitem["name"] = vnfc['name']
+            VNFCitem["availability_zone"] = vnfc.get('availability_zone')
             VNFCitem["description"] = vnfc.get("description", 'VM %s of the VNF %s' %(vnfc['name'],vnf_name))
 
             #print "Flavor name: %s. Description: %s" % (VNFCitem["name"]+"-flv", VNFCitem["description"])
@@ -827,6 +831,7 @@ def new_vnf(mydb, tenant_id, vnf_descriptor):
             #print "Image id for VNFC %s: %s" % (vnfc['name'],image_id)
             VNFCDict[vnfc['name']]["image_id"] = image_id
             VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
+            VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
             if vnfc.get("boot-data"):
                 VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True, width=256)
 
@@ -962,6 +967,7 @@ def new_vnf_v02(mydb, tenant_id, vnf_descriptor):
             #print "Image id for VNFC %s: %s" % (vnfc['name'],image_id)
             VNFCDict[vnfc['name']]["image_id"] = image_id
             VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
+            VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
             if vnfc.get("boot-data"):
                 VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True, width=256)
 
@@ -1665,6 +1671,7 @@ def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instanc
 
         logger.debug("start_scenario 2. Creating new nets (vnf internal nets) in the VIM")
         #For each vnf net, we create it and we add it to instanceNetlist.
+
         for sce_vnf in scenarioDict['vnfs']:
             for net in sce_vnf['nets']:
                 #print "Net name: %s. Description: %s" % (net["name"], net["description"])
@@ -1696,6 +1703,17 @@ def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instanc
         #myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
         i = 0
         for sce_vnf in scenarioDict['vnfs']:
+            vnf_availability_zones = []
+            for vm in sce_vnf['vms']:
+                vm_av = vm.get('availability_zone')
+                if vm_av and vm_av not in vnf_availability_zones:
+                    vnf_availability_zones.append(vm_av)
+
+            # check if there is enough availability zones available at vim level.
+            if myvims[datacenter_id].availability_zone and vnf_availability_zones:
+                if len(vnf_availability_zones) > len(myvims[datacenter_id].availability_zone):
+                    raise NfvoException('No enough availability zones at VIM for this deployment', HTTP_Bad_Request)
+
             for vm in sce_vnf['vms']:
                 i += 1
                 myVMDict = {}
@@ -1782,8 +1800,16 @@ def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instanc
                 #print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
                 #print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
                 #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
-                vm_id = myvim.new_vminstance(myVMDict['name'],myVMDict['description'],myVMDict.get('start', None),
-                        myVMDict['imageRef'],myVMDict['flavorRef'],myVMDict['networks'])
+
+                if 'availability_zone' in myVMDict:
+                    av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
+                else:
+                    av_index = None
+
+                vm_id = myvim.new_vminstance(myVMDict['name'], myVMDict['description'], myVMDict.get('start', None),
+                                             myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'],
+                                             availability_zone_index=av_index,
+                                             availability_zone_list=vnf_availability_zones)
                 #print "VIM vm instance id (server id) for scenario %s: %s" % (scenarioDict['name'],vm_id)
                 vm['vim_id'] = vm_id
                 rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
@@ -1813,10 +1839,10 @@ def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instanc
 
 
 def unify_cloud_config(cloud_config_preserve, cloud_config):
-    ''' join the cloud config information into cloud_config_preserve.
+    """ join the cloud config information into cloud_config_preserve.
     In case of conflict cloud_config_preserve preserves
-    None is admited
-    '''
+    None is allowed
+    """
     if not cloud_config_preserve and not cloud_config:
         return None
 
@@ -1866,10 +1892,19 @@ def unify_cloud_config(cloud_config_preserve, cloud_config):
         new_cloud_config["boot-data-drive"] = cloud_config_preserve["boot-data-drive"]
 
     # user-data
-    if cloud_config and cloud_config.get("user-data") != None:
-        new_cloud_config["user-data"] = cloud_config["user-data"]
-    if cloud_config_preserve and cloud_config_preserve.get("user-data") != None:
-        new_cloud_config["user-data"] = cloud_config_preserve["user-data"]
+    new_cloud_config["user-data"] = []
+    if cloud_config and cloud_config.get("user-data"):
+        if isinstance(cloud_config["user-data"], list):
+            new_cloud_config["user-data"] += cloud_config["user-data"]
+        else:
+            new_cloud_config["user-data"].append(cloud_config["user-data"])
+    if cloud_config_preserve and cloud_config_preserve.get("user-data"):
+        if isinstance(cloud_config_preserve["user-data"], list):
+            new_cloud_config["user-data"] += cloud_config_preserve["user-data"]
+        else:
+            new_cloud_config["user-data"].append(cloud_config_preserve["user-data"])
+    if not new_cloud_config["user-data"]:
+        del new_cloud_config["user-data"]
 
     # config files
     new_cloud_config["config-files"] = []
@@ -1923,6 +1958,7 @@ def get_vim_thread(mydb, tenant_id, datacenter_id_name=None, datacenter_tenant_i
     except db_base_Exception as e:
         raise NfvoException("{} {}".format(type(e).__name__ , str(e)), e.http_code)
 
+
 def get_datacenter_by_name_uuid(mydb, tenant_id, datacenter_id_name=None, **extra_filter):
     datacenter_id = None
     datacenter_name = None
@@ -1978,14 +2014,30 @@ def create_instance(mydb, tenant_id, instance_dict):
     #logger.debug(">>>>>>> InstanceDict:\n{}".format(yaml.safe_dump(instance_dict,default_flow_style=False, width=256)))
     #logger.debug(">>>>>>> ScenarioDict:\n{}".format(yaml.safe_dump(scenarioDict,default_flow_style=False, width=256)))
 
-    scenarioDict['datacenter_id'] = default_datacenter_id
+    uuid_list = []
+    instance_name = instance_dict["name"]
+    instance_uuid = str(uuid4())
+    uuid_list.append(instance_uuid)
+    db_instance_scenario = {
+        "uuid": instance_uuid,
+        "name": instance_name,
+        "tenant_id": tenant_id,
+        "scenario_id": scenarioDict['uuid'],
+        "datacenter_id": default_datacenter_id,
+        # filled below 'datacenter_tenant_id'
+        "description": instance_dict.get("description"),
+    }
+    db_ip_profiles=[]
+    if scenarioDict.get("cloud-config"):
+        db_instance_scenario["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"],
+                                                              default_flow_style=True, width=256)
 
+    vnf_net2instance = {}   #Auxiliary dictionary. First key:'scenario' or sce_vnf uuid. Second Key: uuid of the net/sce_net. Value: vim_net_id
+    sce_net2instance = {}
     auxNetDict = {}   #Auxiliar dictionary. First key:'scenario' or sce_vnf uuid. Second Key: uuid of the net/sce_net. Value: vim_net_id
     auxNetDict['scenario'] = {}
 
     logger.debug("Creating instance from scenario-dict:\n%s", yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))  #TODO remove
-    instance_name = instance_dict["name"]
-    instance_description = instance_dict.get("description")
     try:
         # 0 check correct parameters
         for net_name, net_instance_desc in instance_dict.get("networks",{}).iteritems():
@@ -2069,12 +2121,12 @@ def create_instance(mydb, tenant_id, instance_dict):
         #logger.debug(">>>>>>>> Merged dictionary")
         logger.debug("Creating instance scenario-dict MERGED:\n%s", yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
 
-
         # 1. Creating new nets (sce_nets) in the VIM"
+        db_instance_nets = []
         for sce_net in scenarioDict['nets']:
-            sce_net["vim_id_sites"]={}
-            descriptor_net =  instance_dict.get("networks",{}).get(sce_net["name"],{})
+            descriptor_net = instance_dict.get("networks",{}).get(sce_net["name"],{})
             net_name = descriptor_net.get("vim-network-name")
+            sce_net2instance[sce_net['uuid']] = {}
             auxNetDict['scenario'][sce_net['uuid']] = {}
 
             sites = descriptor_net.get("sites", [ {} ])
@@ -2139,7 +2191,7 @@ def create_instance(mydb, tenant_id, instance_dict):
                         if not create_network:
                             raise NfvoException("No candidate VIM network found for " + filter_text, HTTP_Bad_Request )
                     else:
-                        sce_net["vim_id_sites"][datacenter_id] = vim_nets[0]['id']
+                        vim_id = vim_nets[0]['id']
                         auxNetDict['scenario'][sce_net['uuid']][datacenter_id] = vim_nets[0]['id']
                         create_network = False
                 if create_network:
@@ -2149,13 +2201,41 @@ def create_instance(mydb, tenant_id, instance_dict):
                     instance_tasks[task_id] = task
                     tasks_to_launch[myvim_thread_id].append(task)
                     #network_id = vim.new_network(net_vim_name, net_type, sce_net.get('ip_profile',None))
-                    sce_net["vim_id_sites"][datacenter_id] = task_id
+                    vim_id = task_id
                     auxNetDict['scenario'][sce_net['uuid']][datacenter_id] = task_id
                     rollbackList.append({'what':'network', 'where':'vim', 'vim_id':datacenter_id, 'uuid':task_id})
                     sce_net["created"] = True
 
+                # fill database content
+                net_uuid = str(uuid4())
+                uuid_list.append(net_uuid)
+                sce_net2instance[sce_net['uuid']][datacenter_id] = net_uuid
+                db_net = {
+                    "uuid": net_uuid,
+                    'vim_net_id': vim_id,
+                    "instance_scenario_id": instance_uuid,
+                    "sce_net_id": sce_net["uuid"],
+                    "created": create_network,
+                    'datacenter_id': datacenter_id,
+                    'datacenter_tenant_id': myvim_thread_id,
+                    'status': 'BUILD' if create_network else "ACTIVE"
+                }
+                db_instance_nets.append(db_net)
+            if 'ip_profile' in sce_net:
+                db_ip_profile={
+                    'instance_net_id': net_uuid,
+                    'ip_version': sce_net['ip_profile']['ip_version'],
+                    'subnet_address': sce_net['ip_profile']['subnet_address'],
+                    'gateway_address': sce_net['ip_profile']['gateway_address'],
+                    'dns_address': sce_net['ip_profile']['dns_address'],
+                    'dhcp_enabled': sce_net['ip_profile']['dhcp_enabled'],
+                    'dhcp_start_address': sce_net['ip_profile']['dhcp_start_address'],
+                    'dhcp_count': sce_net['ip_profile']['dhcp_count'],
+                }
+                db_ip_profiles.append(db_ip_profile)
+
         # 2. Creating new nets (vnf internal nets) in the VIM"
-        #For each vnf net, we create it and we add it to instanceNetlist.
+        # For each vnf net, we create it and we add it to instanceNetlist.
         for sce_vnf in scenarioDict['vnfs']:
             for net in sce_vnf['nets']:
                 if sce_vnf.get("datacenter"):
@@ -2177,20 +2257,65 @@ def create_instance(mydb, tenant_id, instance_dict):
                 instance_tasks[task_id] = task
                 tasks_to_launch[myvim_thread_id].append(task)
                 # network_id = vim.new_network(net_name, net_type, net.get('ip_profile',None))
-                net['vim_id'] = task_id
+                vim_id = task_id
+                if sce_vnf['uuid'] not in vnf_net2instance:
+                    vnf_net2instance[sce_vnf['uuid']] = {}
+                vnf_net2instance[sce_vnf['uuid']][net['uuid']] = task_id
                 if sce_vnf['uuid'] not in auxNetDict:
                     auxNetDict[sce_vnf['uuid']] = {}
                 auxNetDict[sce_vnf['uuid']][net['uuid']] = task_id
                 rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':task_id})
                 net["created"] = True
 
-
-        #print "auxNetDict:"
-        #print yaml.safe_dump(auxNetDict, indent=4, default_flow_style=False)
+                # fill database content
+                net_uuid = str(uuid4())
+                uuid_list.append(net_uuid)
+                vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
+                db_net = {
+                    "uuid": net_uuid,
+                    'vim_net_id': vim_id,
+                    "instance_scenario_id": instance_uuid,
+                    "net_id": net["uuid"],
+                    "created": True,
+                    'datacenter_id': datacenter_id,
+                    'datacenter_tenant_id': myvim_thread_id,
+                }
+                db_instance_nets.append(db_net)
+                if 'ip_profile' in net:
+                    db_ip_profile = {
+                        'instance_net_id': net_uuid,
+                        'ip_version': net['ip_profile']['ip_version'],
+                        'subnet_address': net['ip_profile']['subnet_address'],
+                        'gateway_address': net['ip_profile']['gateway_address'],
+                        'dns_address': net['ip_profile']['dns_address'],
+                        'dhcp_enabled': net['ip_profile']['dhcp_enabled'],
+                        'dhcp_start_address': net['ip_profile']['dhcp_start_address'],
+                        'dhcp_count': net['ip_profile']['dhcp_count'],
+                    }
+                    db_ip_profiles.append(db_ip_profile)
+
+        #print "vnf_net2instance:"
+        #print yaml.safe_dump(vnf_net2instance, indent=4, default_flow_style=False)
 
         # 3. Creating new vm instances in the VIM
+        db_instance_vnfs = []
+        db_instance_vms = []
+        db_instance_interfaces = []
         #myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
-        for sce_vnf in scenarioDict['vnfs']:
+        sce_vnf_list = sorted(scenarioDict['vnfs'], key=lambda k: k['name']) 
+        #for sce_vnf in scenarioDict['vnfs']:
+        for sce_vnf in sce_vnf_list:
+            vnf_availability_zones = []
+            for vm in sce_vnf['vms']:
+                vm_av = vm.get('availability_zone')
+                if vm_av and vm_av not in vnf_availability_zones:
+                    vnf_availability_zones.append(vm_av)
+
+            # check if there is enough availability zones available at vim level.
+            if myvims[datacenter_id].availability_zone and vnf_availability_zones:
+                if len(vnf_availability_zones) > len(myvims[datacenter_id].availability_zone):
+                    raise NfvoException('No enough availability zones at VIM for this deployment', HTTP_Bad_Request)
+
             if sce_vnf.get("datacenter"):
                 vim = myvims[ sce_vnf["datacenter"] ]
                 myvim_thread_id = myvim_threads_id[ sce_vnf["datacenter"] ]
@@ -2199,12 +2324,24 @@ def create_instance(mydb, tenant_id, instance_dict):
                 vim = myvims[ default_datacenter_id ]
                 myvim_thread_id = myvim_threads_id[ default_datacenter_id ]
                 datacenter_id = default_datacenter_id
-            sce_vnf["datacenter_id"] =  datacenter_id
+            sce_vnf["datacenter_id"] = datacenter_id
             i = 0
+
+            vnf_uuid = str(uuid4())
+            uuid_list.append(vnf_uuid)
+            db_instance_vnf = {
+                'uuid': vnf_uuid,
+                'instance_scenario_id': instance_uuid,
+                'vnf_id': sce_vnf['vnf_id'],
+                'sce_vnf_id': sce_vnf['uuid'],
+                'datacenter_id': datacenter_id,
+                'datacenter_tenant_id': myvim_thread_id,
+            }
+            db_instance_vnfs.append(db_instance_vnf)
+
             for vm in sce_vnf['vms']:
-                i += 1
                 myVMDict = {}
-                myVMDict['name'] = "{}.{}.{}".format(instance_name,sce_vnf['name'],chr(96+i))
+                myVMDict['name'] = "{}.{}.{}".format(instance_name[:64], sce_vnf['name'][:64], vm["name"][:64])
                 myVMDict['description'] = myVMDict['name'][0:99]
 #                if not startvms:
 #                    myVMDict['start'] = "no"
@@ -2237,9 +2374,11 @@ def create_instance(mydb, tenant_id, instance_dict):
                 vm['vim_flavor_id'] = flavor_id
                 myVMDict['imageRef'] = vm['vim_image_id']
                 myVMDict['flavorRef'] = vm['vim_flavor_id']
+                myVMDict['availability_zone'] = vm.get('availability_zone')
                 myVMDict['networks'] = []
                 task_depends = {}
                 #TODO ALF. connect_mgmt_interfaces. Connect management interfaces if this is true
+                db_vm_ifaces = []
                 for iface in vm['interfaces']:
                     netDict = {}
                     if iface['type']=="data":
@@ -2286,48 +2425,114 @@ def create_instance(mydb, tenant_id, instance_dict):
                             #print vnf_iface
                             if vnf_iface['interface_id']==iface['uuid']:
                                 netDict['net_id'] = auxNetDict['scenario'][ vnf_iface['sce_net_id'] ][datacenter_id]
+                                instance_net_id = sce_net2instance[ vnf_iface['sce_net_id'] ][datacenter_id]
                                 break
                     else:
                         netDict['net_id'] = auxNetDict[ sce_vnf['uuid'] ][ iface['net_id'] ]
+                        instance_net_id = vnf_net2instance[ sce_vnf['uuid'] ][ iface['net_id'] ]
                     if netDict.get('net_id') and is_task_id(netDict['net_id']):
                         task_depends[netDict['net_id']] = instance_tasks[netDict['net_id']]
                     #skip bridge ifaces not connected to any net
                     #if 'net_id' not in netDict or netDict['net_id']==None:
                     #    continue
                     myVMDict['networks'].append(netDict)
-                #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
-                #print myVMDict['name']
-                #print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
-                #print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
-                #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+                    db_vm_iface={
+                        # "uuid"
+                        # 'instance_vm_id': instance_vm_uuid,
+                        "instance_net_id": instance_net_id,
+                        'interface_id': iface['uuid'],
+                        # 'vim_interface_id': ,
+                        'type': 'external' if iface['external_name'] is not None else 'internal',
+                        'ip_address': iface.get('ip_address'),
+                        'floating_ip': int(iface.get('floating-ip', False)),
+                        'port_security': int(iface.get('port-security', True))
+                    }
+                    db_vm_ifaces.append(db_vm_iface)
+                # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+                # print myVMDict['name']
+                # print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
+                # print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
+                # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
                 if vm.get("boot_data"):
                     cloud_config_vm = unify_cloud_config(vm["boot_data"], cloud_config)
                 else:
                     cloud_config_vm = cloud_config
-                task = new_task("new-vm", (myVMDict['name'], myVMDict['description'], myVMDict.get('start', None),
-                                           myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'],
-                                           cloud_config_vm, myVMDict['disks']), depends=task_depends)
-                instance_tasks[task["id"]] = task
-                tasks_to_launch[myvim_thread_id].append(task)
-                vm_id = task["id"]
-                vm['vim_id'] = vm_id
-                rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
-                #put interface uuid back to scenario[vnfs][vms[[interfaces]
-                for net in myVMDict['networks']:
-                    if "vim_id" in net:
-                        for iface in vm['interfaces']:
-                            if net["name"]==iface["internal_name"]:
-                                iface["vim_id"]=net["vim_id"]
-                                break
+                if myVMDict.get('availability_zone'):
+                    av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
+                else:
+                    av_index = None
+                for vm_index in range(0, vm.get('count', 1)):
+                    vm_index_name = ""
+                    if vm.get('count', 1) > 1:
+                        vm_index_name += "." + chr(97 + vm_index)
+                    task = new_task("new-vm", (myVMDict['name']+vm_index_name, myVMDict['description'],
+                                               myVMDict.get('start', None), myVMDict['imageRef'],
+                                               myVMDict['flavorRef'], myVMDict['networks'],
+                                               cloud_config_vm, myVMDict['disks'], av_index,
+                                               vnf_availability_zones), depends=task_depends)
+                    instance_tasks[task["id"]] = task
+                    tasks_to_launch[myvim_thread_id].append(task)
+                    vm_id = task["id"]
+                    vm['vim_id'] = vm_id
+                    rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
+                    # put interface uuid back to scenario[vnfs][vms[[interfaces]
+                    for net in myVMDict['networks']:
+                        if "vim_id" in net:
+                            for iface in vm['interfaces']:
+                                if net["name"]==iface["internal_name"]:
+                                    iface["vim_id"]=net["vim_id"]
+                                    break
+                    vm_uuid = str(uuid4())
+                    uuid_list.append(vm_uuid)
+                    db_vm = {
+                        "uuid": vm_uuid,
+                        'instance_vnf_id': vnf_uuid,
+                        "vim_vm_id": vm_id,
+                        "vm_id": vm["uuid"],
+                        # "status":
+                    }
+                    db_instance_vms.append(db_vm)
+                    for db_vm_iface in db_vm_ifaces:
+                        iface_uuid = str(uuid4())
+                        uuid_list.append(iface_uuid)
+                        db_vm_iface_instance = {
+                            "uuid": iface_uuid,
+                            "instance_vm_id": vm_uuid
+                        }
+                        db_vm_iface_instance.update(db_vm_iface)
+                        if db_vm_iface_instance.get("ip_address"):  # increment ip_address
+                            ip = db_vm_iface_instance.get("ip_address")
+                            i = ip.rfind(".")
+                            if i > 0:
+                                try:
+                                    i += 1
+                                    ip = ip[i:] + str(int(ip[:i]) +1)
+                                    db_vm_iface_instance["ip_address"] = ip
+                                except:
+                                    db_vm_iface_instance["ip_address"] = None
+                        db_instance_interfaces.append(db_vm_iface_instance)
+
         scenarioDict["datacenter2tenant"] = myvim_threads_id
+
+        db_instance_scenario['datacenter_tenant_id'] = myvim_threads_id[default_datacenter_id]
+        db_instance_scenario['datacenter_id'] = default_datacenter_id
+        db_tables=[
+            {"instance_scenarios": db_instance_scenario},
+            {"instance_vnfs": db_instance_vnfs},
+            {"instance_nets": db_instance_nets},
+            {"ip_profiles": db_ip_profiles},
+            {"instance_vms": db_instance_vms},
+            {"instance_interfaces": db_instance_interfaces},
+        ]
+
         logger.debug("create_instance Deployment done scenarioDict: %s",
-                    yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False) )
-        instance_id = mydb.new_instance_scenario_as_a_whole(tenant_id,instance_name, instance_description, scenarioDict)
+                    yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
+        mydb.new_rows(db_tables, uuid_list)
         for myvim_thread_id,task_list in tasks_to_launch.items():
             for task in task_list:
                 vim_threads["running"][myvim_thread_id].insert_task(task)
 
-        global_instance_tasks[instance_id] = instance_tasks
+        global_instance_tasks[instance_uuid] = instance_tasks
         # Update database with those ended instance_tasks
         # for task in instance_tasks.values():
         #     if task["status"] == "ok":
@@ -2337,7 +2542,7 @@ def create_instance(mydb, tenant_id, instance_dict):
         #         elif task["name"] == "new-net":
         #             mydb.update_rows("instance_nets", UPDATE={"vim_net_id": task["result"]},
         #                              WHERE={"vim_net_id": task["id"]})
-        return mydb.get_instance_scenario(instance_id)
+        return mydb.get_instance_scenario(instance_uuid)
     except (NfvoException, vimconn.vimconnException,db_base_Exception)  as e:
         message = rollback(mydb, myvims, rollbackList)
         if isinstance(e, db_base_Exception):
@@ -2789,20 +2994,24 @@ def new_datacenter(mydb, datacenter_descriptor):
 
 
 def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
-    #obtain data, check that only one exist
+    # obtain data, check that only one exist
     datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id_name)
-    #edit data
+
+    # edit data
     datacenter_id = datacenter['uuid']
     where={'uuid': datacenter['uuid']}
+    remove_port_mapping = False
     if "config" in datacenter_descriptor:
-        if datacenter_descriptor['config']!=None:
+        if datacenter_descriptor['config'] != None:
             try:
                 new_config_dict = datacenter_descriptor["config"]
                 #delete null fields
                 to_delete=[]
                 for k in new_config_dict:
-                    if new_config_dict[k]==None:
+                    if new_config_dict[k] == None:
                         to_delete.append(k)
+                        if k == 'sdn-controller':
+                            remove_port_mapping = True
 
                 config_text = datacenter.get("config")
                 if not config_text:
@@ -2814,7 +3023,16 @@ def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
                     del config_dict[k]
             except Exception as e:
                 raise NfvoException("Bad format at datacenter:config " + str(e), HTTP_Bad_Request)
-        datacenter_descriptor["config"]= yaml.safe_dump(config_dict,default_flow_style=True,width=256) if len(config_dict)>0 else None
+        if config_dict:
+            datacenter_descriptor["config"] = yaml.safe_dump(config_dict, default_flow_style=True, width=256)
+        else:
+            datacenter_descriptor["config"] = None
+        if remove_port_mapping:
+            try:
+                datacenter_sdn_port_mapping_delete(mydb, None, datacenter_id)
+            except ovimException as e:
+                logger.error("Error deleting datacenter-port-mapping " + str(e))
+
     mydb.update_rows('datacenters', datacenter_descriptor, where)
     return datacenter_id
 
@@ -2823,6 +3041,10 @@ def delete_datacenter(mydb, datacenter):
     #get nfvo_tenant info
     datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter, 'datacenter')
     mydb.delete_row_by_id("datacenters", datacenter_dict['uuid'])
+    try:
+        datacenter_sdn_port_mapping_delete(mydb, None, datacenter_dict['uuid'])
+    except ovimException as e:
+        logger.error("Error deleting datacenter-port-mapping " + str(e))
     return datacenter_dict['uuid'] + " " + datacenter_dict['name']
 
 
index ac392c6..071d03a 100644 (file)
@@ -38,6 +38,7 @@ tables_with_createdat_field=["datacenters","instance_nets","instance_scenarios",
                            "interfaces","nets","nfvo_tenants","scenarios","sce_interfaces","sce_nets",
                            "sce_vnfs","tenants_datacenters","datacenter_tenants","vms","vnfs", "datacenter_nets"]
 
+
 class nfvo_db(db_base.db_base):
     def __init__(self, host=None, user=None, passwd=None, database=None, log_name='openmano.db', log_level=None):
         db_base.db_base.__init__(self, host, user, passwd, database, log_name, log_level)
@@ -585,15 +586,19 @@ class nfvo_db(db_base.db_base):
                     scenario_dict['vnfs'] = self.cur.fetchall()
                     for vnf in scenario_dict['vnfs']:
                         #sce_interfaces
-                        cmd = "SELECT scei.uuid,scei.sce_net_id,scei.interface_id,i.external_name,scei.ip_address FROM sce_interfaces as scei join interfaces as i on scei.interface_id=i.uuid WHERE scei.sce_vnf_id='{}' ORDER BY scei.created_at".format(vnf['uuid'])
+                        cmd = "SELECT scei.uuid,scei.sce_net_id,scei.interface_id,i.external_name,scei.ip_address"\
+                              " FROM sce_interfaces as scei join interfaces as i on scei.interface_id=i.uuid"\
+                              " WHERE scei.sce_vnf_id='{}' ORDER BY scei.created_at".format(vnf['uuid'])
                         self.logger.debug(cmd)
                         self.cur.execute(cmd)
                         vnf['interfaces'] = self.cur.fetchall()
                         #vms
-                        cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, vms.name as name, vms.description as description, vms.boot_data as boot_data " \
-                                " FROM vnfs join vms on vnfs.uuid=vms.vnf_id " \
-                                " WHERE vnfs.uuid='" + vnf['vnf_id'] +"'"  \
-                                " ORDER BY vms.created_at"
+                        cmd = "SELECT vms.uuid as uuid, flavor_id, image_id, vms.name as name," \
+                              " vms.description as description, vms.boot_data as boot_data, count," \
+                              " vms.availability_zone as availability_zone" \
+                              " FROM vnfs join vms on vnfs.uuid=vms.vnf_id" \
+                              " WHERE vnfs.uuid='" + vnf['vnf_id'] + "'"  \
+                              " ORDER BY vms.created_at"
                         self.logger.debug(cmd)
                         self.cur.execute(cmd)
                         vnf['vms'] = self.cur.fetchall()
@@ -720,6 +725,43 @@ class nfvo_db(db_base.db_base):
                 self._format_error(e, tries, "delete", "instances running")
             tries -= 1
 
+    def new_rows(self, tables, uuid_list=None):
+        """
+        Make a transactional insertion of rows at several tables
+        :param tables: list with dictionary where the keys are the table names and the values are a row or row list
+            with the values to be inserted at the table. Each row is a dictionary with the key values. E.g.:
+            tables = [
+                {"table1": [{"column1": value, "column2": value, ...}, {"column1": value, "column2": value, ...}, ...]},
+                {"table2": [{"column1": value, "column2": value, ...}, {"column1": value, "column2": value, ...}, ...]},
+                {"table3": {"column1": value, "column2": value, ...}}
+            ]
+        :param uuid_list: list of created uuids, first one is the root (#TODO to store at uuid table)
+        :return: None if success,  raise exception otherwise
+        """
+        tries = 2
+        while tries:
+            created_time = time.time()
+            try:
+                with self.con:
+                    self.cur = self.con.cursor()
+                    for table in tables:
+                        for table_name, row_list in table.items():
+                            index = 0
+                            if isinstance(row_list, dict):
+                                row_list = (row_list, )  #create a list with the single value
+                            for row in row_list:
+                                if table_name in self.tables_with_created_field:
+                                    created_time_param = created_time + index*0.00001
+                                else:
+                                    created_time_param=0
+                                self._new_row_internal(table_name, row, add_uuid=False, root_uuid=None,
+                                                               created_time=created_time_param)
+                                index += 1
+                    return
+            except (mdb.Error, AttributeError) as e:
+                self._format_error(e, tries)
+            tries -= 1
+
     def new_instance_scenario_as_a_whole(self,tenant_id,instance_scenario_name,instance_scenario_description,scenarioDict):
         tries = 2
         while tries:
index 7d218ec..fb12d9f 100644 (file)
@@ -386,7 +386,7 @@ internal_connection_schema = {
         "name": name_schema,
         "description":description_schema,
         "type":{"type":"string", "enum":["bridge","data","ptp"]},
-        "elements": {"type" : "array", "items": internal_connection_element_schema, "minItems":2}
+        "elements": {"type" : "array", "items": internal_connection_element_schema, "minItems":1}
     },
     "required": ["name", "type", "elements"],
     "additionalProperties": False
@@ -400,7 +400,7 @@ internal_connection_schema_v02 = {
         "type": {"type": "string", "enum":["e-line", "e-lan"]},
         "implementation": {"type": "string", "enum":["overlay", "underlay"]},
         "ip-profile": ip_profile_schema,
-        "elements": {"type" : "array", "items": internal_connection_element_schema_v02, "minItems":2}
+        "elements": {"type" : "array", "items": internal_connection_element_schema_v02, "minItems":1}
     },
     "required": ["name", "type", "implementation", "elements"],
     "additionalProperties": False
@@ -541,8 +541,10 @@ vnfc_schema = {
     "properties":{
         "name": name_schema,
         "description": description_schema,
-        "VNFC image": {"oneOf": [path_schema, http_schema]},
+        "count": integer1_schema,
         "image name": name_schema,
+        "availability_zone": name_schema,
+        "VNFC image": {"oneOf": [path_schema, http_schema]},
         "image checksum": checksum_schema,
         "image metadata": metadata_schema, 
         #"cloud-config": cloud_config_schema, #common for all vnfs in the scenario
@@ -592,6 +594,7 @@ vnfd_schema_v01 = {
             "properties":{
                 "name": name_schema,
                 "description": description_schema,
+
                 "class": nameshort_schema,
                 "public": {"type" : "boolean"},
                 "physical": {"type" : "boolean"},
index 9f396a2..0074dfe 100644 (file)
@@ -144,8 +144,9 @@ class vim_thread(threading.Thread):
                             #delete old port
                             if task_interface.get("sdn_port_id"):
                                 try:
-                                    self.ovim.delete_port(task_interface["sdn_port_id"])
-                                    task_interface["sdn_port_id"] = None
+                                    with self.db_lock:
+                                        self.ovim.delete_port(task_interface["sdn_port_id"])
+                                        task_interface["sdn_port_id"] = None
                                 except ovimException as e:
                                     self.logger.error("ovimException deleting external_port={} ".format(
                                         task_interface["sdn_port_id"]) + str(e), exc_info=True)
@@ -174,11 +175,15 @@ class vim_thread(threading.Thread):
                                 continue
                             else:
                                 db_iface = db_ifaces[0]
-                                #If there is no sdn_net_id, check if it is because an already created vim network is being used
-                                #in that case, the sdn_net_id will be in that entry of the instance_nets table
+                                # If there is no sdn_net_id, check if it is because an already created vim network is being used
+                                # in that case, the sdn_net_id will be in that entry of the instance_nets table
                                 if not db_iface.get("sdn_net_id"):
-                                    result = self.db.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
-                                                                  WHERE={'vim_net_id': db_iface.get("vim_net_id"), 'instance_scenario_id': None, "datacenter_tenant_id":  self.datacenter_tenant_id})
+                                    with self.db_lock:
+                                        result = self.db.get_rows(
+                                            SELECT=('sdn_net_id',), FROM='instance_nets',
+                                            WHERE={'vim_net_id': db_iface.get("vim_net_id"),
+                                                   'instance_scenario_id': None,
+                                                   'datacenter_tenant_id':  self.datacenter_tenant_id})
                                     if len(result) == 1:
                                         db_iface["sdn_net_id"] = result[0]['sdn_net_id']
 
@@ -187,15 +192,16 @@ class vim_thread(threading.Thread):
                                     sdn_port_name = sdn_net_id + "." + db_iface["vm_id"]
                                     sdn_port_name = sdn_port_name[:63]
                                     try:
-                                        sdn_port_id = self.ovim.new_external_port(
-                                            {"compute_node": interface["compute_node"],
-                                             "pci": interface["pci"],
-                                             "vlan": interface.get("vlan"),
-                                             "net_id": sdn_net_id,
-                                             "region": self.vim["config"]["datacenter_id"],
-                                             "name": sdn_port_name,
-                                             "mac": interface.get("mac_address")})
-                                        interface["sdn_port_id"] = sdn_port_id
+                                        with self.db_lock:
+                                            sdn_port_id = self.ovim.new_external_port(
+                                                {"compute_node": interface["compute_node"],
+                                                 "pci": interface["pci"],
+                                                 "vlan": interface.get("vlan"),
+                                                 "net_id": sdn_net_id,
+                                                 "region": self.vim["config"]["datacenter_id"],
+                                                 "name": sdn_port_name,
+                                                 "mac": interface.get("mac_address")})
+                                            interface["sdn_port_id"] = sdn_port_id
                                     except (ovimException, Exception) as e:
                                         self.logger.error(
                                             "ovimException creating new_external_port compute_node={} " \
@@ -248,7 +254,8 @@ class vim_thread(threading.Thread):
                         if db_net.get("sdn_net_id"):
                             # get ovim status
                             try:
-                                sdn_net = self.ovim.show_network(db_net["sdn_net_id"])
+                                with self.db_lock:
+                                    sdn_net = self.ovim.show_network(db_net["sdn_net_id"])
                                 if sdn_net["status"] == "ERROR":
                                     if not vim_info.get("error_msg"):
                                         vim_info["error_msg"] = sdn_net["error_msg"]
@@ -421,7 +428,8 @@ class vim_thread(threading.Thread):
                             net_name, net_type, vim_net['encapsulation']))
                 network["vlan"] = vim_net.get('segmentation_id')
                 try:
-                    sdn_net_id = self.ovim.new_network(network)
+                    with self.db_lock:
+                        sdn_net_id = self.ovim.new_network(network)
                 except (ovimException, Exception) as e:
                     self.logger.error("task=%s cannot create SDN network vim_net_id=%s input='%s' ovimException='%s'",
                                       str(task_id), net_id, str(network), str(e))
@@ -531,7 +539,8 @@ class vim_thread(threading.Thread):
             for iface in interfaces:
                 if iface.get("sdn_port_id"):
                     try:
-                        self.ovim.delete_port(iface["sdn_port_id"])
+                        with self.db_lock:
+                            self.ovim.delete_port(iface["sdn_port_id"])
                     except ovimException as e:
                         self.logger.error("ovimException deleting external_port={} at VM vim_id={} deletion ".format(
                             iface["sdn_port_id"], vm_id) + str(e), exc_info=True)
@@ -559,17 +568,19 @@ class vim_thread(threading.Thread):
             self._remove_refresh("get-net", net_id)
             result = self.vim.delete_network(net_id)
             if sdn_net_id:
-                #Delete any attached port to this sdn network
-                #At this point, there will be ports associated to this network in case it was manually done using 'openmano vim-net-sdn-attach'
+                # Delete any attached port to this sdn network
+                # At this point, there will be ports associated to this network in case it was manually done using 'openmano vim-net-sdn-attach'
                 try:
-                    port_list = self.ovim.get_ports(columns={'uuid'}, filter={'name': 'external_port', 'net_id': sdn_net_id})
+                    with self.db_lock:
+                        port_list = self.ovim.get_ports(columns={'uuid'}, filter={'name': 'external_port', 'net_id': sdn_net_id})
                 except ovimException as e:
                     raise vimconn.vimconnException(
                         "ovimException obtaining external ports for net {}. ".format(sdn_net_id) + str(e))
 
                 for port in port_list:
                     try:
-                        self.ovim.delete_port(port['uuid'])
+                        with self.db_lock:
+                            self.ovim.delete_port(port['uuid'])
                     except ovimException as e:
                         raise vimconn.vimconnException(
                             "ovimException deleting port {} for net {}. ".format(port['uuid'], sdn_net_id) + str(e))
index 18f4334..7adaa36 100644 (file)
@@ -82,6 +82,7 @@ class vimconnNotImplemented(vimconnException):
     def __init__(self, message, http_code=HTTP_Not_Implemented):
         vimconnException.__init__(self, message, http_code)
 
+
 class vimconnector():
     """Abstract base class for all the VIM connector plugins
     These plugins must implement a vimconnector class derived from this 
@@ -115,6 +116,7 @@ class vimconnector():
         self.user      = user
         self.passwd    = passwd
         self.config    = config
+        self.availability_zone = None
         self.logger = logging.getLogger('openmano.vim')
         if log_level:
             self.logger.setLevel( getattr(logging, log_level) )
@@ -353,8 +355,8 @@ class vimconnector():
         """
         raise vimconnNotImplemented( "Should have implemented this" )
 
-    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None,
-                       disk_list=None):
+    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+        availability_zone_index=None, availability_zone_list=None):
         """Adds a VM instance to VIM
         Params:
             'start': (boolean) indicates if VM must start or created in pause mode.
@@ -385,7 +387,8 @@ class vimconnector():
                 'users': (optional) list of users to be inserted, each item is a dict with:
                     'name': (mandatory) user name,
                     'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) string is a text script to be passed directly to cloud-init
+                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
                 'config-files': (optional). List of files to be transferred. Each item is a dict with:
                     'dest': (mandatory) string with the destination absolute path
                     'encoding': (optional, by default text). Can be one of:
@@ -397,6 +400,9 @@ class vimconnector():
             'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
                 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
                 'size': (mandatory) string with the size of the disk in GB
+            availability_zone_index: Index of availability_zone_list to use for this VM. None if not AV required
+            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                availability_zone_index is None
         Returns the instance identifier or raises an exception on error
         """
         raise vimconnNotImplemented( "Should have implemented this" )
@@ -458,7 +464,7 @@ class vimconnector():
                 suffix:   extra text, e.g. the http path and query string   
         """
         raise vimconnNotImplemented( "Should have implemented this" )
-        
+
 #NOT USED METHODS in current version        
 
     def host_vim2gui(self, host, server_dict):
index f0eebb6..3cccbfc 100644 (file)
@@ -351,12 +351,12 @@ class vimconnector(vimconn.vimconnector):
             if filter_dict != {}:
                 if 'tenant_id' in filter_dict:
                     tfilters['vpcId'] = filter_dict['tenant_id']
-            subnets = self.conn_vpc.get_all_subnets(subnet_ids=filter_dict.get('id', None), filters=tfilters)
+            subnets = self.conn_vpc.get_all_subnets(subnet_ids=filter_dict.get('name', None), filters=tfilters)
             net_list = []
             for net in subnets:
                 net_list.append(
                     {'id': str(net.id), 'name': str(net.id), 'status': str(net.state), 'vpc_id': str(net.vpc_id),
-                     'cidr_block': str(net.cidr_block)})
+                     'cidr_block': str(net.cidr_block), 'type': 'bridge'})
             return net_list
         except Exception as e:
             self.format_vimconn_exception(e)
@@ -590,7 +590,7 @@ class vimconnector(vimconn.vimconnector):
             self.format_vimconn_exception(e)
 
     def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None,
-                       disk_list=None):
+                       disk_list=None, availability_zone_index=None, availability_zone_list=None):
         """Create a new VM/instance in AWS
         Params: name
                 decription
@@ -796,7 +796,10 @@ class vimconnector(vimconn.vimconnector):
                         interface_dict['vim_interface_id'] = interface.id
                         interface_dict['vim_net_id'] = interface.subnet_id
                         interface_dict['mac_address'] = interface.mac_address
-                        interface_dict['ip_address'] = interface.private_ip_address
+                        if hasattr(interface, 'publicIp') and interface.publicIp != None:
+                            interface_dict['ip_address'] = interface.publicIp + ";" + interface.private_ip_address
+                        else:
+                            interface_dict['ip_address'] = interface.private_ip_address
                         instance_dict['interfaces'].append(interface_dict)
                 except Exception as e:
                     self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
index b280da8..319f8c1 100644 (file)
@@ -35,6 +35,7 @@ import netaddr
 import time
 import yaml
 import random
+import sys
 import re
 
 from novaclient import client as nClient, exceptions as nvExceptions
@@ -51,8 +52,11 @@ from httplib import HTTPException
 from neutronclient.neutron import client as neClient
 from neutronclient.common import exceptions as neExceptions
 from requests.exceptions import ConnectionError
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
 
-'''contain the openstack virtual machine status to openmano status'''
+
+"""contain the openstack virtual machine status to openmano status"""
 vmStatus2manoFormat={'ACTIVE':'ACTIVE',
                      'PAUSED':'PAUSED',
                      'SUSPENDED': 'SUSPENDED',
@@ -65,7 +69,7 @@ netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE',
 
 #global var to have a timeout creating and deleting volumes
 volume_timeout = 60
-server_timeout = 60
+server_timeout = 300
 
 class vimconnector(vimconn.vimconnector):
     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
@@ -94,6 +98,7 @@ class vimconnector(vimconn.vimconnector):
         if not url:
             raise TypeError, 'url param can not be NoneType'
         self.persistent_info = persistent_info
+        self.availability_zone = persistent_info.get('availability_zone', None)
         self.session = persistent_info.get('session', {'reload_client': True})
         self.nova = self.session.get('nova')
         self.neutron = self.session.get('neutron')
@@ -117,7 +122,7 @@ class vimconnector(vimconn.vimconnector):
             self.logger = logging.getLogger('openmano.vim.vio')
 
         if log_level:
-            self.logger.setLevel(getattr(logging, log_level))
+            self.logger.setLevel( getattr(logging, log_level))
 
     def __getitem__(self, index):
         """Get individuals parameters.
@@ -171,7 +176,16 @@ class vimconnector(vimconn.vimconnector):
             else:
                 self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type)
             self.session['keystone'] = self.keystone
-            self.nova = self.session['nova'] = nClient.Client("2.1", session=sess, endpoint_type=self.endpoint_type)
+            # In order to enable microversion functionality an explicit microversion must be specified in 'config'.
+            # This implementation approach is due to the warning message in
+            # https://developer.openstack.org/api-guide/compute/microversions.html
+            # where it is stated that microversion backwards compatibility is not guaranteed and clients should
+            # always require a specific microversion.
+            # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config
+            version = self.config.get("microversion")
+            if not version:
+                version = "2.1"
+            self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type)
             self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type)
             self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type)
             if self.endpoint_type == "internalURL":
@@ -185,6 +199,9 @@ class vimconnector(vimconn.vimconnector):
                                                                        endpoint=glance_endpoint)
             self.session['reload_client'] = False
             self.persistent_info['session'] = self.session
+            # add availability zone info inside self.persistent_info
+            self._set_availablity_zones()
+            self.persistent_info['availability_zone'] = self.availability_zone
 
     def __net_os2mano(self, net_list_dict):
         '''Transform the net openstack format to mano format
@@ -214,7 +231,10 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + str(exception))
         elif isinstance(exception, nvExceptions.Conflict):
             raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + str(exception))
+        elif isinstance(exception, vimconn.vimconnException):
+            raise
         else:  # ()
+            self.logger.error("General Exception " + str(exception), exc_info=True)
             raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + str(exception))
 
     def get_tenant_list(self, filter_dict={}):
@@ -308,19 +328,19 @@ class vimconnector(vimconn.vimconnector):
                 ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
             if 'ip_version' not in ip_profile: 
                 ip_profile['ip_version'] = "IPv4"
-            subnet={"name":net_name+"-subnet",
+            subnet = {"name":net_name+"-subnet",
                     "network_id": new_net["network"]["id"],
                     "ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6,
                     "cidr": ip_profile['subnet_address']
                     }
-            if 'gateway_address' in ip_profile:
-                subnet['gateway_ip'] = ip_profile['gateway_address']
+            # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
+            subnet['gateway_ip'] = ip_profile.get('gateway_address')
             if ip_profile.get('dns_address'):
                 subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
             if 'dhcp_enabled' in ip_profile:
                 subnet['enable_dhcp'] = False if ip_profile['dhcp_enabled']=="false" else True
             if 'dhcp_start_address' in ip_profile:
-                subnet['allocation_pools']=[]
+                subnet['allocation_pools'] = []
                 subnet['allocation_pools'].append(dict())
                 subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
             if 'dhcp_count' in ip_profile:
@@ -567,11 +587,11 @@ class vimconnector(vimconn.vimconnector):
                             #     if interface["dedicated"]=="yes":
                             #         raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
                             #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
-
-                #create flavor
-                new_flavor=self.nova.flavors.create(name,
-                                ram,
-                                vcpus,
+                                
+                #create flavor                 
+                new_flavor=self.nova.flavors.create(name, 
+                                ram, 
+                                vcpus, 
                                 flavor_data.get('disk',1),
                                 is_public=flavor_data.get('is_public', True)
                             )
@@ -608,7 +628,6 @@ class vimconnector(vimconn.vimconnector):
             metadata: metadata of the image
         Returns the image_id
         '''
-        # ALF TODO: revise and change for the new method or session
         retry=0
         max_retries=3
         while retry<max_retries:
@@ -715,8 +734,120 @@ class vimconnector(vimconn.vimconnector):
         except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
             self._format_exception(e)
 
+    @staticmethod
+    def _create_mimemultipart(content_list):
+        """Creates a MIMEmultipart text combining the content_list
+        :param content_list: list of text scripts to be combined
+        :return: str of the created MIMEmultipart. If the list is empty returns None, if the list contains only one
+        element MIMEmultipart is not created and this content is returned
+        """
+        if not content_list:
+            return None
+        elif len(content_list) == 1:
+            return content_list[0]
+        combined_message = MIMEMultipart()
+        for content in content_list:
+            if content.startswith('#include'):
+                format = 'text/x-include-url'
+            elif content.startswith('#include-once'):
+                format = 'text/x-include-once-url'
+            elif content.startswith('#!'):
+                format = 'text/x-shellscript'
+            elif content.startswith('#cloud-config'):
+                format = 'text/cloud-config'
+            elif content.startswith('#cloud-config-archive'):
+                format = 'text/cloud-config-archive'
+            elif content.startswith('#upstart-job'):
+                format = 'text/upstart-job'
+            elif content.startswith('#part-handler'):
+                format = 'text/part-handler'
+            elif content.startswith('#cloud-boothook'):
+                format = 'text/cloud-boothook'
+            else:  # by default
+                format = 'text/x-shellscript'
+            sub_message = MIMEText(content, format, sys.getdefaultencoding())
+            combined_message.attach(sub_message)
+        return combined_message.as_string()
+
+    def __wait_for_vm(self, vm_id, status):
+        """wait until vm is in the desired status and return True.
+        If the VM gets in ERROR status, return false.
+        If the timeout is reached generate an exception"""
+        elapsed_time = 0
+        while elapsed_time < server_timeout:
+            vm_status = self.nova.servers.get(vm_id).status
+            if vm_status == status:
+                return True
+            if vm_status == 'ERROR':
+                return False
+            time.sleep(1)
+            elapsed_time += 1
+
+        # if we exceeded the timeout rollback
+        if elapsed_time >= server_timeout:
+            raise vimconn.vimconnException('Timeout waiting for instance ' + vm_id + ' to get ' + status,
+                                           http_code=vimconn.HTTP_Request_Timeout)
+
+    def _get_openstack_availablity_zones(self):
+        """
+        Get the availability zones available from openstack
+        :return:
+        """
+        try:
+            openstack_availability_zone = self.nova.availability_zones.list()
+            openstack_availability_zone = [str(zone.zoneName) for zone in openstack_availability_zone
+                                           if zone.zoneName != 'internal']
+            return openstack_availability_zone
+        except Exception as e:
+            return None
+
+    def _set_availablity_zones(self):
+        """
+        Set vim availability zone
+        :return:
+        """
+
+        if 'availability_zone' in self.config:
+            vim_availability_zones = self.config.get('availability_zone')
+            if isinstance(vim_availability_zones, str):
+                self.availability_zone = [vim_availability_zones]
+            elif isinstance(vim_availability_zones, list):
+                self.availability_zone = vim_availability_zones
+        else:
+            self.availability_zone = self._get_openstack_availablity_zones()
 
-    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None,disk_list=None):
+    def _get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+        """
+        Return the availability zone to be used by the created VM.
+        :return: The VIM availability zone to be used or None
+        """
+        if availability_zone_index is None:
+            if not self.config.get('availability_zone'):
+                return None
+            elif isinstance(self.config.get('availability_zone'), str):
+                return self.config['availability_zone']
+            else:
+                # TODO consider using a different parameter at config for default AV and AV list match
+                return self.config['availability_zone'][0]
+
+        vim_availability_zones = self.availability_zone
+        # check if the VIM offers enough availability zones as described in the VNFD
+        if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+            # check if all the names of NFV AV match VIM AV names
+            match_by_index = False
+            for av in availability_zone_list:
+                if av not in vim_availability_zones:
+                    match_by_index = True
+                    break
+            if match_by_index:
+                return vim_availability_zones[availability_zone_index]
+            else:
+                return availability_zone_list[availability_zone_index]
+        else:
+            raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
+
+    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+                       availability_zone_index=None, availability_zone_list=None):
         '''Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
@@ -731,25 +862,48 @@ class vimconnector(vimconn.vimconnector):
                 type: 'virtual', 'PF', 'VF', 'VFnotShared'
                 vim_id: filled/added by this function
                 floating_ip: True/False (or it can be None)
+                'cloud_config': (optional) dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) string is a text script to be passed directly to cloud-init
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                'size': (mandatory) string with the size of the disk in GB
+            availability_zone_index: Index of availability_zone_list to use for this VM. None if not AV required
+            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                availability_zone_index is None
                 #TODO ip, security groups
         Returns the instance identifier
         '''
         self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
         try:
+            server = None
             metadata={}
             net_list_vim=[]
-            external_network=[] #list of external networks to be connected to instance, later on used to create floating_ip
+            external_network=[]     # list of external networks to be connected to instance, later on used to create floating_ip
+            no_secured_ports = []   # List of port-ids with port-security disabled
             self._reload_connection()
-            metadata_vpci = {} #For a specific neutron plugin
+            metadata_vpci={}   # For a specific neutron plugin
+            block_device_mapping = None
             for net in net_list:
                 if not net.get("net_id"): #skip non connected iface
                     continue
 
                 port_dict={
-                            "network_id": net["net_id"],
-                            "name": net.get("name"),
-                            "admin_state_up": True
-                        }
+                    "network_id": net["net_id"],
+                    "name": net.get("name"),
+                    "admin_state_up": True
+                }
                 if net["type"]=="virtual":
                     if "vpci" in net:
                         metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
@@ -781,15 +935,20 @@ class vimconnector(vimconn.vimconnector):
                     port_dict["name"]=name
                 if net.get("mac_address"):
                     port_dict["mac_address"]=net["mac_address"]
-                if net.get("port_security") == False:
-                    port_dict["port_security_enabled"]=net["port_security"]
-
                 new_port = self.neutron.create_port({"port": port_dict })
-
                 net["mac_adress"] = new_port["port"]["mac_address"]
                 net["vim_id"] = new_port["port"]["id"]
-                net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
-                net_list_vim.append({"port-id": new_port["port"]["id"]})
+                # if trying to use a network without a subnetwork, it will return an empty list
+                fixed_ips = new_port["port"].get("fixed_ips")
+                if fixed_ips:
+                    net["ip"] = fixed_ips[0].get("ip_address")
+                else:
+                    net["ip"] = None
+
+                port = {"port-id": new_port["port"]["id"]}
+                if float(self.nova.api_version.get_string()) >= 2.32:
+                    port["tag"] = new_port["port"]["name"]
+                net_list_vim.append(port)
 
                 if net.get('floating_ip', False):
                     net['exit_on_floating_ip_error'] = True
@@ -798,6 +957,11 @@ class vimconnector(vimconn.vimconnector):
                     net['exit_on_floating_ip_error'] = False
                     external_network.append(net)
 
+                # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped.
+                # As a workaround we wait until the VM is active and then disable the port-security
+                if net.get("port_security") == False:
+                    no_secured_ports.append(new_port["port"]["id"])
+
             if metadata_vpci:
                 metadata = {"pci_assignement": json.dumps(metadata_vpci)}
                 if len(metadata["pci_assignement"]) >255:
@@ -805,24 +969,27 @@ class vimconnector(vimconn.vimconnector):
                     #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
                     self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
                     metadata = {}
-
+            
             self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s' metadata %s",
                               name, image_id, flavor_id, str(net_list_vim), description, str(metadata))
-
+            
             security_groups   = self.config.get('security_groups')
             if type(security_groups) is str:
                 security_groups = ( security_groups, )
             #cloud config
             userdata=None
             config_drive = None
+            userdata_list = []
             if isinstance(cloud_config, dict):
                 if cloud_config.get("user-data"):
-                    userdata=cloud_config["user-data"]
+                    if isinstance(cloud_config["user-data"], str):
+                        userdata_list.append(cloud_config["user-data"])
+                    else:
+                        for u in cloud_config["user-data"]:
+                            userdata_list.append(u)
                 if cloud_config.get("boot-data-drive") != None:
                     config_drive = cloud_config["boot-data-drive"]
                 if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
-                    if userdata:
-                        raise vimconn.vimconnConflictException("Cloud-config cannot contain both 'userdata' and 'config-files'/'users'/'key-pairs'")
                     userdata_dict={}
                     #default user
                     if cloud_config.get("key-pairs"):
@@ -856,17 +1023,17 @@ class vimconnector(vimconn.vimconnector):
                             if file.get("owner"):
                                 file_info["owner"] = file["owner"]
                             userdata_dict["write_files"].append(file_info)
-                    userdata = "#cloud-config\n"
-                    userdata += yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False)
+                    userdata_list.append("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4,
+                                                                              default_flow_style=False))
+                    userdata = self._create_mimemultipart(userdata_list)
                 self.logger.debug("userdata: %s", userdata)
             elif isinstance(cloud_config, str):
                 userdata = cloud_config
 
             #Create additional volumes in case these are present in disk_list
-            block_device_mapping = None
             base_disk_index = ord('b')
             if disk_list != None:
-                block_device_mapping = dict()
+                block_device_mapping = {}
                 for disk in disk_list:
                     if 'image_id' in disk:
                         volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' +
@@ -902,34 +1069,45 @@ class vimconnector(vimconn.vimconnector):
 
                     raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
                                                    http_code=vimconn.HTTP_Request_Timeout)
-
+            # get the availability zone to place the VM in
+            vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list)
+
+            self.logger.debug("nova.servers.create({}, {}, {}, nics={}, meta={}, security_groups={}, "
+                              "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
+                              "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim, metadata,
+                                                                security_groups, vm_av_zone, self.config.get('keypair'),
+                              userdata, config_drive, block_device_mapping))
             server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata,
                                               security_groups=security_groups,
-                                              availability_zone=self.config.get('availability_zone'),
+                                              availability_zone=vm_av_zone,
                                               key_name=self.config.get('keypair'),
                                               userdata=userdata,
-                                              config_drive = config_drive,
-                                              block_device_mapping = block_device_mapping
+                                              config_drive=config_drive,
+                                              block_device_mapping=block_device_mapping
                                               )  # , description=description)
+
+            # Previously mentioned workaround to wait until the VM is active and then disable the port-security
+            if no_secured_ports:
+                self.__wait_for_vm(server.id, 'ACTIVE')
+
+            for port_id in no_secured_ports:
+                try:
+                    self.neutron.update_port(port_id, {"port": {"port_security_enabled": False, "security_groups": None} })
+
+                except Exception as e:
+                    self.logger.error("It was not possible to disable port security for port {}".format(port_id))
+                    self.delete_vminstance(server.id)
+                    raise
+
             #print "DONE :-)", server
             pool_id = None
             floating_ips = self.neutron.list_floatingips().get("floatingips", ())
-            for floating_network in external_network:
-                try:
-                    # wait until vm is active
-                    elapsed_time = 0
-                    while elapsed_time < server_timeout:
-                        status = self.nova.servers.get(server.id).status
-                        if status == 'ACTIVE':
-                            break
-                        time.sleep(1)
-                        elapsed_time += 1
 
-                    #if we exceeded the timeout rollback
-                    if elapsed_time >= server_timeout:
-                        raise vimconn.vimconnException('Timeout creating instance ' + name,
-                                                       http_code=vimconn.HTTP_Request_Timeout)
+            if external_network:
+                self.__wait_for_vm(server.id, 'ACTIVE')
 
+            for floating_network in external_network:
+                try:
                     assigned = False
                     while(assigned == False):
                         if floating_ips:
@@ -973,26 +1151,31 @@ class vimconnector(vimconn.vimconnector):
                     if not floating_network['exit_on_floating_ip_error']:
                         self.logger.warn("Cannot create floating_ip. %s", str(e))
                         continue
-                    self.delete_vminstance(server.id)
                     raise
 
             return server.id
 #        except nvExceptions.NotFound as e:
 #            error_value=-vimconn.HTTP_Not_Found
 #            error_text= "vm instance %s not found" % vm_id
-        except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
+#        except TypeError as e:
+#            raise vimconn.vimconnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
+
+        except Exception as e:
             # delete the volumes we just created
-            if block_device_mapping != None:
+            if block_device_mapping:
                 for volume_id in block_device_mapping.itervalues():
                     self.cinder.volumes.delete(volume_id)
 
-            # delete ports we just created
-            for net_item in net_list_vim:
-                if 'port-id' in net_item:
-                    self.neutron.delete_port(net_item['port-id'])
+            # Delete the VM
+            if server != None:
+                self.delete_vminstance(server.id)
+            else:
+                # delete ports we just created
+                for net_item in net_list_vim:
+                    if 'port-id' in net_item:
+                        self.neutron.delete_port(net_item['port-id'])
+
             self._format_exception(e)
-        except TypeError as e:
-            raise vimconn.vimconnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
 
     def get_vminstance(self,vm_id):
         '''Returns the VM instance information from VIM'''
@@ -1449,4 +1632,6 @@ class vimconnector(vimconn.vimconnector):
         if self.debug:
             print "get_hosts " + error_text
         return error_value, error_text        
+  
+
 
index e722dc8..abfffbe 100644 (file)
@@ -785,7 +785,8 @@ class vimconnector(vimconn.vimconnector):
             #print text
             return -vim_response.status_code,text
 
-    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list, cloud_config=None, disk_list=None):
+    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+                       availability_zone_index=None, availability_zone_list=None):
         '''Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
index a33ca4e..9faf8b4 100644 (file)
@@ -357,6 +357,11 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return vca object that letter can be used to connect to vcloud direct as admin
         """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
+        self.vca = vca
         try:
             if self.org_uuid is None:
                 org_dict = self.get_org_list()
@@ -422,9 +427,61 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
 
     def delete_tenant(self, tenant_id=None):
-        """Delete a tenant from VIM"""
-        'Returns the tenant identifier'
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
+        """ Delete a tenant from VIM
+             Args:
+                tenant_id: identifier of the tenant to be deleted.
+
+            Return:
+                returns the tenant identifier in UUID format.
+                If the action fails, the method raises an exception.
+        """
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+        if tenant_id is not None:
+            if vca.vcloud_session and vca.vcloud_session.organization:
+                #Get OrgVDC
+                url_list = [self.vca.host, '/api/vdc/', tenant_id]
+                orgvdc_herf = ''.join(url_list)
+                response = Http.get(url=orgvdc_herf,
+                                headers=vca.vcloud_session.get_vcloud_headers(),
+                                verify=vca.verify,
+                                logger=vca.logger)
+
+                if response.status_code != requests.codes.ok:
+                    self.logger.debug("delete_tenant():GET REST API call {} failed. "\
+                                      "Return status code {}".format(orgvdc_herf,
+                                                                     response.status_code))
+                    raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
+
+                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+                vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
+                vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
+
+                #Delete OrgVDC
+                response = Http.delete(url=vdc_remove_href,
+                                    headers=vca.vcloud_session.get_vcloud_headers(),
+                                    verify=vca.verify,
+                                    logger=vca.logger)
+
+                if response.status_code == 202:
+                        delete_vdc_task = taskType.parseString(response.content, True)
+                        if type(delete_vdc_task) is GenericTask:
+                            self.vca.block_until_completed(delete_vdc_task)
+                            self.logger.info("Deleted tenant with ID {}".format(tenant_id))
+                            return tenant_id
+                else:
+                    self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
+                                      "Return status code {}".format(vdc_remove_href,
+                                                                     response.status_code))
+                    raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
+        else:
+            self.logger.debug("delete_tenant():Incorrect tenant ID  {}".format(tenant_id))
+            raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
+
 
     def get_tenant_list(self, filter_dict={}):
         """Obtain tenants of VIM
@@ -495,19 +552,16 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
 
         if not self.tenant_name:
             raise vimconn.vimconnConnectionException("Tenant name is empty.")
 
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
 
         vdc_uuid = vdc.get_id().split(":")[3]
-        networks = vca.get_networks(vdc.get_name())
+        networks = self.vca.get_networks(vdc.get_name())
         network_list = []
         try:
             for network in networks:
@@ -553,21 +607,18 @@ class vimconnector(vimconn.vimconnector):
             List can be empty
         """
 
-        self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+        self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
 
         if not self.tenant_name:
             raise vimconn.vimconnConnectionException("Tenant name is empty.")
 
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
 
         try:
             vdcid = vdc.get_id().split(":")[3]
-            networks = vca.get_networks(vdc.get_name())
+            networks = self.vca.get_networks(vdc.get_name())
             network_list = []
 
             for network in networks:
@@ -613,15 +664,11 @@ class vimconnector(vimconn.vimconnector):
         """Method obtains network details of net_id VIM network
            Return a dict with  the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
         try:
-            vdc = vca.get_vdc(self.tenant_name)
+            vdc = self.get_vdc_details()
             vdc_id = vdc.get_id().split(":")[3]
 
-            networks = vca.get_networks(vdc.get_name())
+            networks = self.vca.get_networks(vdc.get_name())
             filter_dict = {}
 
             for network in networks:
@@ -652,10 +699,6 @@ class vimconnector(vimconn.vimconnector):
             Returns the network identifier or raise an exception
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() for tenant {} is failed.".format(self.tenant_name))
-
         # ############# Stub code for SRIOV #################
 #         dvport_group = self.get_dvport_group(net_id)
 #         if dvport_group:
@@ -693,10 +736,6 @@ class vimconnector(vimconn.vimconnector):
 
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
         dict_entry = {}
         try:
             for net in net_list:
@@ -754,6 +793,13 @@ class vimconnector(vimconn.vimconnector):
         cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
         disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
 
+        if not isinstance(ram, int):
+            raise vimconn.vimconnException("Non-integer value for ram")
+        elif not isinstance(cpu, int):
+            raise vimconn.vimconnException("Non-integer value for cpu")
+        elif not isinstance(disk, int):
+            raise vimconn.vimconnException("Non-integer value for disk")
+
         extended_flv = flavor_data.get("extended")
         if extended_flv:
             numas=extended_flv.get("numas")
@@ -801,12 +847,81 @@ class vimconnector(vimconn.vimconnector):
 
     def delete_image(self, image_id):
         """
-
-        :param image_id:
-        :return:
+            Deletes a tenant image from VIM
+            Args:
+                image_id is ID of Image to be deleted
+            Return:
+                returns the image identifier in UUID format or raises an exception on error
         """
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed")
+        # Get Catalog details
+        url_list = [self.vca.host, '/api/catalog/', image_id]
+        catalog_herf = ''.join(url_list)
+        response = Http.get(url=catalog_herf,
+                            headers=vca.vcloud_session.get_vcloud_headers(),
+                            verify=vca.verify,
+                            logger=vca.logger)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.debug("delete_image():GET REST API call {} failed. "\
+                              "Return status code {}".format(catalog_herf,
+                                                             response.status_code))
+            raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
+
+        lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+        namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+        namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+
+        catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
+        catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
+        for catalogItem in catalogItems:
+            catalogItem_href = catalogItem.attrib['href']
+
+            #GET details of catalogItem
+            response = Http.get(url=catalogItem_href,
+                            headers=vca.vcloud_session.get_vcloud_headers(),
+                            verify=vca.verify,
+                            logger=vca.logger)
+
+            if response.status_code != requests.codes.ok:
+                self.logger.debug("delete_image():GET REST API call {} failed. "\
+                                  "Return status code {}".format(catalog_herf,
+                                                                 response.status_code))
+                raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
+                                                                                    catalogItem,
+                                                                                    image_id))
+
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+            catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
+
+            #Remove catalogItem
+            response = Http.delete(url= catalogitem_remove_href,
+                                    headers=vca.vcloud_session.get_vcloud_headers(),
+                                    verify=vca.verify,
+                                    logger=vca.logger)
+            if response.status_code == requests.codes.no_content:
+                self.logger.debug("Deleted Catalog item {}".format(catalogItem))
+            else:
+                raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
+
+        #Remove catalog
+        url_list = [self.vca.host, '/api/admin/catalog/', image_id]
+        catalog_remove_herf = ''.join(url_list)
+        response = Http.delete(url= catalog_remove_herf,
+                                    headers=vca.vcloud_session.get_vcloud_headers(),
+                                    verify=vca.verify,
+                                    logger=vca.logger)
+
+        if response.status_code == requests.codes.no_content:
+            self.logger.debug("Deleted Catalog {}".format(image_id))
+            return image_id
+        else:
+            raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
 
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
 
     def catalog_exists(self, catalog_name, catalogs):
         """
@@ -1077,9 +1192,6 @@ class vimconnector(vimconn.vimconnector):
 
         Return: if image uploaded correct method will provide image catalog UUID.
         """
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
 
         if not path:
             raise vimconn.vimconnException("Image path can't be None.")
@@ -1104,21 +1216,21 @@ class vimconnector(vimconn.vimconnector):
                           "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
 
         try:
-            catalogs = vca.get_catalogs()
+            catalogs = self.vca.get_catalogs()
         except Exception as exp:
             self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
             raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
 
         if len(catalogs) == 0:
             self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
-            result = self.create_vimcatalog(vca, catalog_md5_name)
+            result = self.create_vimcatalog(self.vca, catalog_md5_name)
             if not result:
                 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
-            result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
+            result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
                                           media_name=filename, medial_file_name=path, progress=progress)
             if not result:
                 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
-            return self.get_catalogid(catalog_name, vca.get_catalogs())
+            return self.get_catalogid(catalog_name, self.vca.get_catalogs())
         else:
             for catalog in catalogs:
                 # search for existing catalog if we find same name we return ID
@@ -1127,20 +1239,20 @@ class vimconnector(vimconn.vimconnector):
                     self.logger.debug("Found existing catalog entry for {} "
                                       "catalog id {}".format(catalog_name,
                                                              self.get_catalogid(catalog_md5_name, catalogs)))
-                    return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
+                    return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
 
         # if we didn't find existing catalog we create a new one and upload image.
         self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
-        result = self.create_vimcatalog(vca, catalog_md5_name)
+        result = self.create_vimcatalog(self.vca, catalog_md5_name)
         if not result:
             raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
 
-        result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
+        result = self.upload_vimimage(vca=self.vca, catalog_name=catalog_md5_name,
                                       media_name=filename, medial_file_name=path, progress=progress)
         if not result:
             raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
 
-        return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
+        return self.get_catalogid(catalog_md5_name, self.vca.get_catalogs())
 
     def get_image_list(self, filter_dict={}):
         '''Obtain tenant images from VIM
@@ -1153,12 +1265,10 @@ class vimconnector(vimconn.vimconnector):
             [{<the fields at Filter_dict plus some VIM specific>}, ...]
             List can be empty
         '''
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
         try:
             image_list = []
-            catalogs = vca.get_catalogs()
+            catalogs = self.vca.get_catalogs()
             if len(catalogs) == 0:
                 return image_list
             else:
@@ -1229,7 +1339,7 @@ class vimconnector(vimconn.vimconnector):
             return False
         return False
 
-    def get_namebyvappid(self, vca=None, vdc=None, vapp_uuid=None):
+    def get_namebyvappid(self, vdc=None, vapp_uuid=None):
         """Method returns vApp name from vCD and lookup done by vapp_id.
 
         Args:
@@ -1248,8 +1358,13 @@ class vimconnector(vimconn.vimconnector):
                 # we care only about UUID the rest doesn't matter
                 vappid = ref.href.split("vapp")[1][1:]
                 if vappid == vapp_uuid:
-                    response = Http.get(ref.href, headers=vca.vcloud_session.get_vcloud_headers(), verify=vca.verify,
+                    response = Http.get(ref.href, headers=self.vca.vcloud_session.get_vcloud_headers(), verify=self.vca.verify,
                                         logger=self.logger)
+
+                    #Retry login if session expired & retry sending request
+                    if response.status_code == 403:
+                        response = self.retry_rest('GET', ref.href)
+
                     tree = XmlElementTree.fromstring(response.content)
                     return tree.attrib['name']
         except Exception as e:
@@ -1258,7 +1373,7 @@ class vimconnector(vimconn.vimconnector):
         return None
 
     def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
-                       cloud_config=None, disk_list=None):
+                       cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
         """Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
@@ -1284,9 +1399,6 @@ class vimconnector(vimconn.vimconnector):
         self.logger.info("Creating new instance for entry {}".format(name))
         self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
                                     description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
 
         #new vm name = vmname + tenant_id + uuid
         new_vm_name = [name, '-', str(uuid.uuid4())]
@@ -1298,11 +1410,15 @@ class vimconnector(vimconn.vimconnector):
         #     return vapp_uuid
 
         # we check for presence of VDC, Catalog entry and Flavor.
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnNotFoundException(
                 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
-        catalogs = vca.get_catalogs()
+        catalogs = self.vca.get_catalogs()
+        if catalogs is None:
+            # Retry once by refreshing the token, if the first attempt failed
+            self.get_token()
+            catalogs = self.vca.get_catalogs()
         if catalogs is None:
             raise vimconn.vimconnNotFoundException(
                 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
@@ -1319,6 +1435,7 @@ class vimconnector(vimconn.vimconnector):
         vm_cpus = None
         vm_memory = None
         vm_disk = None
+        numas = None
 
         if flavor_id is not None:
             if flavor_id not in vimconnector.flavorlist:
@@ -1371,19 +1488,26 @@ class vimconnector(vimconn.vimconnector):
         # use: 'data', 'bridge', 'mgmt'
         # create vApp.  Set vcpu and ram based on flavor id.
         try:
-            vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
-                                       self.get_catalogbyid(image_id, catalogs),
-                                       network_name=None,  # None while creating vapp
-                                       network_mode=network_mode,
-                                       vm_name=vmname_andid,
-                                       vm_cpus=vm_cpus,  # can be None if flavor is None
-                                       vm_memory=vm_memory)  # can be None if flavor is None
+            for retry in (1,2):
+                vapptask = self.vca.create_vapp(self.tenant_name, vmname_andid, templateName,
+                                           self.get_catalogbyid(image_id, catalogs),
+                                           network_name=None,  # None while creating vapp
+                                           network_mode=network_mode,
+                                           vm_name=vmname_andid,
+                                           vm_cpus=vm_cpus,  # can be None if flavor is None
+                                           vm_memory=vm_memory)  # can be None if flavor is None
+
+                if not vapptask and retry==1:
+                    self.get_token() # Retry getting token
+                    continue
+                else:
+                    break
 
             if vapptask is None or vapptask is False:
                 raise vimconn.vimconnUnexpectedResponse(
                     "new_vminstance(): failed to create vApp {}".format(vmname_andid))
             if type(vapptask) is VappTask:
-                vca.block_until_completed(vapptask)
+                self.vca.block_until_completed(vapptask)
 
         except Exception as exp:
             raise vimconn.vimconnUnexpectedResponse(
@@ -1391,14 +1515,14 @@ class vimconnector(vimconn.vimconnector):
 
         # we should have now vapp in undeployed state.
         try:
-            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
-            vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+            vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
+
         except Exception as exp:
             raise vimconn.vimconnUnexpectedResponse(
                     "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
                     .format(vmname_andid, exp))
 
-        if vapp is None:
+        if vapp_uuid is None:
             raise vimconn.vimconnUnexpectedResponse(
                 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
                                                                             vmname_andid))
@@ -1433,6 +1557,8 @@ class vimconnector(vimconn.vimconnector):
                                                             pci_devices_info,
                                                             vmname_andid)
                                  )
+
+        vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
         # Modify vm disk
         if vm_disk:
             #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
@@ -1464,7 +1590,7 @@ class vimconnector(vimconn.vimconnector):
                     if added_existing_disk:
                         time.sleep(5)
                         added_existing_disk = False
-                    self.add_new_disk(vca, vapp_uuid, disk['size'])
+                    self.add_new_disk(vapp_uuid, disk['size'])
 
         if numas:
             # Assigning numa affinity setting
@@ -1500,12 +1626,14 @@ class vimconnector(vimconn.vimconnector):
                                   - NONE (No IP addressing mode specified.)"""
 
                 if primary_netname is not None:
-                    nets = filter(lambda n: n.name == interface_net_name, vca.get_networks(self.tenant_name))
+                    nets = filter(lambda n: n.name == interface_net_name, self.vca.get_networks(self.tenant_name))
                     if len(nets) == 1:
                         self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
+
+                        vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
                         task = vapp.connect_to_network(nets[0].name, nets[0].href)
                         if type(task) is GenericTask:
-                            vca.block_until_completed(task)
+                            self.vca.block_until_completed(task)
                         # connect network to VM - with all DHCP by default
 
                         type_list = ['PF','VF','VFnotShared']
@@ -1529,6 +1657,7 @@ class vimconnector(vimconn.vimconnector):
                                                                 net)
                 nicIndex += 1
 
+            vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
             # cloud-init for ssh-key injection
             if cloud_config:
                 self.cloud_init(vapp,cloud_config)
@@ -1537,7 +1666,7 @@ class vimconnector(vimconn.vimconnector):
             self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
             deploytask = vapp.deploy(powerOn=False)
             if type(deploytask) is GenericTask:
-                vca.block_until_completed(deploytask)
+                self.vca.block_until_completed(deploytask)
 
         # ############# Stub code for SRIOV #################
         #Add SRIOV
@@ -1574,28 +1703,32 @@ class vimconnector(vimconn.vimconnector):
                                                                 str(memReserve),str(vm_obj)))
 
             self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
+
+            vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
             poweron_task = vapp.poweron()
             if type(poweron_task) is GenericTask:
-                vca.block_until_completed(poweron_task)
+                self.vca.block_until_completed(poweron_task)
 
         except Exception as exp :
             # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
-            self.logger.debug("new_vminstance(): Failed create new vm instance {}".format(name, exp))
-            raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {}".format(name, exp))
+            self.logger.debug("new_vminstance(): Failed create new vm instance {} with exception {}"
+                              .format(name, exp))
+            raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
+                                           .format(name, exp))
 
         # check if vApp deployed and if that the case return vApp UUID otherwise -1
         wait_time = 0
         vapp_uuid = None
         while wait_time <= MAX_WAIT_TIME:
             try:
-                vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+                vapp = self.vca.get_vapp(self.get_vdc_details(), vmname_andid)
             except Exception as exp:
                 raise vimconn.vimconnUnexpectedResponse(
                         "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
                         .format(vmname_andid, exp))
 
             if vapp and vapp.me.deployed:
-                vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+                vapp_uuid = self.get_vappid(self.get_vdc_details(), vmname_andid)
                 break
             else:
                 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
@@ -1627,11 +1760,8 @@ class vimconnector(vimconn.vimconnector):
         """Returns the VM instance information from VIM"""
 
         self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
 
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException(
                 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
@@ -1676,11 +1806,8 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
 
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
                 self.tenant_name))
@@ -1688,7 +1815,7 @@ class vimconnector(vimconn.vimconnector):
                 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
         try:
-            vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
+            vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
             if vapp_name is None:
                 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
@@ -1696,7 +1823,7 @@ class vimconnector(vimconn.vimconnector):
                 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
 
             # Delete vApp and wait for status change if task executed and vApp is None.
-            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+            vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
 
             if vapp:
                 if vapp.me.deployed:
@@ -1705,14 +1832,14 @@ class vimconnector(vimconn.vimconnector):
                     powered_off = False
                     wait_time = 0
                     while wait_time <= MAX_WAIT_TIME:
-                        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+                        vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
                         if not vapp:
                             self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                             return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
 
                         power_off_task = vapp.poweroff()
                         if type(power_off_task) is GenericTask:
-                            result = vca.block_until_completed(power_off_task)
+                            result = self.vca.block_until_completed(power_off_task)
                             if result:
                                 powered_off = True
                                 break
@@ -1731,14 +1858,14 @@ class vimconnector(vimconn.vimconnector):
                     wait_time = 0
                     undeployed = False
                     while wait_time <= MAX_WAIT_TIME:
-                        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+                        vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
                         if not vapp:
                             self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                             return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
                         undeploy_task = vapp.undeploy(action='powerOff')
 
                         if type(undeploy_task) is GenericTask:
-                            result = vca.block_until_completed(undeploy_task)
+                            result = self.vca.block_until_completed(undeploy_task)
                             if result:
                                 undeployed = True
                                 break
@@ -1753,14 +1880,14 @@ class vimconnector(vimconn.vimconnector):
 
                 # delete vapp
                 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
-                vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+                vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
 
                 if vapp is not None:
                     wait_time = 0
                     result = False
 
                     while wait_time <= MAX_WAIT_TIME:
-                        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+                        vapp = self.vca.get_vapp(self.get_vdc_details(), vapp_name)
                         if not vapp:
                             self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                             return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
@@ -1768,8 +1895,8 @@ class vimconnector(vimconn.vimconnector):
                         delete_task = vapp.delete()
 
                         if type(delete_task) is GenericTask:
-                            vca.block_until_completed(delete_task)
-                            result = vca.block_until_completed(delete_task)
+                            self.vca.block_until_completed(delete_task)
+                            result = self.vca.block_until_completed(delete_task)
                             if result:
                                 break
                         else:
@@ -1785,7 +1912,7 @@ class vimconnector(vimconn.vimconnector):
             self.logger.debug(traceback.format_exc())
             raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
 
-        if vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) is None:
+        if self.vca.get_vapp(self.get_vdc_details(), vapp_name) is None:
             self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
             return vm__vim_uuid
         else:
@@ -1817,25 +1944,21 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
         vms_dict = {}
         nsx_edge_list = []
         for vmuuid in vm_list:
-            vmname = self.get_namebyvappid(vca, vdc, vmuuid)
+            vmname = self.get_namebyvappid(self.get_vdc_details(), vmuuid)
             if vmname is not None:
 
                 try:
-                    the_vapp = vca.get_vapp(vdc, vmname)
+                    vm_pci_details = self.get_vm_pci_details(vmuuid)
+                    the_vapp = self.vca.get_vapp(self.get_vdc_details(), vmname)
                     vm_info = the_vapp.get_vms_details()
                     vm_status = vm_info[0]['status']
-                    vm_pci_details = self.get_vm_pci_details(vmuuid)
                     vm_info[0].update(vm_pci_details)
 
                     vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
@@ -1984,15 +2107,11 @@ class vimconnector(vimconn.vimconnector):
         if vm__vim_uuid is None or action_dict is None:
             raise vimconn.vimconnException("Invalid request. VM id or action is None.")
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
-        vdc = vca.get_vdc(self.tenant_name)
+        vdc = self.get_vdc_details()
         if vdc is None:
             return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)
 
-        vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
+        vapp_name = self.get_namebyvappid(vdc, vm__vim_uuid)
         if vapp_name is None:
             self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
             raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
@@ -2000,7 +2119,7 @@ class vimconnector(vimconn.vimconnector):
             self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
 
         try:
-            the_vapp = vca.get_vapp(vdc, vapp_name)
+            the_vapp = self.vca.get_vapp(vdc, vapp_name)
             # TODO fix all status
             if "start" in action_dict:
                 vm_info = the_vapp.get_vms_details()
@@ -2008,28 +2127,28 @@ class vimconnector(vimconn.vimconnector):
                 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
                 if vm_status == "Suspended" or vm_status == "Powered off":
                     power_on_task = the_vapp.poweron()
-                    result = vca.block_until_completed(power_on_task)
+                    result = self.vca.block_until_completed(power_on_task)
                     self.instance_actions_result("start", result, vapp_name)
             elif "rebuild" in action_dict:
                 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
                 rebuild_task = the_vapp.deploy(powerOn=True)
-                result = vca.block_until_completed(rebuild_task)
+                result = self.vca.block_until_completed(rebuild_task)
                 self.instance_actions_result("rebuild", result, vapp_name)
             elif "pause" in action_dict:
                 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
                 pause_task = the_vapp.undeploy(action='suspend')
-                result = vca.block_until_completed(pause_task)
+                result = self.vca.block_until_completed(pause_task)
                 self.instance_actions_result("pause", result, vapp_name)
             elif "resume" in action_dict:
                 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
                 power_task = the_vapp.poweron()
-                result = vca.block_until_completed(power_task)
+                result = self.vca.block_until_completed(power_task)
                 self.instance_actions_result("resume", result, vapp_name)
             elif "shutoff" in action_dict or "shutdown" in action_dict:
                 action_name , value = action_dict.items()[0]
                 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
                 power_off_task = the_vapp.undeploy(action='powerOff')
-                result = vca.block_until_completed(power_off_task)
+                result = self.vca.block_until_completed(power_off_task)
                 if action_name == "shutdown":
                     self.instance_actions_result("shutdown", result, vapp_name)
                 else:
@@ -2126,10 +2245,6 @@ class vimconnector(vimconn.vimconnector):
             The return network name.
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
         if not network_uuid:
             return None
 
@@ -2156,10 +2271,6 @@ class vimconnector(vimconn.vimconnector):
             network_uuid: network_id
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
         if not network_name:
             self.logger.debug("get_network_id_by_name() : Network name is empty")
             return None
@@ -2189,18 +2300,18 @@ class vimconnector(vimconn.vimconnector):
                 The return XML respond
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
-        url_list = [vca.host, '/api/org']
+        url_list = [self.vca.host, '/api/org']
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
             response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+                                headers=self.vca.vcloud_session.get_vcloud_headers(),
+                                verify=self.vca.verify,
+                                logger=self.vca.logger)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
             if response.status_code == requests.codes.ok:
                 return response.content
 
@@ -2218,21 +2329,22 @@ class vimconnector(vimconn.vimconnector):
                 The return XML respond
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
         if org_uuid is None:
             return None
 
-        url_list = [vca.host, '/api/org/', org_uuid]
+        url_list = [self.vca.host, '/api/org/', org_uuid]
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
             response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+                                headers=self.vca.vcloud_session.get_vcloud_headers(),
+                                verify=self.vca.verify,
+                                logger=self.vca.logger)
+
+            # Retry login if the session expired, then resend the request
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
             if response.status_code == requests.codes.ok:
                 return response.content
 
@@ -2253,9 +2365,6 @@ class vimconnector(vimconn.vimconnector):
         """
 
         org_dict = {}
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
 
         if org_uuid is None:
             return org_dict
@@ -2293,9 +2402,6 @@ class vimconnector(vimconn.vimconnector):
         """
 
         org_dict = {}
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
 
         content = self.list_org_action()
         try:
@@ -2467,21 +2573,22 @@ class vimconnector(vimconn.vimconnector):
                 The return XML respond
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
         if network_uuid is None:
             return None
 
-        url_list = [vca.host, '/api/network/', network_uuid]
+        url_list = [self.vca.host, '/api/network/', network_uuid]
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
             response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+                                headers=self.vca.vcloud_session.get_vcloud_headers(),
+                                verify=self.vca.verify,
+                                logger=self.vca.logger)
+
+            # Retry login if the session expired, then resend the request
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
             if response.status_code == requests.codes.ok:
                 return response.content
 
@@ -2906,8 +3013,7 @@ class vimconnector(vimconn.vimconnector):
                 # application/vnd.vmware.admin.providervdc+xml
                 # we need find a template from witch we instantiate VDC
                 if child.tag.split("}")[1] == 'VdcTemplate':
-                    if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get(
-                            'name') == 'openmano':
+                    if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
                         vdc_template_ref = child.attrib.get('href')
         except:
             self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
@@ -2930,6 +3036,11 @@ class vimconnector(vimconn.vimconnector):
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
             response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
                                  logger=vca.logger)
+
+            vdc_task = taskType.parseString(response.content, True)
+            if type(vdc_task) is GenericTask:
+                self.vca.block_until_completed(vdc_task)
+
             # if we all ok we respond with content otherwise by default None
             if response.status_code >= 200 and response.status_code < 300:
                 return response.content
@@ -3044,7 +3155,7 @@ class vimconnector(vimconn.vimconnector):
         if need_admin_access:
             vca = self.connect_as_admin()
         else:
-            vca = self.connect()
+            vca = self.vca
 
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed")
@@ -3060,6 +3171,10 @@ class vimconnector(vimconn.vimconnector):
                                 verify=vca.verify,
                                 logger=vca.logger)
 
+            if response.status_code == 403:
+                if need_admin_access == False:
+                    response = self.retry_rest('GET', get_vapp_restcall)
+
             if response.status_code != requests.codes.ok:
                 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
                                                                                           response.status_code))
@@ -3166,21 +3281,20 @@ class vimconnector(vimconn.vimconnector):
 
     def acuire_console(self, vm_uuid=None):
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
         if vm_uuid is None:
             return None
 
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+        if not (not self.vca.vcloud_session or not self.vca.vcloud_session.organization):
             vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
             console_dict = vm_dict['acquireTicket']
             console_rest_call = console_dict['href']
 
             response = Http.post(url=console_rest_call,
-                                 headers=vca.vcloud_session.get_vcloud_headers(),
-                                 verify=vca.verify,
-                                 logger=vca.logger)
+                                 headers=self.vca.vcloud_session.get_vcloud_headers(),
+                                 verify=self.vca.verify,
+                                 logger=self.vca.logger)
+            if response.status_code == 403:
+                response = self.retry_rest('POST', console_rest_call)
 
             if response.status_code == requests.codes.ok:
                 return response.content
@@ -3237,17 +3351,17 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return network uuid or return None
         """
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
         if disk_href is None or disk_size is None:
             return None
 
-        if vca.vcloud_session and vca.vcloud_session.organization:
+        if self.vca.vcloud_session and self.vca.vcloud_session.organization:
             response = Http.get(url=disk_href,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+                                headers=self.vca.vcloud_session.get_vcloud_headers(),
+                                verify=self.vca.verify,
+                                logger=self.vca.logger)
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', disk_href)
 
         if response.status_code != requests.codes.ok:
             self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
@@ -3269,13 +3383,17 @@ class vimconnector(vimconn.vimconnector):
                                              xml_declaration=True)
 
             #Send PUT request to modify disk size
-            headers = vca.vcloud_session.get_vcloud_headers()
+            headers = self.vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
 
             response = Http.put(url=disk_href,
                                 data=data,
                                 headers=headers,
-                                verify=vca.verify, logger=self.logger)
+                                verify=self.vca.verify, logger=self.logger)
+
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', disk_href, add_headers, data)
 
             if response.status_code != 202:
                 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
@@ -3283,7 +3401,7 @@ class vimconnector(vimconn.vimconnector):
             else:
                 modify_disk_task = taskType.parseString(response.content, True)
                 if type(modify_disk_task) is GenericTask:
-                    status = vca.block_until_completed(modify_disk_task)
+                    status = self.vca.block_until_completed(modify_disk_task)
                     return status
 
             return None
@@ -3668,9 +3786,6 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 None
         """
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
 
         try:
             ip_address = None
@@ -3691,12 +3806,16 @@ class vimconnector(vimconn.vimconnector):
                 for vms in vapp._get_vms():
                     vm_id = (vms.id).split(':')[-1]
 
-                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
 
                     response = Http.get(url=url_rest_call,
-                                        headers=vca.vcloud_session.get_vcloud_headers(),
-                                        verify=vca.verify,
-                                        logger=vca.logger)
+                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
+                                        verify=self.vca.verify,
+                                        logger=self.vca.logger)
+
+                    if response.status_code == 403:
+                        response = self.retry_rest('GET', url_rest_call)
+
                     if response.status_code != 200:
                         self.logger.error("REST call {} failed reason : {}"\
                                              "status code : {}".format(url_rest_call,
@@ -3734,11 +3853,16 @@ class vimconnector(vimconn.vimconnector):
 
                         data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
 
-                    headers = vca.vcloud_session.get_vcloud_headers()
+                    headers = self.vca.vcloud_session.get_vcloud_headers()
                     headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                     response = Http.put(url=url_rest_call, headers=headers, data=data,
-                                                                   verify=vca.verify,
-                                                                   logger=vca.logger)
+                                                                   verify=self.vca.verify,
+                                                                   logger=self.vca.logger)
+
+                    if response.status_code == 403:
+                        add_headers = {'Content-Type': headers['Content-Type']}
+                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)
+
                     if response.status_code != 202:
                         self.logger.error("REST call {} failed reason : {}"\
                                             "status code : {} ".format(url_rest_call,
@@ -3749,7 +3873,7 @@ class vimconnector(vimconn.vimconnector):
                     else:
                         nic_task = taskType.parseString(response.content, True)
                         if isinstance(nic_task, GenericTask):
-                            vca.block_until_completed(nic_task)
+                            self.vca.block_until_completed(nic_task)
                             self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
                                                                "default NIC type".format(vm_id))
                         else:
@@ -3759,12 +3883,16 @@ class vimconnector(vimconn.vimconnector):
                 for vms in vapp._get_vms():
                     vm_id = (vms.id).split(':')[-1]
 
-                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.vca.host, vm_id)
 
                     response = Http.get(url=url_rest_call,
-                                        headers=vca.vcloud_session.get_vcloud_headers(),
-                                        verify=vca.verify,
-                                        logger=vca.logger)
+                                        headers=self.vca.vcloud_session.get_vcloud_headers(),
+                                        verify=self.vca.verify,
+                                        logger=self.vca.logger)
+
+                    if response.status_code == 403:
+                        response = self.retry_rest('GET', url_rest_call)
+
                     if response.status_code != 200:
                         self.logger.error("REST call {} failed reason : {}"\
                                             "status code : {}".format(url_rest_call,
@@ -3803,11 +3931,15 @@ class vimconnector(vimconn.vimconnector):
 
                         data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
 
-                    headers = vca.vcloud_session.get_vcloud_headers()
+                    headers = self.vca.vcloud_session.get_vcloud_headers()
                     headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
                     response = Http.put(url=url_rest_call, headers=headers, data=data,
-                                                                   verify=vca.verify,
-                                                                   logger=vca.logger)
+                                                                   verify=self.vca.verify,
+                                                                   logger=self.vca.logger)
+
+                    if response.status_code == 403:
+                        add_headers = {'Content-Type': headers['Content-Type']}
+                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)
 
                     if response.status_code != 202:
                         self.logger.error("REST call {} failed reason : {}"\
@@ -3819,7 +3951,7 @@ class vimconnector(vimconn.vimconnector):
                     else:
                         nic_task = taskType.parseString(response.content, True)
                         if isinstance(nic_task, GenericTask):
-                            vca.block_until_completed(nic_task)
+                            self.vca.block_until_completed(nic_task)
                             self.logger.info("add_network_adapter_to_vms(): VM {} "\
                                                "conneced to NIC type {}".format(vm_id, nic_type))
                         else:
@@ -3892,7 +4024,8 @@ class vimconnector(vimconn.vimconnector):
                 'users': (optional) list of users to be inserted, each item is a dict with:
                     'name': (mandatory) user name,
                     'key-pairs': (optional) list of strings with the public key to be inserted to the user
-                'user-data': (optional) string is a text script to be passed directly to cloud-init
+                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
                 'config-files': (optional). List of files to be transferred. Each item is a dict with:
                     'dest': (mandatory) string with the destination absolute path
                     'encoding': (optional, by default text). Can be one of:
@@ -3902,9 +4035,6 @@ class vimconnector(vimconn.vimconnector):
                     'owner': (optional) file owner, string with the format 'owner:group'
                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
         """
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
 
         try:
             if isinstance(cloud_config, dict):
@@ -3966,7 +4096,7 @@ class vimconnector(vimconn.vimconnector):
                                 vm_name = vm.name
                                 task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
                                 if isinstance(task, GenericTask):
-                                    vca.block_until_completed(task)
+                                    self.vca.block_until_completed(task)
                                     self.logger.info("cloud_init : customized guest os task "\
                                                         "completed for VM {}".format(vm_name))
                                 else:
@@ -3979,7 +4109,7 @@ class vimconnector(vimconn.vimconnector):
                                                                "ssh-key".format(exp))
 
 
-    def add_new_disk(self, vca, vapp_uuid, disk_size):
+    def add_new_disk(self, vapp_uuid, disk_size):
         """
             Method to create an empty vm disk
 
@@ -4001,7 +4131,7 @@ class vimconnector(vimconn.vimconnector):
             if vm_details and "vm_virtual_hardware" in vm_details:
                 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
                 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
-                status = self.add_new_disk_rest(vca, disk_href, disk_size_mb)
+                status = self.add_new_disk_rest(disk_href, disk_size_mb)
 
         except Exception as exp:
             msg = "Error occurred while creating new disk {}.".format(exp)
@@ -4015,7 +4145,7 @@ class vimconnector(vimconn.vimconnector):
             self.rollback_newvm(vapp_uuid, msg)
 
 
-    def add_new_disk_rest(self, vca, disk_href, disk_size_mb):
+    def add_new_disk_rest(self, disk_href, disk_size_mb):
         """
         Retrives vApp Disks section & add new empty disk
 
@@ -4026,11 +4156,14 @@ class vimconnector(vimconn.vimconnector):
             Returns: Status of add new disk task
         """
         status = False
-        if vca.vcloud_session and vca.vcloud_session.organization:
+        if self.vca.vcloud_session and self.vca.vcloud_session.organization:
             response = Http.get(url=disk_href,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+                                headers=self.vca.vcloud_session.get_vcloud_headers(),
+                                verify=self.vca.verify,
+                                logger=self.vca.logger)
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', disk_href)
 
         if response.status_code != requests.codes.ok:
             self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
@@ -4069,13 +4202,17 @@ class vimconnector(vimconn.vimconnector):
             new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
 
             # Send PUT request to modify virtual hardware section with new disk
-            headers = vca.vcloud_session.get_vcloud_headers()
+            headers = self.vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
 
             response = Http.put(url=disk_href,
                                 data=new_data,
                                 headers=headers,
-                                verify=vca.verify, logger=self.logger)
+                                verify=self.vca.verify, logger=self.logger)
+
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', disk_href, add_headers, new_data)
 
             if response.status_code != 202:
                 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
@@ -4083,7 +4220,7 @@ class vimconnector(vimconn.vimconnector):
             else:
                 add_disk_task = taskType.parseString(response.content, True)
                 if type(add_disk_task) is GenericTask:
-                    status = vca.block_until_completed(add_disk_task)
+                    status = self.vca.block_until_completed(add_disk_task)
                     if not status:
                         self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
 
@@ -4909,3 +5046,101 @@ class vimconnector(vimconn.vimconnector):
                                                "getting media details")
             raise vimconn.vimconnException(message=exp)
 
+
+    def retry_rest(self, method, url, add_headers=None, data=None):
+        """ Method to get Token & retry respective REST request
+            Args:
+                method - HTTP method to retry - one of 'GET', 'PUT', 'POST' or 'DELETE'
+                url - request url to be used
+                add_headers - Additional headers (optional)
+                data - Request payload data to be passed in request
+            Returns:
+                response - Response of request
+        """
+        response = None
+
+        #Get token
+        self.get_token()
+
+        headers=self.vca.vcloud_session.get_vcloud_headers()
+
+        if add_headers:
+            headers.update(add_headers)
+
+        if method == 'GET':
+            response = Http.get(url=url,
+                                headers=headers,
+                                verify=self.vca.verify,
+                                logger=self.vca.logger)
+        elif method == 'PUT':
+            response = Http.put(url=url,
+                                data=data,
+                                headers=headers,
+                                verify=self.vca.verify,
+                                logger=self.logger)
+        elif method == 'POST':
+            response = Http.post(url=url,
+                                 headers=headers,
+                                 data=data,
+                                 verify=self.vca.verify,
+                                 logger=self.vca.logger)
+        elif method == 'DELETE':
+            response = Http.delete(url=url,
+                                 headers=headers,
+                                 verify=self.vca.verify,
+                                 logger=self.vca.logger)
+        return response
+
+
+    def get_token(self):
+        """ Generate a new token if expired
+
+
+                None. On success the refreshed VCA object is stored in self.vca, which can later be used to connect to vCloud director as admin for the VDC
+        """
+        vca = None
+
+        try:
+            self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
+                                                                                      self.user,
+                                                                                      self.org_name))
+            vca = VCA(host=self.url,
+                      username=self.user,
+                      service_type=STANDALONE,
+                      version=VCAVERSION,
+                      verify=False,
+                      log=False)
+
+            result = vca.login(password=self.passwd, org=self.org_name)
+            if result is True:
+                result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
+                if result is True:
+                    self.logger.info(
+                        "Successfully generated token for vcloud direct org: {} as user: {}".format(self.org_name, self.user))
+                    #Update vca
+                    self.vca = vca
+                    return
+
+        except:
+            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+                                                     "{} as user: {}".format(self.org_name, self.user))
+
+        if not vca or not result:
+            raise vimconn.vimconnConnectionException("self.connect() is failed while reconnecting")
+
+
+    def get_vdc_details(self):
+        """ Get VDC details using pyVcloud Lib
+
+            Returns vdc object
+        """
+        vdc = self.vca.get_vdc(self.tenant_name)
+
+        #Retry once, if failed by refreshing token
+        if vdc is None:
+            self.get_token()
+            vdc = self.vca.get_vdc(self.tenant_name)
+
+        return vdc
+
+
index 624dce3..ac28f0d 100755 (executable)
@@ -283,7 +283,8 @@ echo -e "\n"\
     "#####        INSTALLING OVIM LIBRARY                        #####\n"\
     "#################################################################"
 su $SUDO_USER -c "git -C ${BASEFOLDER} clone ${GIT_OVIM_URL} openvim"
-[[ -z $DEVELOP ]] && su $SUDO_USER -c "git -C ${BASEFOLDER}/openvim checkout v2.0"
+LATEST_STABLE_TAG=`git -C "${BASEFOLDER}/openvim" tag -l v[0-9].* | tail -n1`
+[[ -z $DEVELOP ]] && su $SUDO_USER -c "git -C ${BASEFOLDER}/openvim checkout tags/${LATEST_STABLE_TAG}"
 
 # Install debian dependencies before setup.py
 [ "$_DISTRO" == "Ubuntu" ] && install_packages "libmysqlclient-dev"
diff --git a/test/RO_tests/afiinity_vnf/scenario_simple_2_vnf_afinnity.yaml b/test/RO_tests/afiinity_vnf/scenario_simple_2_vnf_afinnity.yaml
new file mode 100644 (file)
index 0000000..deae332
--- /dev/null
@@ -0,0 +1,38 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          simple_ha
+  description:   Simple network scenario consisting of two VNF connected to an external network
+  vnfs:
+    linux1:                   # vnf/net name in the scenario
+      vnf_name:  linux_test_2vms # VNF name as introduced in OPENMANO DB
+  networks:
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces:
+      - linux1:  control0       # Node and its interface
+      - linux1:  control1       # Node and its interface
+
+
+
+
diff --git a/test/RO_tests/afiinity_vnf/vnfd_linux_2_vnfc_affinity.yaml b/test/RO_tests/afiinity_vnf/vnfd_linux_2_vnfc_affinity.yaml
new file mode 100644 (file)
index 0000000..9ec7f60
--- /dev/null
@@ -0,0 +1,61 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+
+vnf:
+    name:        linux_test_2vms
+    description: Single-VM VNF with a traditional cloud VM based on generic Linux OS
+    external-connections:
+    -   name:              control0
+        type:              mgmt              # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              linux-VM-HA-A  # Virtual Machine this interface belongs to
+        local_iface_name:  eth0             # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface 0
+    -   name:              control1
+        type:              mgmt              # "mgmt" (autoconnect to management net), "bridge", "data"
+        VNFC:              linux-VM-HA-B  # Virtual Machine this interface belongs to
+        local_iface_name:  eth0             # interface name inside this Virtual Machine (must be defined in the VNFC section)
+        description:       Management interface 1
+    VNFC:
+    -   name:        linux-VM-HA-A
+        description: Generic Linux Virtual Machine
+        availability_zone: A  # availability zone A
+        #Copy the image to a compute path and edit this path
+        image name:  TestVM
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1024         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 10
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:11.0"
+        numas: []
+    -   name:        linux-VM-HA-B
+        description: Generic Linux Virtual Machine
+        availability_zone: B # availability zone B
+        #Copy the image to a compute path and edit this path
+        image name:  TestVM
+        vcpus: 1          # Only for traditional cloud VMs. Number of virtual CPUs (oversubscription is allowed).
+        ram: 1024         # Only for traditional cloud VMs. Memory in MBytes (not from hugepages, oversubscription is allowed)
+        disk: 10
+        bridge-ifaces:
+        -   name:      eth0
+            vpci:      "0000:00:12.0"
+        numas: []
diff --git a/test/RO_tests/simple_count3/scenario_linux_count3.yaml b/test/RO_tests/simple_count3/scenario_linux_count3.yaml
new file mode 100644 (file)
index 0000000..2362c02
--- /dev/null
@@ -0,0 +1,39 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version:  2
+scenario:
+  name:          simple_count3
+  description:   Simple network scenario consisting of a multi VNFC VNF connected to an external network
+  vnfs: 
+    linux1:                   # vnf/net name in the scenario
+      vnf_name:  simple_linux_count3        # VNF name as introduced in OPENMANO DB
+  networks: 
+    mgmt:                   # provide a name for this net or connection
+      external:  true
+      interfaces: 
+      - linux1:  control0       # Node and its interface
+    internal1:                   # provide a name for this net or connection
+      external:  false
+      interfaces: 
+      - linux1:  data-eth1
+
+
diff --git a/test/RO_tests/simple_count3/vnfd_count3.yaml b/test/RO_tests/simple_count3/vnfd_count3.yaml
new file mode 100644 (file)
index 0000000..a4c7070
--- /dev/null
@@ -0,0 +1,68 @@
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+---
+schema_version: "0.2"
+vnf:
+    name:        simple_linux_count3
+    description: "Example of a linux VNF consisting of two VMs with one internal network"
+    # class: parent      # Optional. Used to organize VNFs
+    internal-connections:
+    -   name:        internal-eth2
+        description: internalnet
+        type:        e-lan
+        implementation: overlay
+        ip-profile:
+            ip-version:       IPv4
+            subnet-address:   192.168.1.0/24
+            gateway-address:  192.168.1.1
+            dns-address:      8.8.8.8
+            dhcp:
+                enabled: true
+                start-address: 192.168.1.100
+                count: 100
+        elements:
+        -   VNFC:             linux_3VMs
+            local_iface_name: eth2
+            ip_address:       192.168.1.2
+    external-connections:
+    -   name:              control0
+        type:              mgmt
+        VNFC:              linux_3VMs
+        local_iface_name:  eth0
+        description:       control interface VM1
+    -   name:              data-eth1
+        type:              bridge
+        VNFC:              linux_3VMs
+        local_iface_name:  eth1
+        description:       data interface input
+    VNFC:
+    -   name:        linux_3VMs
+        count:       3
+        description: "Linux VM1 2 CPUs, 2 GB RAM and 3 bridge interfaces"
+        #Copy the image to a compute path and edit this path
+        image name:  TestVM
+        disk: 10
+        vcpus: 2
+        ram: 2048
+        bridge-ifaces:
+        -   name:      eth0
+        -   name:      eth1
+        -   name:      eth2
index 0d5e624..5b47182 100755 (executable)
@@ -43,6 +43,7 @@ import sys
 import time
 from pyvcloud.vcloudair import VCA
 import uuid
+import json
 
 global test_config   #  used for global variables with the test configuration
 test_config = {}
@@ -279,24 +280,6 @@ class test_VIM_tenant_operations(test_base):
         assert ('deleted' in tenant.get('result', ""))
 
 class test_vimconn_connect(test_base):
-    # test_index = 1
-    # test_text = None
-
-    # @classmethod
-    # def setUpClass(cls):
-    #     logger.info("{}. {}".format(test_config["test_number"], cls.__name__))
-
-    # @classmethod
-    # def tearDownClass(cls):
-    #     test_config["test_number"] += 1
-
-    # def tearDown(self):
-    #     exec_info = sys.exc_info()
-    #     if exec_info == (None, None, None):
-    #         logger.info(self.__class__.test_text+" -> TEST OK")
-    #     else:
-    #         logger.warning(self.__class__.test_text+" -> TEST NOK")
-    #         logger.critical("Traceback error",exc_info=True)
 
     def test_000_connect(self):
         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
@@ -311,25 +294,7 @@ class test_vimconn_connect(test_base):
 
 
 class test_vimconn_new_network(test_base):
-    # test_index = 1
     network_name = None
-    # test_text = None
-
-    # @classmethod
-    # def setUpClass(cls):
-    #     logger.info("{}. {}".format(test_config["test_number"], cls.__name__))
-
-    # @classmethod
-    # def tearDownClass(cls):
-    #     test_config["test_number"] += 1
-
-    # def tearDown(self):
-    #     exec_info = sys.exc_info()
-    #     if exec_info == (None, None, None):
-    #         logger.info(self.__class__.test_text+" -> TEST OK")
-    #     else:
-    #         logger.warning(self.__class__.test_text+" -> TEST NOK")
-    #         logger.critical("Traceback error",exc_info=True)
 
     def test_000_new_network(self):
         self.__class__.network_name = _get_random_string(20)
@@ -344,7 +309,7 @@ class test_vimconn_new_network(test_base):
         self.__class__.network_id = network
         logger.debug("{}".format(network))
 
-        network_list = test_config["vim_conn"].get_vcd_network_list()
+        network_list = test_config["vim_conn"].get_network_list()
         for net in network_list:
             if self.__class__.network_name in net.get('name'):
                 self.assertIn(self.__class__.network_name, net.get('name'))
@@ -372,7 +337,7 @@ class test_vimconn_new_network(test_base):
             delete_net_ids.append(network_id)
             logger.debug("{}".format(network_id))
 
-            network_list = test_config["vim_conn"].get_vcd_network_list()
+            network_list = test_config["vim_conn"].get_network_list()
             for net in network_list:
                 if self.__class__.network_name in net.get('name'):
                     self.assertIn(self.__class__.network_name, net.get('name'))
@@ -424,7 +389,7 @@ class test_vimconn_new_network(test_base):
         self.__class__.network_id = network
         logger.debug("{}".format(network))
 
-        network_list = test_config["vim_conn"].get_vcd_network_list()
+        network_list = test_config["vim_conn"].get_network_list()
         for net in network_list:
             if self.__class__.network_name in net.get('name'):
                 self.assertIn(self.__class__.network_name, net.get('name'))
@@ -449,7 +414,7 @@ class test_vimconn_new_network(test_base):
         self.__class__.network_id = network
         logger.debug("{}".format(network))
 
-        network_list = test_config["vim_conn"].get_vcd_network_list()
+        network_list = test_config["vim_conn"].get_network_list()
         for net in network_list:
             if self.__class__.network_name in net.get('name'):
                 self.assertIn(self.__class__.network_name, net.get('name'))
@@ -472,7 +437,7 @@ class test_vimconn_new_network(test_base):
                                                                     net_type='unknowntype')
         self.__class__.network_id = network
         logger.debug("{}".format(network))
-        network_list = test_config["vim_conn"].get_vcd_network_list()
+        network_list = test_config["vim_conn"].get_network_list()
         for net in network_list:
             if self.__class__.network_name in net.get('name'):
                 self.assertIn(self.__class__.network_name, net.get('name'))
@@ -484,18 +449,42 @@ class test_vimconn_new_network(test_base):
         else:
             logger.info("Failed to delete network id {}".format(self.__class__.network_id))
 
-class test_vimconn_get_network_list(test_base):
-    # test_index = 1
-    network_name = None
+    def test_050_refresh_nets_status(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        # creating new network
+        network_name = _get_random_string(20)
+        net_type = 'bridge'
+        network_id = test_config["vim_conn"].new_network(net_name=network_name,
+                                                          net_type=net_type)
+        # refresh net status
+        net_dict = test_config["vim_conn"].refresh_nets_status([network_id])
+        for attr in net_dict[network_id]:
+            if attr == 'status':
+                self.assertEqual(net_dict[network_id][attr], 'ACTIVE')
 
-    # test_text = None
-    # @classmethod
-    # def setUpClass(cls):
-    #     logger.info("{}. {}".format(test_config["test_number"], cls.__name__))
+        # Deleting created network
+        result = test_config["vim_conn"].delete_network(network_id)
+        if result:
+            logger.info("Network id {} sucessfully deleted".format(network_id))
+        else:
+            logger.info("Failed to delete network id {}".format(network_id))
 
-    # @classmethod
-    # def tearDownClass(cls):
-    #     test_config["test_number"] += 1
+    def test_060_refresh_nets_status_negative(self):
+        unknown_net_id = str(uuid.uuid4())
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # refresh net status
+        net_dict = test_config["vim_conn"].refresh_nets_status([unknown_net_id])
+        self.assertEqual(net_dict, {})
+
+class test_vimconn_get_network_list(test_base):
+    network_name = None
 
     def setUp(self):
         # creating new network
@@ -508,12 +497,6 @@ class test_vimconn_get_network_list(test_base):
 
     def tearDown(self):
         test_base.tearDown(self)
-        # exec_info = sys.exc_info()
-        # if exec_info == (None, None, None):
-        #     logger.info(self.__class__.test_text+" -> TEST OK")
-        # else:
-        #     logger.warning(self.__class__.test_text+" -> TEST NOK")
-        #     logger.critical("Traceback error",exc_info=True)
 
         # Deleting created network
         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
@@ -631,17 +614,7 @@ class test_vimconn_get_network_list(test_base):
         self.assertEqual(network_list, [])
 
 class test_vimconn_get_network(test_base):
-    # test_index = 1
     network_name = None
-    # test_text = None
-
-    # @classmethod
-    # def setUpClass(cls):
-    #     logger.info("{}. {}".format(test_config["test_number"], cls.__name__))
-
-    # @classmethod
-    # def tearDownClass(cls):
-    #     test_config["test_number"] += 1
 
     def setUp(self):
         # creating new network
@@ -654,12 +627,6 @@ class test_vimconn_get_network(test_base):
 
     def tearDown(self):
         test_base.tearDown(self)
-        # exec_info = sys.exc_info()
-        # if exec_info == (None, None, None):
-        #     logger.info(self.__class__.test_text+" -> TEST OK")
-        # else:
-        #     logger.warning(self.__class__.test_text+" -> TEST NOK")
-        #     logger.critical("Traceback error",exc_info=True)
 
         # Deleting created network
         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
@@ -691,25 +658,7 @@ class test_vimconn_get_network(test_base):
         self.assertEqual(network_info, {})
 
 class test_vimconn_delete_network(test_base):
-    # test_index = 1
     network_name = None
-    # test_text = None
-
-    # @classmethod
-    # def setUpClass(cls):
-    #     logger.info("{}. {}".format(test_config["test_number"], cls.__name__))
-
-    # @classmethod
-    # def tearDownClass(cls):
-    #     test_config["test_number"] += 1
-
-    # def tearDown(self):
-    #     exec_info = sys.exc_info()
-    #     if exec_info == (None, None, None):
-    #         logger.info(self.__class__.test_text+" -> TEST OK")
-    #     else:
-    #         logger.warning(self.__class__.test_text+" -> TEST NOK")
-    #         logger.critical("Traceback error",exc_info=True)
 
     def test_000_delete_network(self):
         # Creating network
@@ -749,24 +698,6 @@ class test_vimconn_delete_network(test_base):
         self.assertEqual((context.exception).http_code, 400)
 
 class test_vimconn_get_flavor(test_base):
-    # test_index = 1
-    # test_text = None
-
-    # @classmethod
-    # def setUpClass(cls):
-    #     logger.info("{}. {}".format(test_config["test_number"], cls.__name__))
-
-    # @classmethod
-    # def tearDownClass(cls):
-    #     test_config["test_number"] += 1
-
-    # def tearDown(self):
-    #     exec_info = sys.exc_info()
-    #     if exec_info == (None, None, None):
-    #         logger.info(self.__class__.test_text+" -> TEST OK")
-    #     else:
-    #         logger.warning(self.__class__.test_text+" -> TEST NOK")
-    #         logger.critical("Traceback error",exc_info=True)
 
     def test_000_get_flavor(self):
         test_directory_content = os.listdir(test_config["test_directory"])
@@ -825,6 +756,622 @@ class test_vimconn_get_flavor(test_base):
 
         self.assertEqual((context.exception).http_code, 404)
 
+class test_vimconn_new_flavor(test_base):
+    flavor_id = None
+
+    def test_000_new_flavor(self):
+        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # create new flavor
+        self.__class__.flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+        self.assertEqual(type(self.__class__.flavor_id),str)
+        self.assertIsInstance(uuid.UUID(self.__class__.flavor_id),uuid.UUID)
+
+    def test_010_delete_flavor(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # delete flavor
+        result = test_config["vim_conn"].delete_flavor(self.__class__.flavor_id)
+        if result:
+            logger.info("Flavor id {} successfully deleted".format(result))
+        else:
+            logger.error("Failed to delete flavor id {}".format(result))
+            raise Exception ("Failed to delete created flavor")
+
+    def test_020_new_flavor_negative(self):
+        Invalid_flavor_data = {'ram': '1024', 'vcpus': 2.0, 'disk': 2.0}
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].new_flavor(Invalid_flavor_data)
+
+        self.assertEqual((context.exception).http_code, 400)
+
+    def test_030_delete_flavor_negative(self):
+        Non_exist_flavor_id = str(uuid.uuid4())
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].delete_flavor(Non_exist_flavor_id)
+
+        self.assertEqual((context.exception).http_code, 404)
+
+class test_vimconn_new_image(test_base):
+
+    def test_000_new_image(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        image_path = test_config['image_path']
+        if image_path:
+            self.__class__.image_id = test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : image_path })
+            time.sleep(20)
+            self.assertEqual(type(self.__class__.image_id),str)
+            self.assertIsInstance(uuid.UUID(self.__class__.image_id),uuid.UUID)
+        else:
+            self.skipTest("Skipping test as image file not present at RO container")
+
+    def test_010_new_image_negative(self):
+        Non_exist_image_path = '/temp1/cirros.ovf'
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path })
+
+        self.assertEqual((context.exception).http_code, 400)
+
+    def test_020_delete_image(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        image_id = test_config["vim_conn"].delete_image(self.__class__.image_id)
+        self.assertEqual(type(image_id),str)
+
+    def test_030_delete_image_negative(self):
+        Non_exist_image_id = str(uuid.uuid4())
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].delete_image(Non_exist_image_id)
+
+        self.assertEqual((context.exception).http_code, 404)
+
+class test_vimconn_get_image_id_from_path(test_base):
+
+    def test_000_get_image_id_from_path(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        image_path = test_config['image_path']
+        if image_path:
+            image_id = test_config["vim_conn"].get_image_id_from_path( image_path )
+            self.assertEqual(type(image_id),str)
+        else:
+            self.skipTest("Skipping test as image file not present at RO container")
+
+    def test_010_get_image_id_from_path_negative(self):
+        Non_exist_image_path = '/temp1/cirros.ovf'
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path })
+
+        self.assertEqual((context.exception).http_code, 400)
+
+class test_vimconn_get_image_list(test_base):
+    image_name = None
+    image_id = None
+
+    def test_000_get_image_list(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        image_list = test_config["vim_conn"].get_image_list()
+
+        for item in image_list:
+            if 'name' in item:
+                self.__class__.image_name = item['name']
+                self.__class__.image_id = item['id']
+                self.assertEqual(type(self.__class__.image_name),str)
+                self.assertEqual(type(self.__class__.image_id),str)
+
+    def test_010_get_image_list_by_name(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        image_list = test_config["vim_conn"].get_image_list({'name': self.__class__.image_name})
+
+        for item in image_list:
+            self.assertEqual(type(item['id']), str)
+            self.assertEqual(item['id'], self.__class__.image_id)
+            self.assertEqual(type(item['name']), str)
+            self.assertEqual(item['name'], self.__class__.image_name)
+
+    def test_020_get_image_list_by_id(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        filter_image_list = test_config["vim_conn"].get_image_list({'id': self.__class__.image_id})
+
+        for item1 in filter_image_list:
+            self.assertEqual(type(item1.get('id')), str)
+            self.assertEqual(item1.get('id'), self.__class__.image_id)
+            self.assertEqual(type(item1.get('name')), str)
+            self.assertEqual(item1.get('name'), self.__class__.image_name)
+
+    def test_030_get_image_list_negative(self):
+        Non_exist_image_id = uuid.uuid4()
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        image_list = test_config["vim_conn"].get_image_list({'name': 'Unknown_name', 'id': Non_exist_image_id})
+
+        self.assertIsNotNone(image_list, None)
+        self.assertEqual(image_list, [])
+
+class test_vimconn_new_vminstance(test_base):
+    network_name = None
+    net_type = None
+    network_id = None
+    image_id = None
+    instance_id = None
+
+    def setUp(self):
+        # create network
+        self.__class__.network_name = _get_random_string(20)
+        self.__class__.net_type = 'bridge'
+
+        self.__class__.network_id = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                            net_type=self.__class__.net_type)
+
+    def tearDown(self):
+        test_base.tearDown(self)
+        # Deleting created network
+        result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+        if result:
+            logger.info("Network id {} successfully deleted".format(self.__class__.network_id))
+        else:
+            logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+
+    def test_000_new_vminstance(self):
+        vpci = "0000:00:11.0"
+        name = "eth0"
+
+        flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        # find image name and image id
+        if test_config['image_name']:
+            image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
+            if len(image_list) == 0:
+                raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
+            else:
+                self.__class__.image_id = image_list[0]['id']
+        else:
+            image_list = test_config['vim_conn'].get_image_list()
+            if len(image_list) == 0:
+                raise Exception("Not found any image at VIM")
+            else:
+                self.__class__.image_id = image_list[0]['id']
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        self.__class__.instance_id = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
+
+        self.assertEqual(type(self.__class__.instance_id),str)
+
+    def test_010_new_vminstance_by_model(self):
+        flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+        model_name = 'e1000'
+        name = 'eth0'
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'model': model_name, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        instance_id = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
+                                                                                           flavor_id=flavor_id,
+                                                                                             net_list=net_list)
+        self.assertEqual(type(instance_id),str)
+        # Deleting created vm instance
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_020_new_vminstance_by_net_use(self):
+        flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+        net_use = 'data'
+        name = 'eth0'
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': net_use, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        instance_id = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
+                                                                                           flavor_id=flavor_id,
+                                                                                             net_list=net_list)
+        self.assertEqual(type(instance_id),str)
+        # Deleting created vm instance
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_030_new_vminstance_by_net_type(self):
+        flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+        _type = 'VF'
+        name = 'eth0'
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': _type, 'net_id': self.__class__.network_id}]
+
+        instance_id = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
+                                                                                           flavor_id=flavor_id,
+                                                                                             net_list=net_list)
+        self.assertEqual(type(instance_id),str)
+        # Deleting created vm instance
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_040_new_vminstance_by_cloud_config(self):
+        flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+        name = 'eth0'
+        user_name = 'test_user'
+
+        key_pairs = ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com']
+
+        users_data = [{'key-pairs': ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com'], 'name': user_name}]
+
+        cloud_data = {'config-files': [{'content': 'auto enp0s3\niface enp0s3 inet dhcp\n', 'dest': '/etc/network/interfaces.d/enp0s3.cfg', 'owner': 'root:root', 'permissions': '0644'}, {'content': '#! /bin/bash\nls -al >> /var/log/osm.log\n', 'dest': '/etc/rc.local', 'permissions': '0755'}, {'content': 'file content', 'dest': '/etc/test_delete'}], 'boot-data-drive': True, 'key-pairs': key_pairs, 'users': users_data }
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        instance_id = test_config["vim_conn"].new_vminstance(name='Cloud_vm', image_id=self.__class__.image_id,
+                                                                                           flavor_id=flavor_id,
+                                                                                             net_list=net_list,
+                                                                                       cloud_config=cloud_data)
+        self.assertEqual(type(instance_id),str)
+        # Deleting created vm instance
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_050_new_vminstance_by_disk_list(self):
+        flavor_data = {'ram': 1024, 'vcpus': 2, 'disk': 10}
+        name = 'eth0'
+
+        device_data = [{'image_id': self.__class__.image_id, 'size': '5'}]
+
+        # create new flavor
+        flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        instance_id = test_config["vim_conn"].new_vminstance(name='VM_test1', image_id=self.__class__.image_id,
+                                                                                           flavor_id=flavor_id,
+                                                                                             net_list=net_list,
+                                                                                         disk_list=device_data)
+        self.assertEqual(type(instance_id),str)
+        # Deleting created vm instance
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(instance_id)
+        time.sleep(10)
+
+    def test_060_new_vminstance_negative(self):
+        unknown_flavor_id = str(uuid.uuid4())
+        unknown_image_id = str(uuid.uuid4())
+        name = 'eth2'
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=unknown_image_id,
+                                                                  flavor_id=unknown_flavor_id,
+                                                                            net_list=net_list)
+        self.assertEqual((context.exception).http_code, 404)
+
+    def test_070_get_vminstance(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Get instance by its id
+        vm_info = test_config["vim_conn"].get_vminstance(self.__class__.instance_id)
+
+        if test_config['vimtype'] == 'vmware':
+            for attr in vm_info:
+                if attr == 'status':
+                    self.assertEqual(vm_info[attr], 'ACTIVE')
+                if attr == 'hostId':
+                    self.assertEqual(type(vm_info[attr]), str)
+                if attr == 'interfaces':
+                    self.assertEqual(type(vm_info[attr]), list)
+                    self.assertEqual(vm_info[attr][0]['IsConnected'], 'true')
+                if attr == 'IsEnabled':
+                    self.assertEqual(vm_info[attr], 'true')
+
+    def test_080_get_vminstance_negative(self):
+        unknown_instance_id = str(uuid.uuid4())
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].get_vminstance(unknown_instance_id)
+
+        self.assertEqual((context.exception).http_code, 404)
+
+    def test_090_refresh_vms_status(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+        vm_list = []
+        vm_list.append(self.__class__.instance_id)
+
+        # refresh vm status
+        vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
+        for attr in vm_info[self.__class__.instance_id]:
+            if attr == 'status':
+                self.assertEqual(vm_info[self.__class__.instance_id][attr], 'ACTIVE')
+            if attr == 'interfaces':
+                self.assertEqual(type(vm_info[self.__class__.instance_id][attr]), list)
+
+    def test_100_refresh_vms_status_negative(self):
+        unknown_id = str(uuid.uuid4())
+
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        vm_dict = test_config["vim_conn"].refresh_vms_status([unknown_id])
+        self.assertEqual(vm_dict, {})
+
+    def test_110_action_vminstance(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        action_list = ['shutdown','start','shutoff','rebuild','pause','resume']
+        # various action on vminstace
+        for action in action_list:
+            instance_id = test_config["vim_conn"].action_vminstance(self.__class__.instance_id,
+                                                                               { action: None})
+            self.assertEqual(instance_id, self.__class__.instance_id)
+
+    def test_120_action_vminstance_negative(self):
+        non_exist_id = str(uuid.uuid4())
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        action = 'start'
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].action_vminstance(non_exist_id, { action: None})
+
+        self.assertEqual((context.exception).http_code, 400)
+
+    def test_130_delete_vminstance(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Deleting created vm instance
+        logger.info("Deleting created vm instance")
+        test_config["vim_conn"].delete_vminstance(self.__class__.instance_id)
+        time.sleep(10)
+
+class test_vimconn_get_tenant_list(test_base):
+    tenant_id = None
+
+    def test_000_get_tenant_list(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Getting tenant list
+        tenant_list = test_config["vim_conn"].get_tenant_list()
+
+        for item in tenant_list:
+            if test_config['tenant'] == item['name']:
+                self.__class__.tenant_id = item['id']
+                self.assertEqual(type(item['name']), str)
+                self.assertEqual(type(item['id']), str)
+
+    def test_010_get_tenant_list_by_id(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Getting filter tenant list by its id
+        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'id': self.__class__.tenant_id})
+
+        for item in filter_tenant_list:
+            self.assertEqual(type(item['id']), str)
+            self.assertEqual(item['id'], self.__class__.tenant_id)
+
+    def test_020_get_tenant_list_by_name(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Getting filter tenant list by its name
+        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant']})
+
+        for item in filter_tenant_list:
+            self.assertEqual(type(item['name']), str)
+            self.assertEqual(item['name'], test_config['tenant'])
+
+    def test_030_get_tenant_list_by_name_and_id(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        # Getting filter tenant list by its name and id
+        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant'],
+                                                                    'id': self.__class__.tenant_id})
+
+        for item in filter_tenant_list:
+            self.assertEqual(type(item['name']), str)
+            self.assertEqual(type(item['id']), str)
+            self.assertEqual(item['name'], test_config['tenant'])
+            self.assertEqual(item['id'], self.__class__.tenant_id)
+
+    def test_040_get_tenant_list_negative(self):
+        non_exist_tenant_name = "Tenant_123"
+        non_exist_tenant_id = "kjhgrt456-45345kjhdfgnbdk-34dsfjdfg"
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': non_exist_tenant_name,
+                                                                         'id': non_exist_tenant_id})
+
+        self.assertEqual(filter_tenant_list, [])
+
+class test_vimconn_new_tenant(test_base):
+    tenant_id = None
+
+    def test_000_new_tenant(self):
+        tenant_name = _get_random_string(20)
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        self.__class__.tenant_id = test_config["vim_conn"].new_tenant(tenant_name)
+        time.sleep(15)
+
+        self.assertEqual(type(self.__class__.tenant_id), str)
+
+    def test_010_new_tenant_negative(self):
+        Invalid_tenant_name = 10121
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].new_tenant(Invalid_tenant_name)
+
+        self.assertEqual((context.exception).http_code, 400)
+
+    def test_020_delete_tenant(self):
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        tenant_id = test_config["vim_conn"].delete_tenant(self.__class__.tenant_id)
+        self.assertEqual(type(tenant_id), str)
+
+    def test_030_delete_tenant_negative(self):
+        Non_exist_tenant_name = 'Test_30_tenant'
+        self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                            self.__class__.test_index,
+                                                inspect.currentframe().f_code.co_name)
+        self.__class__.test_index += 1
+
+        with self.assertRaises(Exception) as context:
+            test_config["vim_conn"].delete_tenant(Non_exist_tenant_name)
+
+        self.assertEqual((context.exception).http_code, 404)
 
 '''
 IMPORTANT NOTE
@@ -978,6 +1525,8 @@ def test_vimconnector(args):
         org_user = config_params.get('user')
         org_passwd = config_params.get('passwd')
         vim_url = args.endpoint_url
+        test_config['image_path'] = args.image_path
+        test_config['image_name'] = args.image_name
 
         # vmware connector obj
         test_config['vim_conn'] = vim.vimconnector(name=org_name, tenant_name=tenant_name, user=org_user,passwd=org_passwd, url=vim_url, config=config_params)
@@ -1233,6 +1782,8 @@ if __name__=="__main__":
                                     help='Set the vimconnector specific config parameters in dictionary format')
     mandatory_arguments.add_argument('-u', '--url', dest='endpoint_url',required=True, help="Set the vim connector url or Host IP")
     # Optional arguments
+    vimconn_parser.add_argument('-i', '--image-path', dest='image_path', help="Provide image path present at RO container")
+    vimconn_parser.add_argument('-n', '--image-name', dest='image_name', help="Provide image name for test")
     # TODO add optional arguments for vimconn tests
     # vimconn_parser.add_argument("-i", '--image-name', dest='image_name', help='<HELP>'))
 
diff --git a/test/test_openmanocli.sh b/test/test_openmanocli.sh
new file mode 100755 (executable)
index 0000000..9bbeea5
--- /dev/null
@@ -0,0 +1,207 @@
+#!/bin/bash
+
+##
+# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
+# This file is part of openmano
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: nfvlabs@tid.es
+##
+
+#This script can be used as a basic test of openmano.
+#WARNING: It destroy the database content
+
+
+function usage(){
+    echo -e "usage: ${BASH_SOURCE[0]} [OPTIONS] <action>\n  test openmano with fake tenant, datancenters, etc."\
+            "It assumes that you have configured openmano cli with HOST,PORT,TENANT with environment variables"
+            "If not, it will use by default localhost:9080 and creates a new TENANT"
+    echo -e "    -h --help        shows this help"
+}
+
+function is_valid_uuid(){
+    echo "$1" | grep -q -E '^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$' && return 0
+    return 1
+}
+
+DIRNAME=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+DIRmano=$(dirname $DIRNAME)
+DIRscript=${DIRmano}/scripts
+
+#detect paths of executables, preceding the relative paths
+openmano=openmano && [[ -x "${DIRmano}/openmano" ]] && openmano="${DIRmano}/openmano"
+service_openmano=service-openmano && [[ -x "$DIRscript/service-openmano" ]] &&
+    service_openmano="$DIRscript/service-openmano"
+initopenvim="initopenvim"
+openvim="openvim"
+
+function _exit()
+{
+    EXIT_STATUS=$1
+    for item in $ToDelete
+    do
+        command=${item%%:*}
+        uuid=${item#*:}
+        [[ $command == "datacenter-detach" ]] && force="" || force=-f
+        printf "%-50s" "$command $uuid:"
+        ! $openmano $command $uuid $force >> /dev/null && echo FAIL && EXIT_STATUS=1 || echo OK
+     done
+    [[ ${BASH_SOURCE[0]} != $0 ]] && return $1 || exit $EXIT_STATUS
+}
+
+
+# process options
+source ${DIRscript}/get-options.sh "force:-f help:h insert-bashrc init-openvim:initopenvim install-openvim screen" \
+                $* || _exit 1
+
+# help
+[ -n "$option_help" ] && usage && _exit 0
+
+
+ToDelete=""
+DCs="dc-fake1-openstack dc-fake2-openvim" #dc-fake3-vmware
+Ts="fake-tenant1 fake-tenand2"
+SDNs="sdn-fake1-opendaylight sdn-fake2-floodlight sdn-fake3-onos"
+
+for T in $Ts
+do
+    printf "%-50s" "Creating fake tenant '$T':"
+    ! result=`$openmano tenant-create "$T"` && echo FAIL && echo "    $result" && _exit 1
+    tenant=`echo $result |gawk '{print $1}'`
+    ! is_valid_uuid $tenant && echo "FAIL" && echo "    $result" && _exit 1
+    echo $tenant
+    ToDelete="tenant-delete:$tenant $ToDelete"
+    [[ -z "$OPENMANO_TENANT" ]] && export OPENMANO_TENANT=$tenant
+done
+
+index=0
+for DC in $DCs
+do
+    index=$((index+1))
+    printf "%-50s" "Creating datacenter '$DC':"
+    ! result=`$openmano datacenter-create "$DC" "http://$DC/v2.0" --type=${DC##*-} --config='{insecure: True}'` &&
+        echo FAIL && echo "    $result" && _exit 1
+    datacenter=`echo $result |gawk '{print $1}'`
+    ! is_valid_uuid $datacenter && echo "FAIL" && echo "    $result" && _exit 1
+    echo $datacenter
+    eval DC${index}=$datacenter
+    ToDelete="datacenter-delete:$datacenter $ToDelete"
+    [[ -z "$datacenter_empty" ]] && datacenter_empty=datacenter
+
+    printf "%-50s" "Attaching openmano tenant to the datacenter:"
+    ! result=`$openmano datacenter-attach "$DC" --vim-tenant-name=osm --config='{insecure: False}'` &&
+        echo FAIL && echo "    $result" && _exit 1
+    ToDelete="datacenter-detach:$datacenter $ToDelete"
+    echo OK
+done
+
+printf "%-50s" "Datacenter list:"
+! result=`$openmano datacenter-list` &&
+    echo  "FAIL" && echo "    $result" && _exit 1
+for verbose in "" -v -vv -vvv
+do
+    ! result=`$openmano datacenter-list "$DC" $verbose` &&
+        echo  "FAIL" && echo "    $result" && _exit 1
+done
+echo OK
+
+dpid_prefix=55:56:57:58:59:60:61:0
+dpid_sufix=0
+for SDN in $SDNs
+do
+    printf "%-50s" "Creating SDN controller '$SDN':"
+    ! result=`$openmano sdn-controller-create "$SDN" --ip 4.5.6.7 --port 80 --type=${SDN##*-} \
+        --user user --passwd p --dpid=${dpid_prefix}${dpid_sufix}` && echo "FAIL" && echo "    $result" && _exit 1
+    sdn=`echo $result |gawk '{print $1}'`
+    #check a valid uuid is obtained
+    ! is_valid_uuid $sdn && echo "FAIL" && echo "    $result" && _exit 1
+    echo $sdn
+    ToDelete="sdn-controller-delete:$sdn $ToDelete"
+    dpid_sufix=$((dpid_sufix+1))
+
+done
+printf "%-50s" "Edit SDN-controller:"
+for edit in user=u password=p ip=5.6.6.7 port=81 name=name dpid=45:55:54:45:44:44:55:67
+do
+    ! result=`$openmano sdn-controller-edit $sdn -f --"${edit}"` &&
+        echo  "FAIL" && echo "    $result" && _exit 1
+done
+echo OK
+
+printf "%-50s" "SDN-controller list:"
+! result=`$openmano sdn-controller-list` &&
+    echo  "FAIL" && echo "    $result" && _exit 1
+for verbose in "" -v -vv -vvv
+do
+    ! result=`$openmano sdn-controller-list "$sdn" $verbose` &&
+        echo  "FAIL" && echo "    $result" && _exit 1
+done
+echo OK
+
+printf "%-50s" "Add sdn to datacenter:"
+! result=`$openmano datacenter-edit -f $DC --sdn-controller $SDN` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+printf "%-50s" "Clear Port mapping:"
+! result=`$openmano datacenter-sdn-port-mapping-clear -f $DC` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+printf "%-50s" "Set Port mapping:"
+! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+printf "%-50s" "List Port mapping:"
+for verbose in "" -v -vv -vvv
+do
+    ! result=`$openmano datacenter-sdn-port-mapping-list "$DC" $verbose` &&
+        echo  "FAIL" && echo "    $result" && _exit 1
+done
+echo OK
+
+printf "%-50s" "Set again Port mapping:"
+! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+printf "%-50s" "Clear again Port mapping:"
+! result=`$openmano datacenter-sdn-port-mapping-clear -f $DC` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+printf "%-50s" "Set again Port mapping:"
+! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+printf "%-50s" "Remove datacenter sdn:"
+! result=`$openmano datacenter-edit -f $DC --sdn-controller null` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+printf "%-50s" "Negative list port mapping:"
+result=`$openmano datacenter-sdn-port-mapping-list $DC` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+printf "%-50s" "Add again datacenter sdn:"
+! result=`$openmano datacenter-edit -f $DC --sdn-controller $SDN` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+printf "%-50s" "Empty list port mapping:"
+! [[ `$openmano datacenter-sdn-port-mapping-list $DC | wc -l` -eq 6 ]] &&
+    echo "FAIL" && _exit 1 || echo OK
+
+printf "%-50s" "Set again Port mapping:"
+! result=`$openmano datacenter-sdn-port-mapping-set -f $DC ${DIRmano}/sdn/sdn_port_mapping.yaml` &&
+    echo "FAIL" && echo "    $result" && _exit 1 || echo OK
+
+_exit 0
+
index 121c5ab..b57ebfd 100644 (file)
@@ -46,6 +46,7 @@ vnf:
         description:       Bridge interface
     VNFC:                              # Virtual machine array 
     -   name:        TEMPLATE-VM       # name of Virtual Machine
+        # count:       1                 #by default 1
         description: TEMPLATE description
         VNFC image: /path/to/imagefolder/TEMPLATE-VM.qcow2
         # image metadata: {"bus":"ide", "os_type":"windows", "use_incremental": "no" } #Optional