inject_user_key routine fixes
[osm/RO.git] / osm_ro / vimconn_vmware.py
index a224255..f343eea 100644 (file)
@@ -1,8 +1,8 @@
 # -*- coding: utf-8 -*-
 
 ##
-# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U.
-# This file is part of openmano
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -18,7 +18,7 @@
 # under the License.
 #
 # For those usages not covered by the Apache License, Version 2.0 please
-# contact with: nfvlabs@tid.es
+# contact:  osslegalrouting@vmware.com
 ##
 
 """
@@ -29,6 +29,9 @@ from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
 
 import vimconn
 import os
+import shutil
+import subprocess
+import tempfile
 import traceback
 import itertools
 import requests
@@ -42,27 +45,24 @@ from xml.etree import ElementTree as XmlElementTree
 from lxml import etree as lxmlElementTree
 
 import yaml
-from pyvcloud import Http
-from pyvcloud.vcloudair import VCA
-from pyvcloud.schema.vcd.v1_5.schemas.vcloud import sessionType, organizationType, \
-    vAppType, organizationListType, vdcType, catalogType, queryRecordViewType, \
-    networkType, vcloudType, taskType, diskType, vmsType, vdcTemplateListType, mediaType
+from pyvcloud.vcd.client import BasicLoginCredentials, Client, VcdTaskException
+from pyvcloud.vcd.vdc import VDC
+from pyvcloud.vcd.org import Org
+import re
+from pyvcloud.vcd.vapp import VApp
 from xml.sax.saxutils import escape
-
-from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TaskType
-from pyvcloud.schema.vcd.v1_5.schemas.vcloud.taskType import TaskType as GenericTask
-from pyvcloud.schema.vcd.v1_5.schemas.vcloud.vAppType import TaskType as VappTask
-from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgressType
-
 import logging
 import json
 import time
 import uuid
 import httplib
+#For python3
+#import http.client
 import hashlib
 import socket
 import struct
 import netaddr
+import random
 
 # global variable for vcd connector type
 STANDALONE = 'standalone'
@@ -71,23 +71,19 @@ STANDALONE = 'standalone'
 FLAVOR_RAM_KEY = 'ram'
 FLAVOR_VCPUS_KEY = 'vcpus'
 FLAVOR_DISK_KEY = 'disk'
-DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1",
-                      'dhcp_count':50,
-                      'subnet_address':"192.168.1.0/24",
+DEFAULT_IP_PROFILE = {'dhcp_count':50,
                       'dhcp_enabled':True,
-                      'dhcp_start_address':"192.168.1.3",
-                      'ip_version':"IPv4",
-                      'dns_address':"192.168.1.2"
+                      'ip_version':"IPv4"
                       }
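The hard-coded 192.168.1.x addresses are gone from the defaults above: subnet, gateway, DNS and the DHCP start address must now come from the caller's ip_profile, while only the DHCP count/flag and IP version keep fallback values. A minimal sketch of the intended merge, assuming caller-supplied keys take precedence (the helper name is hypothetical, not part of the patch):

```python
# Sketch only: how a caller-supplied ip_profile is expected to combine with the
# trimmed defaults. Anything address-related must now be supplied explicitly.
DEFAULT_IP_PROFILE = {'dhcp_count': 50, 'dhcp_enabled': True, 'ip_version': "IPv4"}

def effective_ip_profile(ip_profile):
    merged = dict(DEFAULT_IP_PROFILE)   # start from the defaults
    merged.update(ip_profile or {})     # caller-supplied keys win
    return merged

print(effective_ip_profile({'subnet_address': "10.0.0.0/24",
                            'gateway_address': "10.0.0.1"}))
```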
 # global variable for wait time
 INTERVAL_TIME = 5
 MAX_WAIT_TIME = 1800
 
-VCAVERSION = '5.9'
+API_VERSION = '27.0'
 
-__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
-__date__ = "$12-Jan-2017 11:09:29$"
-__version__ = '0.1'
+__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
+__date__ = "$09-Mar-2018 11:09:29$"
+__version__ = '0.2'
 
 #     -1: "Could not be created",
 #     0: "Unresolved",
@@ -181,10 +177,10 @@ class vimconnector(vimconn.vimconnector):
         self.nsx_manager = None
         self.nsx_user = None
         self.nsx_password = None
-        self.vcenter_ip = None
-        self.vcenter_port = None
-        self.vcenter_user = None
-        self.vcenter_password = None
+        self.availability_zone = None
+
+        # Disable warnings from self-signed certificates.
+        requests.packages.urllib3.disable_warnings()
 
         if tenant_name is not None:
             orgnameandtenant = tenant_name.split(":")
@@ -217,8 +213,19 @@ class vimconnector(vimconn.vimconnector):
         self.vcenter_user = config.get("vcenter_user", None)
         self.vcenter_password = config.get("vcenter_password", None)
 
+        #Set availability zone for Affinity rules
+        self.availability_zone = self.set_availability_zones()
+
+# ############# Stub code for SRIOV #################
+#         try:
+#             self.dvs_name = config['dv_switch_name']
+#         except KeyError:
+#             raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
+#
+#         self.vlanID_range = config.get("vlanID_range", None)
+
         self.org_uuid = None
-        self.vca = None
+        self.client = None
 
         if not url:
             raise vimconn.vimconnException('url param can not be NoneType')
@@ -291,59 +298,41 @@ class vimconnector(vimconn.vimconnector):
             Organization creation / provider network creation etc.
 
             Returns:
-                The return vca object that letter can be used to connect to vcloud direct as admin for provider vdc
+                The returned client object, which can later be used to connect to vCloud Director as admin for the provider VDC
         """
+        self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
 
-        self.logger.debug("Logging in to a vca {} as admin.".format(self.org_name))
-
-        vca_admin = VCA(host=self.url,
-                        username=self.admin_user,
-                        service_type=STANDALONE,
-                        version=VCAVERSION,
-                        verify=False,
-                        log=False)
-        result = vca_admin.login(password=self.admin_password, org='System')
-        if not result:
-            raise vimconn.vimconnConnectionException(
-                "Can't connect to a vCloud director as: {}".format(self.admin_user))
-        result = vca_admin.login(token=vca_admin.token, org='System', org_url=vca_admin.vcloud_session.org_url)
-        if result is True:
-            self.logger.info(
-                "Successfully logged to a vcloud direct org: {} as user: {}".format('System', self.admin_user))
+        try:
+            host = self.url
+            org = 'System'
+            client_as_admin = Client(host, verify_ssl_certs=False)
+            client_as_admin.set_highest_supported_version()
+            client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
+        except Exception as e:
+            raise vimconn.vimconnException(
+                  "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
 
-        return vca_admin
+        return client_as_admin
 
     def connect(self):
         """ Method connect as normal user to vCloud director.
 
             Returns:
-                The return vca object that letter can be used to connect to vCloud director as admin for VDC
+                The returned client object, which can later be used to connect to vCloud Director as admin for the VDC
         """
-
         try:
-            self.logger.debug("Logging in to a vca {} as {} to datacenter {}.".format(self.org_name,
+            self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
                                                                                       self.user,
                                                                                       self.org_name))
-            vca = VCA(host=self.url,
-                      username=self.user,
-                      service_type=STANDALONE,
-                      version=VCAVERSION,
-                      verify=False,
-                      log=False)
-
-            result = vca.login(password=self.passwd, org=self.org_name)
-            if not result:
-                raise vimconn.vimconnConnectionException("Can't connect to a vCloud director as: {}".format(self.user))
-            result = vca.login(token=vca.token, org=self.org_name, org_url=vca.vcloud_session.org_url)
-            if result is True:
-                self.logger.info(
-                    "Successfully logged to a vcloud direct org: {} as user: {}".format(self.org_name, self.user))
-
+            host = self.url
+            client = Client(host, verify_ssl_certs=False)
+            client.set_highest_supported_version()
+            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
         except:
             raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
                                                      "{} as user: {}".format(self.org_name, self.user))
 
-        return vca
+        return client
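Both connect() and connect_as_admin() now return a pyvcloud Client, and the raw REST calls elsewhere in this patch reuse its session token. Assuming the token lives in client._session.headers['x-vcloud-authorization'] (which is what the new code reads), the repeated header construction could be captured in one place (hypothetical helper, shown for clarity only):

```python
# Sketch: the Accept/authorization header pair built before every
# perform_request() call in this patch, factored into a helper.
def vcd_headers(client, content_type=None):
    headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
               'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
    if content_type:
        headers['Content-Type'] = content_type
    return headers
```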
 
     def init_organization(self):
         """ Method initialize organization UUID and VDC parameters.
@@ -356,13 +345,18 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                The returned vca object, which can later be used to connect to vCloud Director as admin
         """
+        client = self.connect()
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD.")
+
+        self.client = client
         try:
             if self.org_uuid is None:
-                org_dict = self.get_org_list()
-                for org in org_dict:
+                org_list = client.get_org_list()
+                for org in org_list.Org:
                     # we set org UUID at the init phase but we can do it only when we have valid credential.
-                    if org_dict[org] == self.org_name:
-                        self.org_uuid = org
+                    if org.get('name') == self.org_name:
+                        self.org_uuid = org.get('href').split('/')[-1]
                         self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
                         break
                 else:
@@ -415,15 +409,66 @@ class vimconnector(vimconn.vimconnector):
         vdc_task = self.create_vdc(vdc_name=tenant_name)
         if vdc_task is not None:
             vdc_uuid, value = vdc_task.popitem()
-            self.logger.info("Crated new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
+            self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
             return vdc_uuid
         else:
             raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
 
     def delete_tenant(self, tenant_id=None):
-        """Delete a tenant from VIM"""
-        'Returns the tenant identifier'
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
+        """ Delete a tenant from VIM
+             Args:
+                tenant_id is tenant_id to be deleted.
+
+            Return:
+                returns the tenant identifier in UUID format.
+                If action is failed method will throw exception
+        """
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+
+        if tenant_id is not None:
+            if vca._session:
+                #Get OrgVDC
+                url_list = [self.url, '/api/vdc/', tenant_id]
+                orgvdc_herf = ''.join(url_list)
+
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=orgvdc_href,
+                                                headers=headers)
+
+                if response.status_code != requests.codes.ok:
+                    self.logger.debug("delete_tenant():GET REST API call {} failed. "\
+                                      "Return status code {}".format(orgvdc_herf,
+                                                                     response.status_code))
+                    raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
+
+                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+                #For python3
+                #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+                vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
+                vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
+
+                response = self.perform_request(req_type='DELETE',
+                                                url=vdc_remove_href,
+                                                headers=headers)
+
+                if response.status_code == 202:
+                    time.sleep(5)
+                    return tenant_id
+                else:
+                    self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
+                                      "Return status code {}".format(vdc_remove_href,
+                                                                     response.status_code))
+                    raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
+        else:
+            self.logger.debug("delete_tenant():Incorrect tenant ID  {}".format(tenant_id))
+            raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
+
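delete_tenant() shows the discovery idiom this patch uses for destructive REST operations: fetch the entity XML, rebuild its namespace map from lxml's nsmap, and follow the <Link> element whose rel attribute names the action. delete_image() below repeats the same steps. A generic helper would look like this (hypothetical, shown for clarity; Python 2 iteritems as in the rest of the module):

```python
def find_action_href(xml_content, rel):
    """Return the href of the <Link rel="..."> action a vCD entity advertises, or None."""
    root = lxmlElementTree.fromstring(xml_content)
    namespaces = {prefix: uri for prefix, uri in root.nsmap.iteritems() if prefix}
    namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
    link = root.find("xmlns:Link[@rel='{}']".format(rel), namespaces)
    return link.attrib['href'] if link is not None else None
```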
 
     def get_tenant_list(self, filter_dict={}):
         """Obtain tenants of VIM
@@ -458,25 +503,49 @@ class vimconnector(vimconn.vimconnector):
 
         return vdclist
 
-    def new_network(self, net_name, net_type, ip_profile=None, shared=False):
+    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
         """Adds a tenant network to VIM
-            net_name is the name
-            net_type can be 'bridge','data'.'ptp'.
-            ip_profile is a dict containing the IP parameters of the network
-            shared is a boolean
-        Returns the network identifier"""
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
 
         self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
                           .format(net_name, net_type, ip_profile, shared))
 
+        created_items = {}
         isshared = 'false'
         if shared:
             isshared = 'true'
 
+# ############# Stub code for SRIOV #################
+#         if net_type == "data" or net_type == "ptp":
+#             if self.config.get('dv_switch_name') == None:
+#                  raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' in the config")
+#             network_uuid = self.create_dvPort_group(net_name)
+
         network_uuid = self.create_network(network_name=net_name, net_type=net_type,
                                            ip_profile=ip_profile, isshared=isshared)
         if network_uuid is not None:
-            return network_uuid
+            return network_uuid, created_items
         else:
             raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
 
@@ -488,42 +557,71 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
 
         if not self.tenant_name:
             raise vimconn.vimconnConnectionException("Tenant name is empty.")
 
-        vdc = vca.get_vdc(self.tenant_name)
+        org, vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
 
-        vdc_uuid = vdc.get_id().split(":")[3]
-        networks = vca.get_networks(vdc.get_name())
+        vdc_uuid = vdc.get('id').split(":")[3]
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=vdc.get('href'),
+                                            headers=headers)
+        if response.status_code != 200:
+            self.logger.error("Failed to get vdc content")
+            raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+        else:
+            content = XmlElementTree.fromstring(response.content)
+
         network_list = []
         try:
-            for network in networks:
-                filter_dict = {}
-                netid = network.get_id().split(":")
-                if len(netid) != 4:
-                    continue
+            for item in content:
+                if item.tag.split('}')[-1] == 'AvailableNetworks':
+                    for net in item:
+                        response = self.perform_request(req_type='GET',
+                                                        url=net.get('href'),
+                                                        headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("Failed to get network content")
+                            raise vimconn.vimconnNotFoundException("Failed to get network content")
+                        else:
+                            net_details = XmlElementTree.fromstring(response.content)
 
-                filter_dict["name"] = network.get_name()
-                filter_dict["id"] = netid[3]
-                filter_dict["shared"] = network.get_IsShared()
-                filter_dict["tenant_id"] = vdc_uuid
-                if network.get_status() == 1:
-                    filter_dict["admin_state_up"] = True
-                else:
-                    filter_dict["admin_state_up"] = False
-                filter_dict["status"] = "ACTIVE"
-                filter_dict["type"] = "bridge"
-                network_list.append(filter_dict)
-                self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
+                            filter_dict = {}
+                            net_uuid = net_details.get('id').split(":")
+                            if len(net_uuid) != 4:
+                                continue
+                            else:
+                                net_uuid = net_uuid[3]
+                                # create dict entry
+                                self.logger.debug("get_vcd_network_list(): Adding network {} "
+                                                  "to a list vcd id {} network {}".format(net_uuid,
+                                                                                          vdc_uuid,
+                                                                                          net_details.get('name')))
+                                filter_dict["name"] = net_details.get('name')
+                                filter_dict["id"] = net_uuid
+                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                    shared = True
+                                else:
+                                    shared = False
+                                filter_dict["shared"] = shared
+                                filter_dict["tenant_id"] = vdc_uuid
+                                if int(net_details.get('status')) == 1:
+                                    filter_dict["admin_state_up"] = True
+                                else:
+                                    filter_dict["admin_state_up"] = False
+                                filter_dict["status"] = "ACTIVE"
+                                filter_dict["type"] = "bridge"
+                                network_list.append(filter_dict)
+                                self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
         except:
-            self.logger.debug("Error in get_vcd_network_list")
-            self.logger.debug(traceback.format_exc())
+            self.logger.debug("Error in get_vcd_network_list", exc_info=True)
             pass
 
         self.logger.debug("get_vcd_network_list returning {}".format(network_list))
@@ -546,58 +644,86 @@ class vimconnector(vimconn.vimconnector):
             List can be empty
         """
 
-        self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+        self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
 
         if not self.tenant_name:
             raise vimconn.vimconnConnectionException("Tenant name is empty.")
 
-        vdc = vca.get_vdc(self.tenant_name)
+        org, vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
 
-        vdcid = vdc.get_id().split(":")[3]
-        networks = vca.get_networks(vdc.get_name())
-        network_list = []
-
         try:
-            for network in networks:
-                filter_entry = {}
-                net_uuid = network.get_id().split(":")
-                if len(net_uuid) != 4:
-                    continue
-                else:
-                    net_uuid = net_uuid[3]
-                # create dict entry
-                self.logger.debug("Adding  {} to a list vcd id {} network {}".format(net_uuid,
-                                                                                     vdcid,
-                                                                                     network.get_name()))
-                filter_entry["name"] = network.get_name()
-                filter_entry["id"] = net_uuid
-                filter_entry["shared"] = network.get_IsShared()
-                filter_entry["tenant_id"] = vdcid
-                if network.get_status() == 1:
-                    filter_entry["admin_state_up"] = True
-                else:
-                    filter_entry["admin_state_up"] = False
-                filter_entry["status"] = "ACTIVE"
-                filter_entry["type"] = "bridge"
-                filtered_entry = filter_entry.copy()
+            vdcid = vdc.get('id').split(":")[3]
+
+            if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=vdc.get('href'),
+                                                headers=headers)
+            if response.status_code != 200:
+                self.logger.error("Failed to get vdc content")
+                raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+            else:
+                content = XmlElementTree.fromstring(response.content)
+
+            network_list = []
+            for item in content:
+                if item.tag.split('}')[-1] == 'AvailableNetworks':
+                    for net in item:
+                        response = self.perform_request(req_type='GET',
+                                                        url=net.get('href'),
+                                                        headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("Failed to get network content")
+                            raise vimconn.vimconnNotFoundException("Failed to get network content")
+                        else:
+                            net_details = XmlElementTree.fromstring(response.content)
 
-                if filter_dict is not None and filter_dict:
-                    # we remove all the key : value we don't care and match only
-                    # respected field
-                    filtered_dict = set(filter_entry.keys()) - set(filter_dict)
-                    for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
-                    if filter_dict == filter_entry:
-                        network_list.append(filtered_entry)
-                else:
-                    network_list.append(filtered_entry)
-        except:
-            self.logger.debug("Error in get_vcd_network_list")
-            self.logger.debug(traceback.format_exc())
+                            filter_entry = {}
+                            net_uuid = net_details.get('id').split(":")
+                            if len(net_uuid) != 4:
+                                continue
+                            else:
+                                net_uuid = net_uuid[3]
+                                # create dict entry
+                                self.logger.debug("get_network_list(): Adding net {}"
+                                                  " to a list vcd id {} network {}".format(net_uuid,
+                                                                                           vdcid,
+                                                                                           net_details.get('name')))
+                                filter_entry["name"] = net_details.get('name')
+                                filter_entry["id"] = net_uuid
+                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                    shared = True
+                                else:
+                                    shared = False
+                                filter_entry["shared"] = shared
+                                filter_entry["tenant_id"] = vdcid
+                                if int(net_details.get('status')) == 1:
+                                    filter_entry["admin_state_up"] = True
+                                else:
+                                    filter_entry["admin_state_up"] = False
+                                filter_entry["status"] = "ACTIVE"
+                                filter_entry["type"] = "bridge"
+                                filtered_entry = filter_entry.copy()
+
+                                if filter_dict is not None and filter_dict:
+                                    # we remove all the key : value we don't care and match only
+                                    # respected field
+                                    filtered_dict = set(filter_entry.keys()) - set(filter_dict)
+                                    for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
+                                    if filter_dict == filter_entry:
+                                        network_list.append(filtered_entry)
+                                else:
+                                    network_list.append(filtered_entry)
+        except Exception as e:
+            self.logger.debug("Error in get_network_list",exc_info=True)
+            if isinstance(e, vimconn.vimconnException):
+                raise
+            else:
+                raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
 
         self.logger.debug("Returning {}".format(network_list))
         return network_list
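The filter step above prunes each candidate entry down to the keys named in filter_dict and then requires exact equality on those keys, so partial matches are rejected. Equivalent standalone logic:

```python
def matches(entry, filter_dict):
    trimmed = dict(entry)
    for unwanted_key in set(trimmed) - set(filter_dict):
        del trimmed[unwanted_key]       # keep only the keys the filter names
    return trimmed == filter_dict

entry = {'name': 'net1', 'id': 'abc1', 'shared': False, 'status': 'ACTIVE'}
assert matches(entry, {'name': 'net1'})
assert not matches(entry, {'name': 'net1', 'shared': True})
```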
@@ -606,48 +732,85 @@ class vimconnector(vimconn.vimconnector):
         """Method obtains network details of net_id VIM network
            Return a dict with  the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]"""
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+        try:
+            org, vdc = self.get_vdc_details()
+            vdc_id = vdc.get('id').split(":")[3]
+            if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=vdc.get('href'),
+                                                headers=headers)
+            if response.status_code != 200:
+                self.logger.error("Failed to get vdc content")
+                raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+            else:
+                content = XmlElementTree.fromstring(response.content)
 
-        vdc = vca.get_vdc(self.tenant_name)
-        vdc_id = vdc.get_id().split(":")[3]
+            filter_dict = {}
 
-        networks = vca.get_networks(vdc.get_name())
-        filter_dict = {}
+            for item in content:
+                if item.tag.split('}')[-1] == 'AvailableNetworks':
+                    for net in item:
+                        response = self.perform_request(req_type='GET',
+                                                        url=net.get('href'),
+                                                        headers=headers)
 
-        try:
-            for network in networks:
-                vdc_network_id = network.get_id().split(":")
-                if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
-                    filter_dict["name"] = network.get_name()
-                    filter_dict["id"] = vdc_network_id[3]
-                    filter_dict["shared"] = network.get_IsShared()
-                    filter_dict["tenant_id"] = vdc_id
-                    if network.get_status() == 1:
-                        filter_dict["admin_state_up"] = True
+                        if response.status_code != 200:
+                            self.logger.error("Failed to get network content")
+                            raise vimconn.vimconnNotFoundException("Failed to get network content")
+                        else:
+                            net_details = XmlElementTree.fromstring(response.content)
+
+                            vdc_network_id = net_details.get('id').split(":")
+                            if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
+                                filter_dict["name"] = net_details.get('name')
+                                filter_dict["id"] = vdc_network_id[3]
+                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                    shared = True
+                                else:
+                                    shared = False
+                                filter_dict["shared"] = shared
+                                filter_dict["tenant_id"] = vdc_id
+                                if int(net_details.get('status')) == 1:
+                                    filter_dict["admin_state_up"] = True
+                                else:
+                                    filter_dict["admin_state_up"] = False
+                                filter_dict["status"] = "ACTIVE"
+                                filter_dict["type"] = "bridge"
+                                self.logger.debug("Returning {}".format(filter_dict))
+                                return filter_dict
                     else:
-                        filter_dict["admin_state_up"] = False
-                    filter_dict["status"] = "ACTIVE"
-                    filter_dict["type"] = "bridge"
-                    self.logger.debug("Returning {}".format(filter_dict))
-                    return filter_dict
-        except:
+                        raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
+        except Exception as e:
             self.logger.debug("Error in get_network")
             self.logger.debug(traceback.format_exc())
+            if isinstance(e, vimconn.vimconnException):
+                raise
+            else:
+                raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
 
         return filter_dict
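For reference, a successful lookup returns the same per-network dict that get_network_list() assembles (values hypothetical):

```python
net = vim_conn.get_network('f47ac10b-58cc-4372-a567-0e02b2c3d479')
# -> {'name': 'mgmt-net', 'id': 'f47ac10b-...', 'shared': False,
#     'tenant_id': '<vdc uuid>', 'admin_state_up': True,
#     'status': 'ACTIVE', 'type': 'bridge'}
```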
 
-    def delete_network(self, net_id):
+    def delete_network(self, net_id, created_items=None):
         """
-            Method Deletes a tenant network from VIM, provide the network id.
-
-            Returns the network identifier or raise an exception
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network
+        Returns the network identifier or raises an exception upon error or when network is not found
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() for tenant {} is failed.".format(self.tenant_name))
+        # ############# Stub code for SRIOV #################
+#         dvport_group = self.get_dvport_group(net_id)
+#         if dvport_group:
+#             #delete portgroup
+#             status = self.destroy_dvport_group(net_id)
+#             if status:
+#                 # Remove vlanID from persistent info
+#                 if net_id in self.persistent_info["used_vlanIDs"]:
+#                     del self.persistent_info["used_vlanIDs"][net_id]
+#
+#                 return net_id
 
         vcd_network = self.get_vcd_network(network_uuid=net_id)
         if vcd_network is not None and vcd_network:
@@ -674,10 +837,6 @@ class vimconnector(vimconn.vimconnector):
 
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
         dict_entry = {}
         try:
             for net in net_list:
@@ -733,7 +892,14 @@ class vimconnector(vimconn.vimconnector):
         new_flavor=flavor_data
         ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
         cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
-        disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
+        disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
+
+        if not isinstance(ram, int):
+            raise vimconn.vimconnException("Non-integer value for ram")
+        elif not isinstance(cpu, int):
+            raise vimconn.vimconnException("Non-integer value for cpu")
+        elif not isinstance(disk, int):
+            raise vimconn.vimconnException("Non-integer value for disk")
 
         extended_flv = flavor_data.get("extended")
         if extended_flv:
@@ -741,7 +907,8 @@ class vimconnector(vimconn.vimconnector):
             if numas:
                 for numa in numas:
                     #overwrite ram and vcpus
-                    ram = numa['memory']*1024
+                    if 'memory' in numa:
+                        ram = numa['memory']*1024
                     if 'paired-threads' in numa:
                         cpu = numa['paired-threads']*2
                     elif 'cores' in numa:
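The hunk ends mid-chain; assuming the elided branch assigns cpu = numa['cores'] as in previous revisions, the override semantics work out like this (values hypothetical):

```python
# Worked example: a NUMA node with 4 GiB and 2 paired threads overrides the
# top-level 'ram'/'vcpus' keys, giving ram = 4*1024 MiB and cpu = 2*2.
flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10,
               'extended': {'numas': [{'memory': 4, 'paired-threads': 2}]}}
ram, cpu = flavor_data['ram'], flavor_data['vcpus']
for numa in flavor_data['extended']['numas']:
    if 'memory' in numa:
        ram = numa['memory'] * 1024     # GiB per NUMA node -> MiB
    if 'paired-threads' in numa:
        cpu = numa['paired-threads'] * 2
    elif 'cores' in numa:
        cpu = numa['cores']
assert (ram, cpu) == (4096, 4)
```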
@@ -782,12 +949,84 @@ class vimconnector(vimconn.vimconnector):
 
     def delete_image(self, image_id):
         """
-
-        :param image_id:
-        :return:
+            Deletes a tenant image from VIM
+            Args:
+                image_id is ID of Image to be deleted
+            Return:
+                returns the image identifier in UUID format or raises an exception on error
         """
+        conn = self.connect_as_admin()
+        if not conn:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+        # Get Catalog details
+        url_list = [self.url, '/api/catalog/', image_id]
+        catalog_href = ''.join(url_list)
+
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                  'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
+
+        response = self.perform_request(req_type='GET',
+                                        url=catalog_href,
+                                        headers=headers)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.debug("delete_image():GET REST API call {} failed. "\
+                              "Return status code {}".format(catalog_herf,
+                                                             response.status_code))
+            raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
+
+        lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+        namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+        #For python3
+        #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+        namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+
+        catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
+        catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
+        for catalogItem in catalogItems:
+            catalogItem_href = catalogItem.attrib['href']
+
+            response = self.perform_request(req_type='GET',
+                                        url=catalogItem_href,
+                                        headers=headers)
+
+            if response.status_code != requests.codes.ok:
+                self.logger.debug("delete_image():GET REST API call {} failed. "\
+                                  "Return status code {}".format(catalog_herf,
+                                                                 response.status_code))
+                raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
+                                                                                    catalogItem,
+                                                                                    image_id))
+
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+            catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
+
+            #Remove catalogItem
+            response = self.perform_request(req_type='DELETE',
+                                        url=catalogitem_remove_href,
+                                        headers=headers)
+            if response.status_code == requests.codes.no_content:
+                self.logger.debug("Deleted Catalog item {}".format(catalogItem))
+            else:
+                raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
+
+        #Remove catalog
+        url_list = [self.url, '/api/admin/catalog/', image_id]
+        catalog_remove_href = ''.join(url_list)
+        response = self.perform_request(req_type='DELETE',
+                                        url=catalog_remove_href,
+                                        headers=headers)
+
+        if response.status_code == requests.codes.no_content:
+            self.logger.debug("Deleted Catalog {}".format(image_id))
+            return image_id
+        else:
+            raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
 
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
 
     def catalog_exists(self, catalog_name, catalogs):
         """
@@ -797,9 +1036,8 @@ class vimconnector(vimconn.vimconnector):
         :return:
         """
         for catalog in catalogs:
-            if catalog.name == catalog_name:
-                return True
-        return False
+            if catalog['name'] == catalog_name:
+                return catalog['id']
 
     def create_vimcatalog(self, vca=None, catalog_name=None):
         """ Create new catalog entry in vCloud director.
@@ -809,17 +1047,19 @@ class vimconnector(vimconn.vimconnector):
                 catalog_name catalog that client wish to create.   Note no validation done for a name.
                 Client must make sure that provide valid string representation.
 
-             Return (bool) True if catalog created.
+             Returns catalog id if catalog created else None.
 
         """
         try:
-            task = vca.create_catalog(catalog_name, catalog_name)
-            result = vca.block_until_completed(task)
-            if not result:
-                return False
-            catalogs = vca.get_catalogs()
-        except:
-            return False
+            lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
+            if lxml_catalog_element:
+                id_attr_value = lxml_catalog_element.get('id')  # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
+                return id_attr_value.split(':')[-1]
+            catalogs = vca.list_catalogs()
+        except Exception as ex:
+            self.logger.error(
+                'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
+            raise
         return self.catalog_exists(catalog_name, catalogs)
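catalog_exists() and create_vimcatalog() now operate on pyvcloud's dict-based catalog listings rather than schema objects, and the old True/False contract becomes id-or-None, which truthiness-based callers still accept. A sketch assuming list_catalogs() yields dicts with 'name' and 'id' keys, as both helpers rely on (values hypothetical):

```python
catalogs = [{'name': 'ubuntu16-1a2b3c', 'id': '7490d561-d384-4dac-8229-3575fd1fc7b4'}]

def catalog_exists(catalog_name, catalogs):
    for catalog in catalogs:
        if catalog['name'] == catalog_name:
            return catalog['id']        # implicit None when nothing matches

assert catalog_exists('ubuntu16-1a2b3c', catalogs) == '7490d561-d384-4dac-8229-3575fd1fc7b4'
assert catalog_exists('missing', catalogs) is None
```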
 
     # noinspection PyIncorrectDocstring
@@ -844,117 +1084,123 @@ class vimconnector(vimconn.vimconnector):
         #  create vApp Template and check the status; if vCD is able to read the OVF it will respond with an appropriate
         #  status change.
         #  if VCD can parse OVF we upload VMDK file
-        for catalog in vca.get_catalogs():
-            if catalog_name != catalog.name:
-                continue
-            link = filter(lambda link: link.get_type() == "application/vnd.vmware.vcloud.media+xml" and
-                                       link.get_rel() == 'add', catalog.get_Link())
-            assert len(link) == 1
-            data = """
-            <UploadVAppTemplateParams name="%s" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>%s vApp Template</Description></UploadVAppTemplateParams>
-            """ % (escape(catalog_name), escape(description))
-            headers = vca.vcloud_session.get_vcloud_headers()
-            headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
-            response = Http.post(link[0].get_href(), headers=headers, data=data, verify=vca.verify, logger=self.logger)
-            if response.status_code == requests.codes.created:
-                catalogItem = XmlElementTree.fromstring(response.content)
-                entity = [child for child in catalogItem if
-                          child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
-                href = entity.get('href')
-                template = href
-                response = Http.get(href, headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify, logger=self.logger)
-
-                if response.status_code == requests.codes.ok:
-                    media = mediaType.parseString(response.content, True)
-                    link = filter(lambda link: link.get_rel() == 'upload:default',
-                                  media.get_Files().get_File()[0].get_Link())[0]
-                    headers = vca.vcloud_session.get_vcloud_headers()
-                    headers['Content-Type'] = 'Content-Type text/xml'
-                    response = Http.put(link.get_href(),
-                                        data=open(media_file_name, 'rb'),
-                                        headers=headers,
-                                        verify=vca.verify, logger=self.logger)
-                    if response.status_code != requests.codes.ok:
-                        self.logger.debug(
-                            "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
-                                                                                                  media_file_name))
-                        return False
-
-                # TODO fix this with aync block
-                time.sleep(5)
-
-                self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
-
-                # uploading VMDK file
-                # check status of OVF upload and upload remaining files.
-                response = Http.get(template,
-                                    headers=vca.vcloud_session.get_vcloud_headers(),
-                                    verify=vca.verify,
-                                    logger=self.logger)
-
-                if response.status_code == requests.codes.ok:
-                    media = mediaType.parseString(response.content, True)
-                    number_of_files = len(media.get_Files().get_File())
-                    for index in xrange(0, number_of_files):
-                        links_list = filter(lambda link: link.get_rel() == 'upload:default',
-                                            media.get_Files().get_File()[index].get_Link())
-                        for link in links_list:
-                            # we skip ovf since it already uploaded.
-                            if 'ovf' in link.get_href():
-                                continue
-                            # The OVF file and VMDK must be in a same directory
-                            head, tail = os.path.split(media_file_name)
-                            file_vmdk = head + '/' + link.get_href().split("/")[-1]
-                            if not os.path.isfile(file_vmdk):
-                                return False
-                            statinfo = os.stat(file_vmdk)
-                            if statinfo.st_size == 0:
-                                return False
-                            hrefvmdk = link.get_href()
-
-                            if progress:
-                                print("Uploading file: {}".format(file_vmdk))
-                            if progress:
-                                widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
+        try:
+            for catalog in vca.list_catalogs():
+                if catalog_name != catalog['name']:
+                    continue
+                catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
+                data = """
+                <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
+                """.format(catalog_name, description)
+
+                if self.client:
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
+
+                response = self.perform_request(req_type='POST',
+                                                url=catalog_href,
+                                                headers=headers,
+                                                data=data)
+
+                if response.status_code == requests.codes.created:
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if
+                              child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    href = entity.get('href')
+                    template = href
+
+                    response = self.perform_request(req_type='GET',
+                                                    url=href,
+                                                    headers=headers)
+
+                    if response.status_code == requests.codes.ok:
+                        headers['Content-Type'] = 'Content-Type text/xml'
+                        result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"',response.content)
+                        if result:
+                            transfer_href = result.group(1)
+
+                        response = self.perform_request(req_type='PUT',
+                                                    url=transfer_href,
+                                                    headers=headers,
+                                                    data=open(media_file_name, 'rb'))
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug(
+                                "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
+                                                                                                      media_file_name))
+                            return False
+
+                    # TODO: fix this with an async block
+                    time.sleep(5)
+
+                    self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
+
+                    # uploading VMDK file
+                    # check status of OVF upload and upload remaining files.
+                    response = self.perform_request(req_type='GET',
+                                                    url=template,
+                                                    headers=headers)
+
+                    if response.status_code == requests.codes.ok:
+                        result = re.search('rel="upload:default"\s*href="(.*?vmdk)"',response.content)
+                        if result:
+                            link_href = result.group(1)
+                        # we skip ovf since it already uploaded.
+                        if 'ovf' in link_href:
+                            continue
+                        # The OVF file and VMDK must be in the same directory
+                        head, tail = os.path.split(media_file_name)
+                        file_vmdk = head + '/' + link_href.split("/")[-1]
+                        if not os.path.isfile(file_vmdk):
+                            return False
+                        statinfo = os.stat(file_vmdk)
+                        if statinfo.st_size == 0:
+                            return False
+                        hrefvmdk = link_href
+
+                        if progress:
+                            widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
                                            FileTransferSpeed()]
-                                progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
-
-                            bytes_transferred = 0
-                            f = open(file_vmdk, 'rb')
-                            while bytes_transferred < statinfo.st_size:
-                                my_bytes = f.read(chunk_bytes)
-                                if len(my_bytes) <= chunk_bytes:
-                                    headers = vca.vcloud_session.get_vcloud_headers()
-                                    headers['Content-Range'] = 'bytes %s-%s/%s' % (
-                                        bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
-                                    headers['Content-Length'] = str(len(my_bytes))
-                                    response = Http.put(hrefvmdk,
-                                                        headers=headers,
-                                                        data=my_bytes,
-                                                        verify=vca.verify,
-                                                        logger=None)
-
-                                    if response.status_code == requests.codes.ok:
-                                        bytes_transferred += len(my_bytes)
-                                        if progress:
-                                            progress_bar.update(bytes_transferred)
-                                    else:
-                                        self.logger.debug(
-                                            'file upload failed with error: [%s] %s' % (response.status_code,
+                            progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
+
+                        bytes_transferred = 0
+                        f = open(file_vmdk, 'rb')
+                        while bytes_transferred < statinfo.st_size:
+                            my_bytes = f.read(chunk_bytes)
+                            if len(my_bytes) <= chunk_bytes:
+                                headers['Content-Range'] = 'bytes %s-%s/%s' % (
+                                    bytes_transferred, bytes_transferred + len(my_bytes) - 1, statinfo.st_size)
+                                headers['Content-Length'] = str(len(my_bytes))
+                                response = requests.put(url=hrefvmdk,
+                                                         headers=headers,
+                                                         data=my_bytes,
+                                                         verify=False)
+                                if response.status_code == requests.codes.ok:
+                                    bytes_transferred += len(my_bytes)
+                                    if progress:
+                                        progress_bar.update(bytes_transferred)
+                                else:
+                                    self.logger.debug(
+                                        'file upload failed with error: [%s] %s' % (response.status_code,
                                                                                         response.content))
 
-                                        f.close()
-                                        return False
-                            f.close()
-                            if progress:
-                                progress_bar.finish()
+                                    f.close()
+                                    return False
+                        f.close()
+                        if progress:
+                            progress_bar.finish()
                             time.sleep(10)
                     return True
                 else:
                     self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
                                       format(catalog_name, media_file_name))
                     return False
+        except Exception as exp:
+            self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                .format(catalog_name, media_file_name, exp))
+            raise vimconn.vimconnException(
+                "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                .format(catalog_name, media_file_name, exp))
 
         self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
         return False
@@ -988,9 +1234,9 @@ class vimconnector(vimconn.vimconnector):
         """
 
         for catalog in catalogs:
-            if catalog.name == catalog_name:
-                catalog_id = catalog.get_id().split(":")
-                return catalog_id[3]
+            if catalog['name'] == catalog_name:
+                catalog_id = catalog['id']
+                return catalog_id
         return None
 
     def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
@@ -1007,9 +1253,28 @@ class vimconnector(vimconn.vimconnector):
             return None
 
         for catalog in catalogs:
-            catalog_id = catalog.get_id().split(":")[3]
+            catalog_id = catalog.get('id')
+            if catalog_id == catalog_uuid:
+                return catalog.get('name')
+        return None
+
+    def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
+        """  Method check catalog and return catalog name lookup done by catalog UUID.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs name or None
+        """
+
+        if not self.validate_uuid4(uuid_string=catalog_uuid):
+            return None
+
+        for catalog in catalogs:
+            catalog_id = catalog.get('id')
             if catalog_id == catalog_uuid:
-                return catalog.name
+                return catalog
         return None
 
     def get_image_id_from_path(self, path=None, progress=False):
@@ -1032,9 +1297,6 @@ class vimconnector(vimconn.vimconnector):
 
         Return: if image uploaded correct method will provide image catalog UUID.
         """
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
 
         if not path:
             raise vimconn.vimconnException("Image path can't be None.")
@@ -1058,39 +1320,44 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("File name {} Catalog Name {} file path {} "
                           "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
 
-        catalogs = vca.get_catalogs()
+        try:
+            org,vdc = self.get_vdc_details()
+            catalogs = org.list_catalogs()
+        except Exception as exp:
+            self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
+            raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
+
         if len(catalogs) == 0:
             self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
-            result = self.create_vimcatalog(vca, catalog_md5_name)
-            if not result:
+            if self.create_vimcatalog(org, catalog_md5_name) is None:
                 raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
-            result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
+
+            result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
                                           media_name=filename, medial_file_name=path, progress=progress)
             if not result:
                 raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
-            return self.get_catalogid(catalog_name, vca.get_catalogs())
+            return self.get_catalogid(catalog_md5_name, org.list_catalogs())
         else:
             for catalog in catalogs:
                 # search for existing catalog if we find same name we return ID
                 # TODO optimize this
-                if catalog.name == catalog_md5_name:
+                if catalog['name'] == catalog_md5_name:
                     self.logger.debug("Found existing catalog entry for {} "
                                       "catalog id {}".format(catalog_name,
                                                              self.get_catalogid(catalog_md5_name, catalogs)))
-                    return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
+                    return self.get_catalogid(catalog_md5_name, catalogs)
 
         # if we didn't find existing catalog we create a new one and upload image.
         self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
-        result = self.create_vimcatalog(vca, catalog_md5_name)
-        if not result:
+        if self.create_vimcatalog(org, catalog_md5_name) is None:
             raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
 
-        result = self.upload_vimimage(vca=vca, catalog_name=catalog_md5_name,
+        result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
                                       media_name=filename, medial_file_name=path, progress=progress)
         if not result:
             raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
 
-        return self.get_catalogid(catalog_md5_name, vca.get_catalogs())
+        return self.get_catalogid(catalog_md5_name, org.list_catalogs())
 
     def get_image_list(self, filter_dict={}):
         '''Obtain tenant images from VIM
@@ -1103,18 +1370,17 @@ class vimconnector(vimconn.vimconnector):
             [{<the fields at Filter_dict plus some VIM specific>}, ...]
             List can be empty
         '''
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
         try:
+            org, vdc = self.get_vdc_details()
             image_list = []
-            catalogs = vca.get_catalogs()
+            catalogs = org.list_catalogs()
             if len(catalogs) == 0:
                 return image_list
             else:
                 for catalog in catalogs:
-                    catalog_uuid = catalog.get_id().split(":")[3]
-                    name = catalog.name
+                    catalog_uuid = catalog.get('id')
+                    name = catalog.get('name')
                     filtered_dict = {}
                     if filter_dict.get("name") and filter_dict["name"] != name:
                         continue
@@ -1145,6 +1411,9 @@ class vimconnector(vimconn.vimconnector):
         try:
             refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
                           vdc.ResourceEntities.ResourceEntity)
+            #For python3
+            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
+            #         if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
             if len(refs) == 1:
                 return refs[0].href.split("vapp")[1][1:]
         except Exception as e:
@@ -1169,6 +1438,9 @@ class vimconnector(vimconn.vimconnector):
             refs = filter(lambda ref:
                           ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
                           vdc.ResourceEntities.ResourceEntity)
+            #For python3
+            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
+            #         if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
             for ref in refs:
                 vappid = ref.href.split("vapp")[1][1:]
                 # find vapp with respected vapp uuid
@@ -1179,80 +1451,120 @@ class vimconnector(vimconn.vimconnector):
             return False
         return False
 
-    def get_namebyvappid(self, vca=None, vdc=None, vapp_uuid=None):
+    def get_namebyvappid(self, vapp_uuid=None):
         """Method returns vApp name from vCD and lookup done by vapp_id.
 
         Args:
-            vca: Connector to VCA
-            vdc: The VDC object.
             vapp_uuid: vappid is application identifier
 
         Returns:
             The return vApp name otherwise None
         """
-
         try:
-            refs = filter(lambda ref: ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
-                          vdc.ResourceEntities.ResourceEntity)
-            for ref in refs:
-                # we care only about UUID the rest doesn't matter
-                vappid = ref.href.split("vapp")[1][1:]
-                if vappid == vapp_uuid:
-                    response = Http.get(ref.href, headers=vca.vcloud_session.get_vcloud_headers(), verify=vca.verify,
-                                        logger=self.logger)
-                    tree = XmlElementTree.fromstring(response.content)
-                    return tree.attrib['name']
+            if self.client and vapp_uuid:
+                vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+                response = self.perform_request(req_type='GET',
+                                                url=vapp_call,
+                                                headers=headers)
+                #Retry login if session expired & retry sending request
+                if response.status_code == 403:
+                    response = self.retry_rest('GET', vapp_call)
+
+                tree = XmlElementTree.fromstring(response.content)
+                return tree.attrib['name']
         except Exception as e:
             self.logger.exception(e)
             return None
         return None
 
-    def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
-                       cloud_config=None, disk_list=None):
+    def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
+                       cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
         """Adds a VM instance to VIM
         Params:
-            start: indicates if VM must start or boot in pause mode. Ignored
-            image_id,flavor_id: image and flavor uuid
-            net_list: list of interfaces, each one is a dictionary with:
-                name:
-                net_id: network uuid to connect
-                vpci: virtual vcpi to assign
-                model: interface model, virtio, e2000, ...
-                mac_address:
-                use: 'data', 'bridge',  'mgmt'
-                type: 'virtual', 'PF', 'VF', 'VFnotShared'
-                vim_id: filled/added by this function
-                cloud_config: can be a text script to be passed directly to cloud-init,
-                    or an object to inject users and ssh keys with format:
-                        key-pairs: [] list of keys to install to the default user
-                        users: [{ name, key-pairs: []}] list of users to add with their key-pair
-                #TODO ip, security groups
-        Returns >=0, the instance identifier
-                <0, error_text
+            'start': (boolean) indicates if VM must start or created in pause mode.
+            'image_id','flavor_id': image and flavor VIM id to use for the VM
+            'net_list': list of interfaces, each one is a dictionary with:
+                'name': (optional) name for the interface.
+                'net_id': VIM network id where this interface must be connected to. Mandatory for type==virtual
+                'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                'model': (optional and only makes sense for type==virtual) interface model: virtio, e1000, ...
+                'mac_address': (optional) mac address to assign to this interface
+                #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type is VF and net_id is not provided,
+                    the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+                'type': (mandatory) can be one of:
+                    'virtual', in this case always connected to a network of type 'net_type=bridge'
+                    'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp
+                           network or it can be created unconnected
+                    'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                    'VFnotShared' (SRIOV without VLAN tag): same as PF for network connectivity. VF where no other VFs
+                            are allocated on the same physical NIC
+                'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                                or True, it must apply the default VIM behaviour
+                After execution the method will add the key:
+                'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                        interface. 'net_list' is modified
+            'cloud_config': (optional) dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                'size': (mandatory) string with the size of the disk in GB
+            availability_zone_index: Index of availability_zone_list to use for this VM. None if not AV required
+            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                availability_zone_index is None
+        Returns a tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
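+
+            Example (illustrative values only, not from a real deployment):
+                net_list = [{'name': 'mgmt0', 'net_id': '<vCD network uuid>', 'type': 'virtual', 'use': 'mgmt'}]
+                cloud_config = {'key-pairs': ['ssh-rsa AAAA... user@host']}
+                instance_id, created_items = self.new_vminstance(name='my-vnf-vm', image_id=image_uuid,
+                                                                  flavor_id=flavor_uuid, net_list=net_list,
+                                                                  cloud_config=cloud_config)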
         """
-
         self.logger.info("Creating new instance for entry {}".format(name))
-        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".format(
-                                    description, start, image_id, flavor_id, net_list, cloud_config))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
+                          "availability_zone_index {} availability_zone_list {}"\
+                          .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
+                                  availability_zone_index, availability_zone_list))
 
         #new vm name = vmname + tenant_id + uuid
         new_vm_name = [name, '-', str(uuid.uuid4())]
         vmname_andid = ''.join(new_vm_name)
 
-        # if vm already deployed we return existing uuid
-        # vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), name)
-        # if vapp_uuid is not None:
-        #     return vapp_uuid
+        for net in net_list:
+            if net['type'] == "PCI-PASSTHROUGH":
+                raise vimconn.vimconnNotSupportedException(
+                      "Current vCD version does not support type : {}".format(net['type']))
+
+        if len(net_list) > 10:
+            raise vimconn.vimconnNotSupportedException(
+                      "The VM hardware versions 7 and above support upto 10 NICs only")
 
+        # if vm already deployed we return existing uuid
         # we check for presence of VDC, Catalog entry and Flavor.
-        vdc = vca.get_vdc(self.tenant_name)
+        org, vdc = self.get_vdc_details()
         if vdc is None:
             raise vimconn.vimconnNotFoundException(
                 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
-        catalogs = vca.get_catalogs()
+        catalogs = org.list_catalogs()
+        if catalogs is None:
+            #Retry once by refreshing the token, in case the first call failed
+            self.get_token()
+            org = Org(self.client, resource=self.client.get_org())
+            catalogs = org.list_catalogs()
         if catalogs is None:
             raise vimconn.vimconnNotFoundException(
                 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
@@ -1264,13 +1576,12 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
                                                    "(Failed retrieve catalog information {})".format(name, image_id))
 
-
         # Set vCPU and Memory based on flavor.
-        #
         vm_cpus = None
         vm_memory = None
         vm_disk = None
-        pci_devices_info = []
+        numas = None
+
         if flavor_id is not None:
             if flavor_id not in vimconnector.flavorlist:
                 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
@@ -1285,11 +1596,7 @@ class vimconnector(vimconn.vimconnector):
                     extended = flavor.get("extended", None)
                     if extended:
                         numas=extended.get("numas", None)
-                        if numas:
-                            for numa in numas:
-                                for interface in numa.get("interfaces",() ):
-                                    if interface["dedicated"].strip()=="yes":
-                                        pci_devices_info.append(interface)
+
                 except Exception as exp:
                     raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
 
@@ -1304,16 +1611,19 @@ class vimconnector(vimconn.vimconnector):
        #If no mgmt net, then the 1st net in net_list is considered as primary net.
         primary_net = None
         primary_netname = None
+        primary_net_href = None
         network_mode = 'bridged'
         if net_list is not None and len(net_list) > 0:
             for net in net_list:
-                if 'use' in net and net['use'] == 'mgmt':
+                if 'use' in net and net['use'] == 'mgmt' and not primary_net:
                     primary_net = net
             if primary_net is None:
                 primary_net = net_list[0]
 
             try:
                 primary_net_id = primary_net['net_id']
+                url_list = [self.url, '/api/network/', primary_net_id]
+                primary_net_href = ''.join(url_list) 
                 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
                 if 'name' in network_dict:
                     primary_netname = network_dict['name']
@@ -1325,31 +1635,198 @@ class vimconnector(vimconn.vimconnector):
 
         # use: 'data', 'bridge', 'mgmt'
         # create vApp.  Set vcpu and ram based on flavor id.
-        vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
-                                   self.get_catalogbyid(image_id, catalogs),
-                                   network_name=None,  # None while creating vapp
-                                   network_mode=network_mode,
-                                   vm_name=vmname_andid,
-                                   vm_cpus=vm_cpus,  # can be None if flavor is None
-                                   vm_memory=vm_memory)  # can be None if flavor is None
-
-        if vapptask is None or vapptask is False:
-            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): failed deploy vApp {}".format(vmname_andid))
-        if type(vapptask) is VappTask:
-            vca.block_until_completed(vapptask)
+        try:
+            vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
+            if not vdc_obj:
+                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
+
+            for retry in (1,2):
+                items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
+                catalog_items = [items.attrib]
+
+                if len(catalog_items) == 1:
+                    if self.client:
+                        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+                    response = self.perform_request(req_type='GET',
+                                                    url=catalog_items[0].get('href'),
+                                                    headers=headers)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_tempalte_href = entity.get("href")
+
+                response = self.perform_request(req_type='GET',
+                                                url=vapp_template_href,
+                                                headers=headers)
+                if response.status_code != requests.codes.ok:
+                    self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
+                                                                                           response.status_code))
+                else:
+                    result = (response.content).replace("\n"," ")
+
+                vapp_template_tree = XmlElementTree.fromstring(response.content)
+                children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
+                vm_element = [child for child in children_element if 'Vm' in child.tag][0]
+                vm_name = vm_element.get('name')
+                vm_id = vm_element.get('id')
+                vm_href = vm_element.get('href')
+
+                cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
+
+                headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
+                vdc_id = vdc.get('id').split(':')[-1]
+                instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
+                                                                                                vdc_id)
+                data = """<?xml version="1.0" encoding="UTF-8"?>
+                <InstantiateVAppTemplateParams
+                xmlns="http://www.vmware.com/vcloud/v1.5"
+                name="{}"
+                deploy="false"
+                powerOn="false"
+                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+                <Description>Vapp instantiation</Description>
+                <InstantiationParams>
+                     <NetworkConfigSection>
+                         <ovf:Info>Configuration parameters for logical networks</ovf:Info>
+                         <NetworkConfig networkName="{}">
+                             <Configuration>
+                                 <ParentNetwork href="{}" />
+                                 <FenceMode>bridged</FenceMode>
+                             </Configuration>
+                         </NetworkConfig>
+                     </NetworkConfigSection>
+                <LeaseSettingsSection
+                type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
+                <ovf:Info>Lease Settings</ovf:Info>
+                <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
+                <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
+                </LeaseSettingsSection>
+                </InstantiationParams>
+                <Source href="{}"/>
+                <SourcedItem>
+                <Source href="{}" id="{}" name="{}"
+                type="application/vnd.vmware.vcloud.vm+xml"/>
+                <VmGeneralParams>
+                    <NeedsCustomization>false</NeedsCustomization>
+                </VmGeneralParams>
+                <InstantiationParams>
+                      <NetworkConnectionSection>
+                      <ovf:Info>Specifies the available VM network connections</ovf:Info>
+                      <NetworkConnection network="{}">
+                      <NetworkConnectionIndex>0</NetworkConnectionIndex>
+                      <IsConnected>true</IsConnected>
+                      <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
+                      </NetworkConnection>
+                      </NetworkConnectionSection><ovf:VirtualHardwareSection>
+                      <ovf:Info>Virtual hardware requirements</ovf:Info>
+                      <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+                      xmlns:vmw="http://www.vmware.com/schema/ovf">
+                      <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+                      <rasd:Description>Number of Virtual CPUs</rasd:Description>
+                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
+                      <rasd:InstanceID>4</rasd:InstanceID>
+                      <rasd:Reservation>0</rasd:Reservation>
+                      <rasd:ResourceType>3</rasd:ResourceType>
+                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
+                      <rasd:Weight>0</rasd:Weight>
+                      <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
+                      </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
+                      <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+                      <rasd:Description>Memory Size</rasd:Description>
+                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
+                      <rasd:InstanceID>5</rasd:InstanceID>
+                      <rasd:Reservation>0</rasd:Reservation>
+                      <rasd:ResourceType>4</rasd:ResourceType>
+                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
+                      <rasd:Weight>0</rasd:Weight>
+                      </ovf:Item>
+                </ovf:VirtualHardwareSection>
+                </InstantiationParams>
+                </SourcedItem>
+                <AllEULAsAccepted>false</AllEULAsAccepted>
+                </InstantiateVAppTemplateParams>""".format(vmname_andid,
+                                                        primary_netname,
+                                                        primary_net_href,
+                                                     vapp_template_href,
+                                                                vm_href,
+                                                                  vm_id,
+                                                                vm_name,
+                                                        primary_netname,
+                                                               cpu=cpus,
+                                                             core=cores,
+                                                       memory=memory_mb)
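+                # POST the instantiation payload; on success vCD answers 201 Created and the
+                # returned vApp entity carries the task we later wait on for completion.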
+
+                response = self.perform_request(req_type='POST',
+                                                url=instantiate_vapp_href,
+                                                headers=headers,
+                                                data=data)
+
+                if response.status_code != 201:
+                    self.logger.error("REST call {} failed reason : {}"\
+                         "status code : {}".format(instantiate_vapp_href,
+                                                        response.content,
+                                                   response.status_code))
+                    raise vimconn.vimconnException("new_vminstance(): Failed to create"\
+                                                        "vAapp {}".format(vmname_andid))
+                else:
+                    vapptask = self.get_task_from_response(response.content)
+
+                if vapptask is None and retry==1:
+                    self.get_token() # Retry getting token
+                    continue
+                else:
+                    break
+
+            if vapptask is None or vapptask is False:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+
+            # wait for task to complete
+            result = self.client.get_task_monitor().wait_for_success(task=vapptask)
+
+            if result.get('status') == 'success':
+                self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
+            else:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
 
         # we should have now vapp in undeployed state.
-        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
-        vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
-        if vapp is None:
+        try:
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vmname_andid)
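+            # vCD returns ids as URNs such as 'urn:vcloud:vapp:<uuid>'; keep only the UUID tail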
+            vapp_uuid = vapp_resource.get('id').split(':')[-1]
+            vapp = VApp(self.client, resource=vapp_resource)
+
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                    .format(vmname_andid, exp))
+
+        if vapp_uuid is None:
             raise vimconn.vimconnUnexpectedResponse(
-                "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format(
+                "new_vminstance(): Failed to retrieve vApp {} after creation".format(
                                                                             vmname_andid))
 
-        #Add PCI passthrough configrations
-        PCI_devices_status = False
+        #Add PCI passthrough/SRIOV configurations
         vm_obj = None
-        si = None
+        pci_devices_info = []
+        reserve_memory = False
+
+        for net in net_list:
+            if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
+                pci_devices_info.append(net)
+            elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
+                reserve_memory = True
+
+        #Add PCI
         if len(pci_devices_info) > 0:
             self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
                                                                         vmname_andid ))
@@ -1361,23 +1838,67 @@ class vimconnector(vimconn.vimconnector):
                                                             pci_devices_info,
                                                             vmname_andid)
                                  )
+                reserve_memory = True
             else:
                 self.logger.info("Fail to add PCI devives {} to VM {}".format(
                                                             pci_devices_info,
                                                             vmname_andid)
                                  )
-        # add vm disk
+
+        # Modify vm disk
         if vm_disk:
             #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
             result = self.modify_vm_disk(vapp_uuid, vm_disk)
             if result :
                 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
 
+        #Add new or existing disks to vApp
+        if disk_list:
+            added_existing_disk = False
+            for disk in disk_list:
+                if 'device_type' in disk and disk['device_type'] == 'cdrom':
+                    image_id = disk['image_id']
+                    # Adding CD-ROM to VM
+                    # will revisit code once specification ready to support this feature
+                    self.insert_media_to_vm(vapp, image_id)
+                elif "image_id" in disk and disk["image_id"] is not None:
+                    self.logger.debug("Adding existing disk from image {} to vm {} ".format(
+                                                                    disk["image_id"] , vapp_uuid))
+                    self.add_existing_disk(catalogs=catalogs,
+                                           image_id=disk["image_id"],
+                                           size = disk["size"],
+                                           template_name=templateName,
+                                           vapp_uuid=vapp_uuid
+                                           )
+                    added_existing_disk = True
+                else:
+                    #Wait till added existing disk gets reflected into vCD database/API
+                    if added_existing_disk:
+                        time.sleep(5)
+                        added_existing_disk = False
+                    self.add_new_disk(vapp_uuid, disk['size'])
+
+        if numas:
+            # Assigning numa affinity setting
+            for numa in numas:
+                if 'paired-threads-id' in numa:
+                    paired_threads_id = numa['paired-threads-id']
+                    self.set_numa_affinity(vapp_uuid, paired_threads_id)
+
         # add NICs & connect to networks in netlist
         try:
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vmname_andid)
+            vapp = VApp(self.client, resource=vapp_resource)
+            vapp_id = vapp_resource.get('id').split(':')[-1]
+
+            self.logger.info("Removing primary NIC: ")
+            # First remove all NICs so that NIC properties can be adjusted as needed
+            self.remove_primary_network_adapter_from_all_vms(vapp)
+
             self.logger.info("Request to connect VM to a network: {}".format(net_list))
-            nicIndex = 0
             primary_nic_index = 0
+            nicIndex = 0
             for net in net_list:
                 # openmano uses network id in UUID format.
                 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
@@ -1387,6 +1908,10 @@ class vimconnector(vimconn.vimconnector):
                 if 'net_id' not in net:
                     continue
 
+                #Using net_id as vim_id i.e. vim interface id, as we do not have a separate vim interface id
+                #Same will be returned in refresh_vms_status() as vim_interface_id
+                net['vim_id'] = net['net_id']  # Provide the same VIM identifier as the VIM network
+
                 interface_net_id = net['net_id']
                 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
                 interface_network_mode = net['use']
@@ -1400,58 +1925,118 @@ class vimconnector(vimconn.vimconnector):
                                   - NONE (No IP addressing mode specified.)"""
 
                 if primary_netname is not None:
-                    nets = filter(lambda n: n.name == interface_net_name, vca.get_networks(self.tenant_name))
+                    self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
+                    nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
+                    #For python3
+                    #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
                     if len(nets) == 1:
-                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
-                        task = vapp.connect_to_network(nets[0].name, nets[0].href)
-                        if type(task) is GenericTask:
-                            vca.block_until_completed(task)
-                        # connect network to VM - with all DHCP by default
-                        self.logger.info("new_vminstance(): Connecting VM to a network {}".format(nets[0].name))
-                        task = vapp.connect_vms(nets[0].name,
-                                                connection_index=nicIndex,
-                                                connections_primary_index=primary_nic_index,
-                                                ip_allocation_mode='DHCP')
-                        if type(task) is GenericTask:
-                            vca.block_until_completed(task)
+                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
+
+                        if interface_net_name != primary_netname:
+                            # connect network to VM - with all DHCP by default
+                            self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
+                            self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
+
+                        type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
+                        nic_type = 'VMXNET3'
+                        if 'type' in net and net['type'] not in type_list:
+                            # fetch the nic type from the vnf: paravirtualized models map to
+                            # VMXNET3, any other explicit model is passed through unchanged
+                            if net.get('model') is not None:
+                                if net['model'].lower() in ('paravirt', 'virtio'):
+                                    nic_type = 'VMXNET3'
+                                else:
+                                    nic_type = net['model']
+                            elif net['type'] in ['SR-IOV', 'VF']:
+                                nic_type = net['type']
+
+                            self.logger.info("new_vminstance(): adding network adapter "\
+                                             "to a network {}".format(nets[0].get('name')))
+                            self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
+                                                            primary_nic_index,
+                                                            nicIndex,
+                                                            net,
+                                                            nic_type=nic_type)
                 nicIndex += 1
-        except KeyError:
-            # it might be a case if specific mandatory entry in dict is empty
-            self.logger.debug("Key error {}".format(KeyError.message))
-            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
 
-        # deploy and power on vm
-        self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
-        deploytask = vapp.deploy(powerOn=False)
-        if type(deploytask) is GenericTask:
-            vca.block_until_completed(deploytask)
-
-        # If VM has PCI devices reserve memory for VM
-        if PCI_devices_status and vm_obj and vcenter_conect:
-            memReserve = vm_obj.config.hardware.memoryMB
-            spec = vim.vm.ConfigSpec()
-            spec.memoryAllocation = vim.ResourceAllocationInfo(reservation=memReserve)
-            task = vm_obj.ReconfigVM_Task(spec=spec)
-            if task:
-                result = self.wait_for_vcenter_task(task, vcenter_conect)
-                self.logger.info("Reserved memmoery {} MB for "\
-                                 "VM VM status: {}".format(str(memReserve),result))
+            # cloud-init for ssh-key injection
+            if cloud_config:
+                # Create a catalog which will be carrying the config drive ISO
+                # This catalog is deleted during vApp deletion. The catalog name carries
+                # vApp UUID and that's how it gets identified during its deletion.
+                config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
+                self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
+                    config_drive_catalog_name))
+                config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
+                if config_drive_catalog_id is None:
+                    error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
+                                "ISO".format(config_drive_catalog_name)
+                    raise Exception(error_msg)
+
+                # Create config-drive ISO
+                _, userdata = self._create_user_data(cloud_config)
+                # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
+                iso_path = self.create_config_drive_iso(userdata)
+                self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
+
+                self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
+                self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
+                # Attach the config-drive ISO to the VM
+                self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
+                # The ISO remains in INVALID_STATE right after the PUT request (it's a blocking call though)
+                time.sleep(5)
+                self.insert_media_to_vm(vapp, config_drive_catalog_id)
+                shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
+
+            # If VM has PCI devices or SRIOV reserve memory for VM
+            if reserve_memory:
+                self.reserve_memory_for_all_vms(vapp, memory_mb)
+
+            self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
+
+            poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
+            result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+            if result.get('status') == 'success':
+                self.logger.info("new_vminstance(): Successfully power on "\
+                                             "vApp {}".format(vmname_andid))
             else:
-                self.logger.info("Fail to reserved memmoery {} to VM {}".format(
-                                                            str(memReserve),str(vm_obj)))
+                self.logger.error("new_vminstance(): failed to power on vApp "\
+                                                     "{}".format(vmname_andid))
 
-        self.logger.debug("new_vminstance(): power on vApp {} ".format(name))
-        poweron_task = vapp.poweron()
-        if type(poweron_task) is GenericTask:
-            vca.block_until_completed(poweron_task)
+        except Exception as exp:
+            try:
+                self.delete_vminstance(vapp_uuid)
+            except Exception as exp2:
+                self.logger.error("new_vminstance rollback fail {}".format(exp2))
+            # this can happen if a specific mandatory entry in the dict is empty, or on some other pyvcloud exception
+            self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
+                              .format(name, exp))
+            raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
+                                           .format(name, exp))
 
         # check if vApp deployed and if that the case return vApp UUID otherwise -1
         wait_time = 0
         vapp_uuid = None
         while wait_time <= MAX_WAIT_TIME:
-            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
-            if vapp and vapp.me.deployed:
-                vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+            try:
+                vapp_resource = vdc_obj.get_vapp(vmname_andid)
+                vapp = VApp(self.client, resource=vapp_resource)
+            except Exception as exp:
+                raise vimconn.vimconnUnexpectedResponse(
+                        "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                        .format(vmname_andid, exp))
+
+            #if vapp and vapp.me.deployed:
+            if vapp and vapp_resource.get('deployed') == 'true':
+                vapp_uuid = vapp_resource.get('id').split(':')[-1]
                 break
             else:
                 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
@@ -1459,46 +2044,655 @@ class vimconnector(vimconn.vimconnector):
 
             wait_time +=INTERVAL_TIME
 
+        #SET Affinity Rule for VM
+        #Pre-requisites: User has created Host Groups in vCenter with respective Hosts to be used
+        #While creating VIM account user has to pass the Host Group names in availability_zone list
+        #"availability_zone" is a part of VIM "config" parameters
+        #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
+        #Host groups are referred as availability zones
+        #With following procedure, deployed VM will be added into a VM group.
+        #Then a VM to Host Affinity rule will be created using the VM group & Host group.
+        if availability_zone_list:
+            self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
+            #Admin access required for creating Affinity rules
+            client = self.connect_as_admin()
+            if not client:
+                raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
+            else:
+                self.client = client
+            if self.client:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            #Step1: Get provider vdc details from organization
+            pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
+            if pvdc_href is None:
+                msg = "new_vminstance(): Error in finding provider vdc details for tenant {}"\
+                      .format(self.tenant_name)
+                self.log_message(msg)
+
+            #Step2: Found required pvdc, now get resource pool information
+            respool_href = self.get_resource_pool_details(pvdc_href, headers)
+            if respool_href is None:
+                #Raise error if respool_href not found
+                msg = "new_vminstance(): Error in finding resource pool details in pvdc {}"\
+                      .format(pvdc_href)
+                self.log_message(msg)
+
+            #Step3: Verify requested availability zone(hostGroup) is present in vCD
+            # get availability Zone
+            vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
+            # check if provided av zone(hostGroup) is present in vCD VIM
+            status = self.check_availibility_zone(vm_az, respool_href, headers)
+            if status is False:
+                msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
+                       "resource pool {} status: {}".format(vm_az,respool_href,status)
+                self.log_message(msg)
+            else:
+                self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
+
+            #Step4: Find VM group references to create vm group
+            vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
+            if vmgrp_href is None:
+                msg = "new_vminstance(): No reference to VmGroup found in resource pool"
+                self.log_message(msg)
+
+            #Step5: Create a VmGroup with name az_VmGroup
+            vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
+            status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
+            if status is not True:
+                msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
+                self.log_message(msg)
+
+            #VM Group url to add vms to vm group
+            vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/" + vmgrp_name
+
+            #Step6: Add VM to VM Group
+            #Find VM uuid from vapp_uuid
+            vm_details = self.get_vapp_details_rest(vapp_uuid)
+            vm_uuid = vm_details['vmuuid']
+
+            status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
+            if status is not True:
+                msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
+                self.log_message(msg)
+
+            #Step7: Create VM to Host affinity rule
+            addrule_href = self.get_add_rule_reference(respool_href, headers)
+            if addrule_href is None:
+                msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
+                      .format(respool_href)
+                self.log_message(msg)
+
+            status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity", headers)
+            if status is False:
+                msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
+                      .format(name, vm_az)
+                self.log_message(msg)
+            else:
+                self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
+                                    .format(name, vm_az))
+            #Reset token to a normal user to perform other operations
+            self.get_token()
+
         if vapp_uuid is not None:
-            return vapp_uuid
+            return vapp_uuid, None
         else:
             raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
 
-    ##
-    ##
-    ##  based on current discussion
-    ##
-    ##
-    ##  server:
-    #   created: '2016-09-08T11:51:58'
-    #   description: simple-instance.linux1.1
-    #   flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
-    #   hostId: e836c036-74e7-11e6-b249-0800273e724c
-    #   image: dde30fe6-75a9-11e6-ad5f-0800273e724c
-    #   status: ACTIVE
-    #   error_msg:
-    #   interfaces: …
-    #
-    def get_vminstance(self, vim_vm_uuid=None):
-        """Returns the VM instance information from VIM"""
+    def create_config_drive_iso(self, user_data):
+        tmpdir = tempfile.mkdtemp()
+        iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
+        latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
+        os.makedirs(latest_dir)
+        with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
+                open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
+            userdata_file_obj.write(user_data)
+            meta_file_obj.write(json.dumps({"availability_zone": "nova",
+                                            "launch_index": 0,
+                                            "name": "ConfigDrive",
+                                            "uuid": str(uuid.uuid4())}
+                                           )
+                                )
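+        # The tree now matches the OpenStack config-drive v2 layout
+        # (openstack/latest/meta_data.json and user_data); the 'config-2' volume
+        # label passed to genisoimage below is what cloud-init probes for in the guest.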
+        genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
+            iso_path=iso_path, source_dir_path=tmpdir)
+        self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
+        try:
+            with open(os.devnull, 'w') as FNULL:
+                subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
+        except subprocess.CalledProcessError as e:
+            shutil.rmtree(tmpdir, ignore_errors=True)
+            error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+        return iso_path
+
+    def upload_iso_to_catalog(self, catalog_id, iso_file_path):
+        if not os.path.isfile(iso_file_path):
+            error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+        iso_file_stat = os.stat(iso_file_path)
+        xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
+                            <Media
+                                xmlns="http://www.vmware.com/vcloud/v1.5"
+                                name="{iso_name}"
+                                size="{iso_size}"
+                                imageType="iso">
+                                <Description>ISO image for config-drive</Description>
+                            </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+        headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
+        catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
+        response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
+
+        if response.status_code != 201:
+            error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+
+        catalogItem = XmlElementTree.fromstring(response.content)
+        entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
+        entity_href = entity.get('href')
+
+        response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
+        if response.status_code != 200:
+            raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
+
+        match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
+        if match:
+            media_upload_href = match.group(1)
+        else:
+            raise Exception('Could not parse the upload URL for the media file from the last response')
+        upload_iso_task = self.get_task_from_response(response.content)
+        headers['Content-Type'] = 'application/octet-stream'
+        response = self.perform_request(req_type='PUT',
+                                        url=media_upload_href,
+                                        headers=headers,
+                                        data=open(iso_file_path, 'rb'))
 
-        self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+        if response.status_code != 200:
+            raise Exception('PUT request to "{}" failed'.format(media_upload_href))
+        result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
+        if result.get('status') != 'success':
+            raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
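+
+    # Note: the sequence above is the standard vCD media-upload workflow: POST a
+    # <Media> descriptor to the catalog's action/upload link, GET the media entity
+    # to discover the <File> upload href, PUT the raw ISO bytes to that href, and
+    # finally wait on the upload task that was returned with the entity.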
 
-        vdc = vca.get_vdc(self.tenant_name)
-        if vdc is None:
-            raise vimconn.vimconnConnectionException(
-                "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+    def get_vcd_availibility_zones(self,respool_href, headers):
+        """ Method to find presence of av zone is VIM resource pool
 
-        vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
-        if not vm_info_dict:
-            self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
-            raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+            Args:
+                respool_href - resource pool href
+                headers - header information
 
-        status_key = vm_info_dict['status']
-        error = ''
+            Returns:
+               vcd_az - list of availability zones (host group names) present in vCD
+        """
+        vcd_az = []
+        resp = self.perform_request(req_type='GET', url=respool_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+        else:
+            # Get the href to hostGroups and check whether the provided host group is present in it
+            resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
+                                hostGroup_href = schild.attrib.get('href')
+                                hg_resp = self.perform_request(req_type='GET', url=hostGroup_href, headers=headers)
+                                if hg_resp.status_code != requests.codes.ok:
+                                    self.logger.debug("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
+                                else:
+                                    hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
+                                    for hostGroup in hg_resp_xml:
+                                        if 'HostGroup' in hostGroup.tag:
+                                            # append the host group name to the list
+                                            vcd_az.append(hostGroup.attrib.get("name"))
+        return vcd_az
+
+
+    def set_availability_zones(self):
+        """
+        Set vim availability zone
+        """
+
+        vim_availability_zones = self.config.get('availability_zone')
+        if isinstance(vim_availability_zones, str):
+            availability_zone = [vim_availability_zones]
+        elif isinstance(vim_availability_zones, list):
+            availability_zone = vim_availability_zones
+        else:
+            availability_zone = None
+
+        return availability_zone
+
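+    # Examples of the VIM 'config' input handled above (hypothetical host group names):
+    #   {'availability_zone': 'HG-A'}           -> ['HG-A']
+    #   {'availability_zone': ['HG-A', 'HG-B']} -> ['HG-A', 'HG-B']
+    #   no 'availability_zone' key              -> None
+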
+
+    def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+        """
+        Return the availability zone to be used by the created VM.
+        returns: The VIM availability zone to be used or None
+        """
+        if availability_zone_index is None:
+            if not self.config.get('availability_zone'):
+                return None
+            elif isinstance(self.config.get('availability_zone'), str):
+                return self.config['availability_zone']
+            else:
+                return self.config['availability_zone'][0]
+
+        vim_availability_zones = self.availability_zone
+
+        # check whether the VIM offers enough availability zones for those described in the VNFD
+        if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+            # check whether all availability zone names in the VNFD match the VIM names
+            match_by_index = False
+            for av in availability_zone_list:
+                if av not in vim_availability_zones:
+                    match_by_index = True
+                    break
+            if match_by_index:
+                self.logger.debug("Required Availability zone or Host Group not found in VIM config")
+                self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
+                self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
+                self.logger.debug("VIM Availability zones will be used by index")
+                return vim_availability_zones[availability_zone_index]
+            else:
+                return availability_zone_list[availability_zone_index]
+        else:
+            raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
+
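+    # Selection sketch (hypothetical zone names): with self.availability_zone set
+    # to ['HG-A', 'HG-B'] and a VNFD list of ['HG-B', 'HG-A'], index 0 resolves to
+    # 'HG-B'; if any VNFD name is unknown to the VIM, the VIM zone at the same
+    # index ('HG-A') is used instead, and a conflict is raised when the VNFD needs
+    # more zones than the VIM offers.
+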
+
+    def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
+        """ Method to create VM to Host Affinity rule in vCD
+
+        Args:
+            addrule_href - href to make a POST request
+            vmgrpname - name of the VM group created
+            hostgrpname - name of the host group created earlier
+            polarity - Affinity or Anti-affinity (default: Affinity)
+            headers - headers to make REST call
+
+        Returns:
+            True- if rule is created
+            False- Failed to create rule due to some error
+
+        """
+        task_status = False
+        rule_name = polarity + "_" + vmgrpname
+        payload = """<?xml version="1.0" encoding="UTF-8"?>
+                     <vmext:VMWVmHostAffinityRule
+                       xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
+                       xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                       type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
+                       <vcloud:Name>{}</vcloud:Name>
+                       <vcloud:IsEnabled>true</vcloud:IsEnabled>
+                       <vcloud:IsMandatory>true</vcloud:IsMandatory>
+                       <vcloud:Polarity>{}</vcloud:Polarity>
+                       <vmext:HostGroupName>{}</vmext:HostGroupName>
+                       <vmext:VmGroupName>{}</vmext:VmGroupName>
+                     </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
+
+        resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
+
+        if resp.status_code != requests.codes.accepted:
+            self.logger.debug("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
+            return task_status
+        else:
+            affinity_task = self.get_task_from_response(resp.content)
+            self.logger.debug ("affinity_task: {}".format(affinity_task))
+            if affinity_task is None or affinity_task is False:
+                raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
+            # wait for task to complete
+            result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
+            if result.get('status') == 'success':
+                self.logger.debug("Successfully created affinity rule {}".format(rule_name))
+                return True
+            else:
+                raise vimconn.vimconnUnexpectedResponse(
+                      "failed to create affinity rule {}".format(rule_name))
+
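+    # Usage sketch (hypothetical names; addrule_href and headers built by the caller):
+    #   created = self.create_vm_to_host_affinity_rule(addrule_href,
+    #                                                  'VmGroup-1234', 'HG-A',
+    #                                                  'Affinity', headers)
+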
+
+    def get_add_rule_reference(self, respool_href, headers):
+        """ This method finds the href used to add a VM-to-host affinity rule to vCD
+
+        Args:
+            respool_href- href to resource pool
+            headers- header information to make REST call
+
+        Returns:
+            None - if no valid href to add a rule is found, or
+            addrule_href - href to add a VM-to-host affinity rule to the resource pool
+        """
+        addrule_href = None
+        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+        else:
+
+            resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
+                                schild.attrib.get('rel') == "add":
+                                addrule_href = schild.attrib.get('href')
+                                break
+
+        return addrule_href
+
+
+    def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
+        """ Method to add deployed VM to newly created VM Group.
+            This is required to create VM to Host affinity in vCD
+
+        Args:
+            vm_uuid- newly created vm uuid
+            vmGroupNameURL- URL to VM Group name
+            vmGroup_name- Name of VM group created
+            headers- Headers for REST request
+
+        Returns:
+            True- if VM added to VM group successfully
+            False- if any error is encountered
+        """
+
+        addvm_resp = self.perform_request(req_type='GET', url=vmGroupNameURL, headers=headers)
+
+        if addvm_resp.status_code != requests.codes.ok:
+            self.logger.debug("REST API call to get VM Group Name url {} failed. Return status code {}"\
+                               .format(vmGroupNameURL, addvm_resp.status_code))
+            return False
+        else:
+            addvmtogrpURL = None
+            resp_xml = XmlElementTree.fromstring(addvm_resp.content)
+            for child in resp_xml:
+                if child.tag.split('}')[1] == 'Link':
+                    if child.attrib.get("rel") == "addVms":
+                        addvmtogrpURL = child.attrib.get("href")
+            if addvmtogrpURL is None:
+                self.logger.debug("add_vm_to_vmgroup(): no 'addVms' link found for VM group {}".format(vmGroup_name))
+                return False
+
+        #Get vm details
+        url_list = [self.url, '/api/vApp/vm-',vm_uuid]
+        vmdetailsURL = ''.join(url_list)
+
+        resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
+            return False
+
+        #Parse VM details
+        resp_xml = XmlElementTree.fromstring(resp.content)
+        if resp_xml.tag.split('}')[1] == "Vm":
+            vm_id = resp_xml.attrib.get("id")
+            vm_name = resp_xml.attrib.get("name")
+            vm_href = resp_xml.attrib.get("href")
+        #Add VM into VMgroup
+        payload = """<?xml version="1.0" encoding="UTF-8"?>\
+                   <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
+                    xmlns="http://www.vmware.com/vcloud/versions" \
+                    xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
+                    xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
+                    xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
+                    xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
+                    xmlns:ns7="http://www.vmware.com/schema/ovf" \
+                    xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
+                    xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
+                    <ns2:VmReference href="{}" id="{}" name="{}" \
+                    type="application/vnd.vmware.vcloud.vm+xml" />\
+                   </ns2:Vms>""".format(vm_href, vm_id, vm_name)
+
+        addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
+
+        if addvmtogrp_resp.status_code != requests.codes.accepted:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
+            return False
+        else:
+            self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
+            return True
+
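+    # Usage sketch (hypothetical values; vmGroupNameURL obtained from a prior query):
+    #   added = self.add_vm_to_vmgroup(vm_uuid, vmGroupNameURL, 'VmGroup-1234', headers)
+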
+
+    def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
+        """Method to create a VM group in vCD
+
+           Args:
+              vmgroup_name : Name of VM group to be created
+              vmgroup_href : href for vmgroup
+              headers- Headers for REST request
+        """
+        #POST to add URL with required data
+        vmgroup_status = False
+        payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
+                       xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
+                   <vmCount>1</vmCount>\
+                   </VMWVmGroup>""".format(vmgroup_name)
+        resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
+
+        if resp.status_code != requests.codes.accepted:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
+            return vmgroup_status
+        else:
+            vmgroup_task = self.get_task_from_response(resp.content)
+            if vmgroup_task is None or vmgroup_task is False:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+
+            # wait for task to complete
+            result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
+
+            if result.get('status') == 'success':
+                self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
+                vmgroup_status = True
+                return vmgroup_status
+            else:
+                raise vimconn.vimconnUnexpectedResponse(\
+                        "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+
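+    # Usage sketch (hypothetical group name; vmgrp_href from find_vmgroup_reference()):
+    #   if self.create_vmgroup('VmGroup-1234', vmgrp_href, headers):
+    #       ...  # safe to add the deployed VM to the new group
+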
+
+    def find_vmgroup_reference(self, url, headers):
+        """ Method to create a new VMGroup which is required to add created VM
+            Args:
+               url- resource pool href
+               headers- header information
+
+            Returns:
+               returns href to VM group to create VM group
+        """
+        #Perform GET on resource pool to find 'add' link to create VMGroup
+        #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
+        vmgrp_href = None
+        resp = self.perform_request(req_type='GET',url=url, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
+        else:
+            #Get the href to add vmGroup to vCD
+            resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            #Find href with type VMGroup and rel with add
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
+                                and schild.attrib.get('rel') == "add":
+                                vmgrp_href = schild.attrib.get('href')
+                                return vmgrp_href
+
+
+    def check_availibility_zone(self, az, respool_href, headers):
+        """ Method to verify requested av zone is present or not in provided
+            resource pool
+
+            Args:
+                az - name of hostgroup (availibility_zone)
+                respool_href - Resource Pool href
+                headers - Headers to make REST call
+            Returns:
+                az_found - True if availibility_zone is found else False
+        """
+        az_found = False
+        headers['Accept'] = 'application/*+xml;version=' + API_VERSION
+        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+        else:
+        #Get the href to hostGroups and find provided hostGroup is present in it
+            resp_xml = XmlElementTree.fromstring(resp.content)
+
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
+                                hostGroup_href = schild.attrib.get('href')
+                                hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
+                                if hg_resp.status_code != requests.codes.ok:
+                                    self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
+                                else:
+                                    hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
+                                    for hostGroup in hg_resp_xml:
+                                        if 'HostGroup' in hostGroup.tag:
+                                            if hostGroup.attrib.get("name") == az:
+                                                az_found = True
+                                                break
+        return az_found
+
+
+    def get_pvdc_for_org(self, org_vdc, headers):
+        """ This method gets provider vdc references from organisation
+
+            Args:
+               org_vdc - name of the organisation VDC to find pvdc
+               headers - headers to make REST call
+
+            Returns:
+               None - if no pvdc href found else
+               pvdc_href - href to pvdc
+        """
+
+        #Get provider VDC references from vCD
+        pvdc_href = None
+        #url = '<vcd url>/api/admin/extension/providerVdcReferences'
+        url_list = [self.url, '/api/admin/extension/providerVdcReferences']
+        url = ''.join(url_list)
+
+        response = self.perform_request(req_type='GET',url=url, headers=headers)
+        if response.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}"\
+                               .format(url, response.status_code))
+        else:
+            xmlroot_response = XmlElementTree.fromstring(response.content)
+            for child in xmlroot_response:
+                if 'ProviderVdcReference' in child.tag:
+                    pvdc_href = child.attrib.get('href')
+                    #Get vdcReferences to find org
+                    pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
+                    if pvdc_resp.status_code != requests.codes.ok:
+                        raise vimconn.vimconnException("REST API call {} failed. "\
+                                                       "Return status code {}"\
+                                                       .format(pvdc_href, pvdc_resp.status_code))
+
+                    pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
+                    for child in pvdc_resp_xml:
+                        if 'Link' in child.tag:
+                            if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
+                                vdc_href = child.attrib.get('href')
+
+                                #Check if provided org is present in vdc
+                                vdc_resp = self.perform_request(req_type='GET',
+                                                                url=vdc_href,
+                                                                headers=headers)
+                                if vdc_resp.status_code != requests.codes.ok:
+                                    raise vimconn.vimconnException("REST API call {} failed. "\
+                                                                   "Return status code {}"\
+                                                                   .format(vdc_href, vdc_resp.status_code))
+                                vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
+                                for child in vdc_resp_xml:
+                                    if 'VdcReference' in child.tag:
+                                        if child.attrib.get('name') == org_vdc:
+                                            return pvdc_href
+
+
+    def get_resource_pool_details(self, pvdc_href, headers):
+        """ Method to get resource pool information.
+            Host groups are property of resource group.
+            To get host groups, we need to GET details of resource pool.
+
+            Args:
+                pvdc_href: href to pvdc details
+                headers: headers
+
+            Returns:
+                respool_href - Returns href link reference to resource pool
+        """
+        respool_href = None
+        resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}"\
+                               .format(pvdc_href, resp.status_code))
+        else:
+            respool_resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in respool_resp_xml:
+                if 'Link' in child.tag:
+                    if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
+                        respool_href = child.attrib.get("href")
+                        break
+        return respool_href
+
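+    # End-to-end lookup sketch tying the helpers above together (hypothetical
+    # org VDC name and host group):
+    #   pvdc_href = self.get_pvdc_for_org('my-org-vdc', headers)
+    #   respool_href = self.get_resource_pool_details(pvdc_href, headers)
+    #   if self.check_availibility_zone('HG-A', respool_href, headers):
+    #       addrule_href = self.get_add_rule_reference(respool_href, headers)
+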
+
+    def log_message(self, msg):
+        """
+            Method to log error messages related to Affinity rule creation
+            in new_vminstance & raise Exception
+                Args :
+                    msg - Error message to be logged
+
+        """
+        #get token to connect vCD as a normal user
+        self.get_token()
+        self.logger.debug(msg)
+        raise vimconn.vimconnException(msg)
+
+
+    ##
+    ##
+    ##  based on current discussion
+    ##
+    ##
+    ##  server:
+    #   created: '2016-09-08T11:51:58'
+    #   description: simple-instance.linux1.1
+    #   flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
+    #   hostId: e836c036-74e7-11e6-b249-0800273e724c
+    #   image: dde30fe6-75a9-11e6-ad5f-0800273e724c
+    #   status: ACTIVE
+    #   error_msg:
+    #   interfaces: …
+    #
+    def get_vminstance(self, vim_vm_uuid=None):
+        """Returns the VM instance information from VIM"""
+
+        self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
+
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnConnectionException(
+                "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
+        if not vm_info_dict:
+            self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+            raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+
+        status_key = vm_info_dict['status']
+        error = ''
         try:
             vm_dict = {'created': vm_info_dict['created'],
                        'description': vm_info_dict['name'],
@@ -1521,7 +2715,7 @@ class vimconnector(vimconn.vimconnector):
 
         return vm_dict
 
-    def delete_vminstance(self, vm__vim_uuid):
+    def delete_vminstance(self, vm__vim_uuid, created_items=None):
         """Method poweroff and remove VM instance from vcloud director network.
 
         Args:
@@ -1532,46 +2726,39 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
 
-        vdc = vca.get_vdc(self.tenant_name)
-        if vdc is None:
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
            self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
                self.tenant_name))
            raise vimconn.vimconnException(
                "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+        vdc_obj = VDC(self.client, href=vdc.get('href'))
 
         try:
-            vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
+            vapp_name = self.get_namebyvappid(vm__vim_uuid)
             if vapp_name is None:
                 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
-            else:
-                self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+            self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+            vapp_resource = vdc_obj.get_vapp(vapp_name)
+            vapp = VApp(self.client, resource=vapp_resource)
 
             # Delete vApp and wait for status change if task executed and vApp is None.
-            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
 
             if vapp:
-                if vapp.me.deployed:
+                if vapp_resource.get('deployed') == 'true':
                     self.logger.info("Powering off vApp {}".format(vapp_name))
                     #Power off vApp
                     powered_off = False
                     wait_time = 0
                     while wait_time <= MAX_WAIT_TIME:
-                        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
-                        if not vapp:
-                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
-                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+                        power_off_task = vapp.power_off()
+                        result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
 
-                        power_off_task = vapp.poweroff()
-                        if type(power_off_task) is GenericTask:
-                            result = vca.block_until_completed(power_off_task)
-                            if result:
-                                powered_off = True
-                                break
+                        if result.get('status') == 'success':
+                            powered_off = True
+                            break
                         else:
                             self.logger.info("Wait for vApp {} to power off".format(vapp_name))
                             time.sleep(INTERVAL_TIME)
@@ -1587,17 +2774,16 @@ class vimconnector(vimconn.vimconnector):
                     wait_time = 0
                     undeployed = False
                     while wait_time <= MAX_WAIT_TIME:
-                        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+                        vapp = VApp(self.client, resource=vapp_resource)
                         if not vapp:
                             self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                             return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
-                        undeploy_task = vapp.undeploy(action='powerOff')
+                        undeploy_task = vapp.undeploy()
 
-                        if type(undeploy_task) is GenericTask:
-                            result = vca.block_until_completed(undeploy_task)
-                            if result:
-                                undeployed = True
-                                break
+                        result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
+                        if result.get('status') == 'success':
+                            undeployed = True
+                            break
                         else:
                             self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
                             time.sleep(INTERVAL_TIME)
@@ -1605,47 +2791,52 @@ class vimconnector(vimconn.vimconnector):
                         wait_time +=INTERVAL_TIME
 
                     if not undeployed:
-                        self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid)) 
+                        self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
 
                 # delete vapp
                 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
-                vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
 
                 if vapp is not None:
                     wait_time = 0
                     result = False
 
                     while wait_time <= MAX_WAIT_TIME:
-                        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+                        vapp = VApp(self.client, resource=vapp_resource)
                         if not vapp:
                             self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
                             return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
 
-                        delete_task = vapp.delete()
+                        delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
 
-                        if type(delete_task) is GenericTask:
-                            vca.block_until_completed(delete_task)
-                            result = vca.block_until_completed(delete_task)
-                            if result:
-                                break
+                        result = self.client.get_task_monitor().wait_for_success(task=delete_task)
+                        if result.get('status') == 'success':
+                            break
                         else:
                             self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
                             time.sleep(INTERVAL_TIME)
 
                         wait_time +=INTERVAL_TIME
 
-                    if not result:
+                    if not result or result.get('status') != 'success':
                         self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
-
+                    else:
+                        self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
+                        config_drive_catalog_name = 'cfg_drv-' + vm__vim_uuid
+                        config_drive_catalog_id = None
+                        catalog_list = self.get_image_list()
+                        try:
+                            config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
+                                                       if catalog_['name'] == config_drive_catalog_name][0]
+                        except IndexError:
+                            pass
+                        if config_drive_catalog_id:
+                            self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
+                                              'vapp_name "{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
+                            self.delete_image(config_drive_catalog_id)
+                        return vm__vim_uuid
         except:
             self.logger.debug(traceback.format_exc())
             raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
 
-        if vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) is None:
-            self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
-            return vm__vim_uuid
-        else:
-            raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
 
     def refresh_vms_status(self, vm_list):
         """Get the status of the virtual machines and their interfaces/ports
@@ -1673,14 +2864,172 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
 
-        mac_ip_addr={}
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        vms_dict = {}
+        nsx_edge_list = []
+        for vmuuid in vm_list:
+            vapp_name = self.get_namebyvappid(vmuuid)
+            if vapp_name is not None:
+
+                try:
+                    vm_pci_details = self.get_vm_pci_details(vmuuid)
+                    vdc_obj = VDC(self.client, href=vdc.get('href'))
+                    vapp_resource = vdc_obj.get_vapp(vapp_name)
+                    the_vapp = VApp(self.client, resource=vapp_resource)
+
+                    vm_details = {}
+                    for vm in the_vapp.get_all_vms():
+                        headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
+                                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                        response = self.perform_request(req_type='GET',
+                                                        url=vm.get('href'),
+                                                        headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
+                                                            "status code : {}".format(vm.get('href'),
+                                                                                    response.content,
+                                                                               response.status_code))
+                            raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
+                                                                         "VM details")
+                        xmlroot = XmlElementTree.fromstring(response.content)
+
+
+                        result = response.content.replace("\n"," ")
+                        hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
+                        if hdd_match:
+                            hdd_mb = hdd_match.group(1)
+                            vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
+                        cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
+                        if cpus_match:
+                            cpus = cpus_match.group(1)
+                            vm_details['cpus'] = int(cpus) if cpus else None
+                        memory_match = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result)
+                        if memory_match:
+                            vm_details['memory_mb'] = int(memory_match.group(1))
+                        vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
+                        vm_details['id'] = xmlroot.get('id')
+                        vm_details['name'] = xmlroot.get('name')
+                        vm_info = [vm_details]
+                        if vm_pci_details:
+                            vm_info[0].update(vm_pci_details)
+
+                        vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                   'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                   'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+
+                        # get networks
+                        vm_ip = None
+                        vm_mac = None
+                        networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
+                        for network in networks:
+                            mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
+                            vm_mac = mac_s.group(1) if mac_s else None
+                            ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
+                            vm_ip = ip_s.group(1) if ip_s else None
+
+                            if vm_ip is None:
+                                if not nsx_edge_list:
+                                    nsx_edge_list = self.get_edge_details()
+                                    if nsx_edge_list is None:
+                                        raise vimconn.vimconnException("refresh_vms_status:"\
+                                                                       "Failed to get edge details from NSX Manager")
+                                if vm_mac is not None:
+                                    vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
+
+                            net_s = re.search('network="(.*?)"',network)
+                            network_name = net_s.group(1) if net_s else None
+
+                            vm_net_id = self.get_network_id_by_name(network_name)
+                            interface = {"mac_address": vm_mac,
+                                         "vim_net_id": vm_net_id,
+                                         "vim_interface_id": vm_net_id,
+                                         "ip_address": vm_ip}
+
+                            vm_dict["interfaces"].append(interface)
+
+                    # add a vm to vm dict
+                    vms_dict.setdefault(vmuuid, vm_dict)
+                    self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
+                except Exception as exp:
+                    self.logger.debug("Error in response {}".format(exp))
+                    self.logger.debug(traceback.format_exc())
+
+        return vms_dict
+
+
+    def get_edge_details(self):
+        """Get the NSX edge list from NSX Manager
+           Returns list of NSX edges
+        """
+        edge_list = []
+        rheaders = {'Content-Type': 'application/xml'}
+        nsx_api_url = '/api/4.0/edges'
+
+        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
+
+        try:
+            resp = requests.get(self.nsx_manager + nsx_api_url,
+                                auth = (self.nsx_user, self.nsx_password),
+                                verify = False, headers = rheaders)
+            if resp.status_code == requests.codes.ok:
+                paged_Edge_List = XmlElementTree.fromstring(resp.text)
+                for edge_pages in paged_Edge_List:
+                    if edge_pages.tag == 'edgePage':
+                        for edge_summary in edge_pages:
+                            if edge_summary.tag == 'pagingInfo':
+                                for element in edge_summary:
+                                    if element.tag == 'totalCount' and element.text == '0':
+                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
+                                                                       .format(self.nsx_manager))
+
+                            if edge_summary.tag == 'edgeSummary':
+                                for element in edge_summary:
+                                    if element.tag == 'id':
+                                        edge_list.append(element.text)
+                    else:
+                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                       .format(self.nsx_manager))
+
+                if not edge_list:
+                    raise vimconn.vimconnException("get_edge_details: "\
+                                                   "No NSX edge details found: {}"
+                                                   .format(self.nsx_manager))
+                else:
+                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
+                    return edge_list
+            else:
+                self.logger.debug("get_edge_details: "
+                                  "Failed to get NSX edge details from NSX Manager: {}"
+                                  .format(resp.content))
+                return None
+
+        except Exception as exp:
+            self.logger.debug("get_edge_details: "\
+                              "Failed to get NSX edge details from NSX Manager: {}"
+                              .format(exp))
+            raise vimconn.vimconnException("get_edge_details: "\
+                                           "Failed to get NSX edge details from NSX Manager: {}"
+                                           .format(exp))
+
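+    # Lookup sketch (hypothetical MAC address): resolve a VM's DHCP lease via the
+    # NSX edges when vCD itself does not report an IP address.
+    #   nsx_edge_list = self.get_edge_details()
+    #   vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, '00:50:56:aa:bb:cc')
+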
+
+    def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
+        """Get IP address details from NSX edges, using the MAC address
+           PARAMS: nsx_edges : List of NSX edges
+                   mac_address : Find IP address corresponding to this MAC address
+           Returns: IP address corresponding to the provided MAC address
+        """
+
+        ip_addr = None
         rheaders = {'Content-Type': 'application/xml'}
-        iso_edges = ['edge-2','edge-3','edge-6','edge-7','edge-8','edge-9','edge-10']
+
+        self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
 
         try:
-            for edge in iso_edges:
+            for edge in nsx_edges:
                 nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
-                self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url))
 
                 resp = requests.get(self.nsx_manager + nsx_api_url,
                                     auth = (self.nsx_user, self.nsx_password),
@@ -1694,66 +3043,27 @@ class vimconnector(vimconn.vimconnector):
                             for leaseInfo in dhcpLeaseInfo:
                                 for elem in leaseInfo:
                                     if (elem.tag)=='macAddress':
-                                        mac_addr = elem.text
+                                        edge_mac_addr = elem.text
                                     if (elem.tag)=='ipAddress':
                                         ip_addr = elem.text
-                                if (mac_addr) is not None:
-                                    mac_ip_addr[mac_addr]= ip_addr
-                    self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr))
+                                if edge_mac_addr is not None:
+                                    if edge_mac_addr == mac_address:
+                                        self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
+                                                          .format(ip_addr, mac_address,edge))
+                                        return ip_addr
                 else:
-                    self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content))
-        except KeyError:
-            self.logger.debug("Error in response from NSX Manager {}".format(KeyError.message))
-            self.logger.debug(traceback.format_exc())
-
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
-        vdc = vca.get_vdc(self.tenant_name)
-        if vdc is None:
-            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
-
-        vms_dict = {}
-        for vmuuid in vm_list:
-            vmname = self.get_namebyvappid(vca, vdc, vmuuid)
-            if vmname is not None:
+                    self.logger.debug("get_ipaddr_from_NSXedge: "\
+                                      "Error occurred while getting DHCP lease info from NSX Manager: {}"
+                                      .format(resp.content))
 
-                the_vapp = vca.get_vapp(vdc, vmname)
-                vm_info = the_vapp.get_vms_details()
-                vm_status = vm_info[0]['status']
-                vm_pci_details = self.get_vm_pci_details(vmuuid)
-                vm_info[0].update(vm_pci_details)
-
-                vm_dict = {'status': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'error_msg': vcdStatusCode2manoFormat[the_vapp.me.get_status()],
-                           'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+            return None
 
-                # get networks
-                try:
-                    vm_app_networks = the_vapp.get_vms_network_info()
-                    for vapp_network in vm_app_networks:
-                        for vm_network in vapp_network:
-                            if vm_network['name'] == vmname:
-                                #Assign IP Address based on MAC Address in NSX DHCP lease info
-                                for mac_adres,ip_adres in mac_ip_addr.iteritems():
-                                    if mac_adres == vm_network['mac']:
-                                        vm_network['ip']=ip_adres
-                                interface = {"mac_address": vm_network['mac'],
-                                             "vim_net_id": self.get_network_id_by_name(vm_network['network_name']),
-                                             "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']),
-                                             'ip_address': vm_network['ip']}
-                                # interface['vim_info'] = yaml.safe_dump(vm_network)
-                                vm_dict["interfaces"].append(interface)
-                    # add a vm to vm dict
-                    vms_dict.setdefault(vmuuid, vm_dict)
-                except KeyError:
-                    self.logger.debug("Error in respond {}".format(KeyError.message))
-                    self.logger.debug(traceback.format_exc())
+        except XmlElementTree.ParseError as Err:
+            self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
 
-        return vms_dict
 
-    def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
+    def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
         """Send and action over a VM instance from VIM
         Returns the vm_id if the action was successfully sent to the VIM"""
 
@@ -1761,15 +3071,11 @@ class vimconnector(vimconn.vimconnector):
         if vm__vim_uuid is None or action_dict is None:
             raise vimconn.vimconnException("Invalid request. VM id or action is None.")
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
-        vdc = vca.get_vdc(self.tenant_name)
+        org, vdc = self.get_vdc_details()
         if vdc is None:
-            return -1, "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name)
+            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
-        vapp_name = self.get_namebyvappid(vca, vdc, vm__vim_uuid)
+        vapp_name = self.get_namebyvappid(vm__vim_uuid)
         if vapp_name is None:
             self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
             raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
@@ -1777,61 +3083,61 @@ class vimconnector(vimconn.vimconnector):
             self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
 
         try:
-            the_vapp = vca.get_vapp(vdc, vapp_name)
-            # TODO fix all status
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vapp_name)
+            vapp = VApp(self.client, resource=vapp_resource)
             if "start" in action_dict:
-                vm_info = the_vapp.get_vms_details()
-                vm_status = vm_info[0]['status']
-                self.logger.info("Power on vApp: vm_status:{} {}".format(type(vm_status),vm_status))
-                if vm_status == "Suspended" or vm_status == "Powered off":
-                    power_on_task = the_vapp.poweron()
-                    if power_on_task is not None and type(power_on_task) is GenericTask:
-                        result = vca.block_until_completed(power_on_task)
-                        if result:
-                            self.logger.info("action_vminstance: Powered on vApp: {}".format(vapp_name))
-                        else:
-                            self.logger.info("action_vminstance: Failed to power on vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Wait for vApp {} to power on".format(vapp_name))
+                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
+                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
+                result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+                self.instance_actions_result("start", result, vapp_name)
             elif "rebuild" in action_dict:
-                self.logger.info("action_vminstance: Rebuilding vApp: {}".format(vapp_name))
-                power_on_task = the_vapp.deploy(powerOn=True)
-                if type(power_on_task) is GenericTask:
-                    result = vca.block_until_completed(power_on_task)
-                    if result:
-                        self.logger.info("action_vminstance: Rebuilt vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Failed to rebuild vApp: {}".format(vapp_name))
-                else:
-                    self.logger.info("action_vminstance: Wait for vApp rebuild {} to power on".format(vapp_name))
+                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
+                rebuild_task = vapp.deploy(power_on=True)
+                result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
+                self.instance_actions_result("rebuild", result, vapp_name)
             elif "pause" in action_dict:
-                pass
-                ## server.pause()
+                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
+                pause_task = vapp.undeploy(action='suspend')
+                result = self.client.get_task_monitor().wait_for_success(task=pause_task)
+                self.instance_actions_result("pause", result, vapp_name)
             elif "resume" in action_dict:
-                pass
-                ## server.resume()
+                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
+                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
+                result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+                self.instance_actions_result("resume", result, vapp_name)
             elif "shutoff" in action_dict or "shutdown" in action_dict:
-                power_off_task = the_vapp.undeploy(action='powerOff')
-                if type(power_off_task) is GenericTask:
-                    result = vca.block_until_completed(power_off_task)
-                    if result:
-                        self.logger.info("action_vminstance: Powered off vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Failed to power off vApp: {}".format(vapp_name))
+                action_name, value = action_dict.items()[0]
+                #For python3
+                #action_name , value = list(action_dict.items())[0]
+                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
+                shutdown_task = vapp.shutdown()
+                result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
+                if action_name == "shutdown":
+                    self.instance_actions_result("shutdown", result, vapp_name)
                 else:
-                    self.logger.info("action_vminstance: Wait for vApp {} to power off".format(vapp_name))
+                    self.instance_actions_result("shutoff", result, vapp_name)
             elif "forceOff" in action_dict:
-                the_vapp.reset()
-            elif "terminate" in action_dict:
-                the_vapp.delete()
-            # elif "createImage" in action_dict:
-            #     server.create_image()
+                result = vapp.undeploy(action='powerOff')
+                self.instance_actions_result("forceOff", result, vapp_name)
+            elif "reboot" in action_dict:
+                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
+                reboot_task = vapp.reboot()
+                self.client.get_task_monitor().wait_for_success(task=reboot_task)
             else:
-                pass
-        except:
-            pass
+                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
+            return vm__vim_uuid
+        except Exception as exp:
+            self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
+
+    def instance_actions_result(self, action, result, vapp_name):
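+        """
+        Log whether a vApp action succeeded. 'result' is the task object
+        returned by pyvcloud's TaskMonitor.wait_for_success(); only its
+        'status' attribute is inspected here.
+        """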
+        if result.get('status') == 'success':
+            self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
+        else:
+            self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
 
-    def get_vminstance_console(self, vm_id, console_type="vnc"):
+    def get_vminstance_console(self, vm_id, console_type="novnc"):
         """
         Get a console for the virtual machine
         Params:
@@ -1845,7 +3151,57 @@ class vimconnector(vimconn.vimconnector):
                 port:     the http, ssh, ... port
                 suffix:   extra text, e.g. the http path and query string
         """
-        raise vimconn.vimconnNotImplemented("Should have implemented this")
+        console_dict = {}
+
+        if console_type is None or console_type == 'novnc':
+
+            url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='POST',
+                                            url=url_rest_call,
+                                            headers=headers)
+
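+            # A 403 here usually means the vCD session token expired;
+            # retry_rest() re-authenticates and replays the request.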
+            if response.status_code == 403:
+                response = self.retry_rest('POST', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {}".format(url_rest_call,
+                                                         response.content,
+                                                    response.status_code))
+                raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+                                                                     "VM Mks ticket details")
+            s = re.search("<Host>(.*?)</Host>",response.content)
+            console_dict['server'] = s.group(1) if s else None
+            s1 = re.search("<Port>(\d+)</Port>",response.content)
+            console_dict['port'] = s1.group(1) if s1 else None
+
+
+            url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='POST',
+                                            url=url_rest_call,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('POST', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {}".format(url_rest_call,
+                                                         response.content,
+                                                    response.status_code))
+                raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+                                                                     "VM console details")
+            s = re.search(">.*?/(vm-\d+.*)</",response.content)
+            console_dict['suffix'] = s.group(1) if s else None
+            console_dict['protocol'] = "https"
+
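+        # Illustrative shape of the returned dict (values depend on the vCD
+        # deployment):
+        #   {'protocol': 'https', 'server': '<mks-host>', 'port': '902',
+        #    'suffix': 'vm-<id>/...'}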
+        return console_dict
 
     # NOT USED METHODS in current version
 
@@ -1904,10 +3260,6 @@ class vimconnector(vimconn.vimconnector):
             The return network name.
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
         if not network_uuid:
             return None
 
@@ -1934,10 +3286,6 @@ class vimconnector(vimconn.vimconnector):
             network_uuid: network_id
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
-
         if not network_name:
             self.logger.debug("get_network_id_by_name() : Network name is empty")
             return None
@@ -1947,6 +3295,8 @@ class vimconnector(vimconn.vimconnector):
             if org_dict and 'networks' in org_dict:
                 org_network_dict = org_dict['networks']
                 for net_uuid,net_name in org_network_dict.iteritems():
+                #For python3
+                #for net_uuid,net_name in org_network_dict.items():
                     if net_name == network_name:
                         return net_uuid
 
@@ -1966,19 +3316,20 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return XML respond
         """
+        url_list = [self.url, '/api/org']
+        vm_list_rest_call = ''.join(url_list)
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
 
-        url_list = [vca.host, '/api/org']
-        vm_list_rest_call = ''.join(url_list)
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
 
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
             if response.status_code == requests.codes.ok:
                 return response.content
 
@@ -1986,34 +3337,35 @@ class vimconnector(vimconn.vimconnector):
 
     def get_org_action(self, org_uuid=None):
         """
-        Method leverages vCloud director and retrieve available object fdr organization.
+        Method leverages vCloud Director to retrieve available objects for an organization.
 
         Args:
-            vca - is active VCA connection.
-            vdc_name - is a vdc name that will be used to query vms action
+            org_uuid - vCD organization uuid
+            self.client - active vCD client connection
 
             Returns:
                 The return XML respond
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
         if org_uuid is None:
             return None
 
-        url_list = [vca.host, '/api/org/', org_uuid]
+        url_list = [self.url, '/api/org/', org_uuid]
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
             if response.status_code == requests.codes.ok:
                 return response.content
-
         return None
 
     def get_org(self, org_uuid=None):
@@ -2031,9 +3383,6 @@ class vimconnector(vimconn.vimconnector):
         """
 
         org_dict = {}
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
 
         if org_uuid is None:
             return org_dict
@@ -2071,9 +3420,6 @@ class vimconnector(vimconn.vimconnector):
         """
 
         org_dict = {}
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
 
         content = self.list_org_action()
         try:
@@ -2107,6 +3453,9 @@ class vimconnector(vimconn.vimconnector):
         if not (not vca.vcloud_session or not vca.vcloud_session.organization):
             refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
                           vca.vcloud_session.organization.Link)
+            #For python3
+            #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
+            #        ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
             if len(refs) == 1:
                 response = Http.get(url=vm_list_rest_call,
                                     headers=vca.vcloud_session.get_vcloud_headers(),
@@ -2245,21 +3594,23 @@ class vimconnector(vimconn.vimconnector):
                 The return XML respond
         """
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
-
         if network_uuid is None:
             return None
 
-        url_list = [vca.host, '/api/network/', network_uuid]
+        url_list = [self.url, '/api/network/', network_uuid]
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+            #Retry login if session expired & retry sending request
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
             if response.status_code == requests.codes.ok:
                 return response.content
 
@@ -2303,8 +3654,8 @@ class vimconnector(vimconn.vimconnector):
         if network_uuid is None:
             return network_uuid
 
-        content = self.get_network_action(network_uuid=network_uuid)
         try:
+            content = self.get_network_action(network_uuid=network_uuid)
             vm_list_xmlroot = XmlElementTree.fromstring(content)
 
             network_configuration['status'] = vm_list_xmlroot.get("status")
@@ -2320,8 +3671,9 @@ class vimconnector(vimconn.vimconnector):
                         if tagKey != "":
                             network_configuration[tagKey] = configuration.text.strip()
             return network_configuration
-        except:
-            pass
+        except Exception as exp:
+            self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
 
         return network_configuration
 
@@ -2335,22 +3687,21 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return None or XML respond or false
         """
-
-        vca = self.connect_as_admin()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+        client = self.connect_as_admin()
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
         if network_uuid is None:
             return False
 
-        url_list = [vca.host, '/api/admin/network/', network_uuid]
+        url_list = [self.url, '/api/admin/network/', network_uuid]
         vm_list_rest_call = ''.join(url_list)
 
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            response = Http.delete(url=vm_list_rest_call,
-                                   headers=vca.vcloud_session.get_vcloud_headers(),
-                                   verify=vca.verify,
-                                   logger=vca.logger)
-
+        if client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='DELETE',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
             if response.status_code == 202:
                 return True
 
@@ -2387,7 +3738,7 @@ class vimconnector(vimconn.vimconnector):
             vm_list_xmlroot = XmlElementTree.fromstring(content)
             vcd_uuid = vm_list_xmlroot.get('id').split(":")
             if len(vcd_uuid) == 4:
-                self.logger.info("Create new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+                self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
                 return vcd_uuid[3]
         except:
             self.logger.debug("Failed create network {}".format(network_name))
@@ -2409,20 +3760,22 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return network uuid or return None
         """
-
-        vca = self.connect_as_admin()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+        client_as_admin = self.connect_as_admin()
+        if not client_as_admin:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD.")
         if network_name is None:
             return None
 
-        url_list = [vca.host, '/api/admin/vdc/', self.tenant_id]
+        url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
         vm_list_rest_call = ''.join(url_list)
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+
+        if client_as_admin._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
+
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
 
             provider_network = None
             available_networks = None
@@ -2449,17 +3802,14 @@ class vimconnector(vimconn.vimconnector):
                     return None
 
             # find  pvdc provided available network
-            response = Http.get(url=provider_network,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+            response = self.perform_request(req_type='GET',
+                                            url=provider_network,
+                                            headers=headers)
             if response.status_code != requests.codes.ok:
                 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                           response.status_code))
                 return None
 
-            # available_networks.split("/")[-1]
-
             if parent_network_uuid is None:
                 try:
                     vm_list_xmlroot = XmlElementTree.fromstring(response.content)
@@ -2473,118 +3823,119 @@ class vimconnector(vimconn.vimconnector):
                 except:
                     return None
 
-            #Configure IP profile of the network
-            ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
-
-            gateway_address=ip_profile['gateway_address']
-            dhcp_count=int(ip_profile['dhcp_count'])
-            subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+            try:
+                #Configure IP profile of the network
+                ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
 
-            if ip_profile['dhcp_enabled']==True:
-                dhcp_enabled='true'
-            else:
-                dhcp_enabled='false'
-            dhcp_start_address=ip_profile['dhcp_start_address']
+                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
+                    subnet_rand = random.randint(0, 255)
+                    ip_base = "192.168.{}.".format(subnet_rand)
+                    ip_profile['subnet_address'] = ip_base + "0/24"
+                else:
+                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
+
+                if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
+                    ip_profile['gateway_address']=ip_base + "1"
+                if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
+                    ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
+                if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
+                    ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
+                if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
+                    ip_profile['dhcp_start_address']=ip_base + "3"
+                if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
+                    ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
+                if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
+                    ip_profile['dns_address']=ip_base + "2"
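+                # With an empty ip_profile and subnet_rand == 1 this yields, e.g.:
+                #   {'subnet_address': '192.168.1.0/24', 'gateway_address': '192.168.1.1',
+                #    'dns_address': '192.168.1.2', 'dhcp_start_address': '192.168.1.3',
+                #    'dhcp_count': 50, 'dhcp_enabled': True, 'ip_version': 'IPv4'}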
+
+                gateway_address=ip_profile['gateway_address']
+                dhcp_count=int(ip_profile['dhcp_count'])
+                subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
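+                # convert_cidr_to_netmask turns e.g. "192.168.1.0/24" into "255.255.255.0"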
+
+                if ip_profile['dhcp_enabled']==True:
+                    dhcp_enabled='true'
+                else:
+                    dhcp_enabled='false'
+                dhcp_start_address=ip_profile['dhcp_start_address']
 
-            #derive dhcp_end_address from dhcp_start_address & dhcp_count
-            end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
-            end_ip_int += dhcp_count - 1
-            dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
+                #derive dhcp_end_address from dhcp_start_address & dhcp_count
+                end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
+                end_ip_int += dhcp_count - 1
+                dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
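+                # e.g. dhcp_start_address "192.168.1.3" with dhcp_count 50
+                # gives dhcp_end_address "192.168.1.52"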
 
-            ip_version=ip_profile['ip_version']
-            dns_address=ip_profile['dns_address']
+                ip_version=ip_profile['ip_version']
+                dns_address=ip_profile['dns_address']
+            except KeyError as exp:
+                self.logger.debug("Create Network REST: Key error {}".format(exp))
+                raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))
 
             # either use client provided UUID or search for a first available
             #  if both are not defined we return none
             if parent_network_uuid is not None:
-                url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
+                provider_network = None
+                available_networks = None
+                add_vdc_rest_url = None
+
+                url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
                 add_vdc_rest_url = ''.join(url_list)
 
-            if net_type=='ptp':
-                fence_mode="isolated"
-                isshared='false'
-                is_inherited='false'
-                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
-                                <Description>Openmano created</Description>
-                                        <Configuration>
-                                            <IpScopes>
-                                                <IpScope>
-                                                    <IsInherited>{1:s}</IsInherited>
-                                                    <Gateway>{2:s}</Gateway>
-                                                    <Netmask>{3:s}</Netmask>
-                                                    <Dns1>{4:s}</Dns1>
-                                                    <IsEnabled>{5:s}</IsEnabled>
-                                                    <IpRanges>
-                                                        <IpRange>
-                                                            <StartAddress>{6:s}</StartAddress>
-                                                            <EndAddress>{7:s}</EndAddress>
-                                                        </IpRange>
-                                                    </IpRanges>
-                                                </IpScope>
-                                            </IpScopes>
-                                            <FenceMode>{8:s}</FenceMode>
-                                        </Configuration>
-                                        <IsShared>{9:s}</IsShared>
-                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
-                                                        subnet_address, dns_address, dhcp_enabled,
-                                                        dhcp_start_address, dhcp_end_address, fence_mode, isshared)
+                url_list = [self.url, '/api/admin/network/', parent_network_uuid]
+                available_networks = ''.join(url_list)
+
+            #Creating all networks as Direct Org VDC type networks.
+            #Unused in case of Underlay (data/ptp) network interface.
+            fence_mode="isolated"
+            is_inherited='false'
+            dns_list = dns_address.split(";")
+            dns1 = dns_list[0]
+            dns2_text = ""
+            if len(dns_list) >= 2:
+                dns2_text = "\n                                                <Dns2>{}</Dns2>\n".format(dns_list[1])
+            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                            <Description>Openmano created</Description>
+                                    <Configuration>
+                                        <IpScopes>
+                                            <IpScope>
+                                                <IsInherited>{1:s}</IsInherited>
+                                                <Gateway>{2:s}</Gateway>
+                                                <Netmask>{3:s}</Netmask>
+                                                <Dns1>{4:s}</Dns1>{5:s}
+                                                <IsEnabled>{6:s}</IsEnabled>
+                                                <IpRanges>
+                                                    <IpRange>
+                                                        <StartAddress>{7:s}</StartAddress>
+                                                        <EndAddress>{8:s}</EndAddress>
+                                                    </IpRange>
+                                                </IpRanges>
+                                            </IpScope>
+                                        </IpScopes>
+                                        <FenceMode>{9:s}</FenceMode>
+                                    </Configuration>
+                                    <IsShared>{10:s}</IsShared>
+                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
+                                                    subnet_address, dns1, dns2_text, dhcp_enabled,
+                                                    dhcp_start_address, dhcp_end_address,
+                                                    fence_mode, isshared)
 
-            else:
-                fence_mode="bridged"
-                is_inherited='false'
-                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
-                                <Description>Openmano created</Description>
-                                        <Configuration>
-                                            <IpScopes>
-                                                <IpScope>
-                                                    <IsInherited>{1:s}</IsInherited>
-                                                    <Gateway>{2:s}</Gateway>
-                                                    <Netmask>{3:s}</Netmask>
-                                                    <Dns1>{4:s}</Dns1>
-                                                    <IsEnabled>{5:s}</IsEnabled>
-                                                    <IpRanges>
-                                                        <IpRange>
-                                                            <StartAddress>{6:s}</StartAddress>
-                                                            <EndAddress>{7:s}</EndAddress>
-                                                        </IpRange>
-                                                    </IpRanges>
-                                                </IpScope>
-                                            </IpScopes>
-                                            <ParentNetwork href="{8:s}"/>
-                                            <FenceMode>{9:s}</FenceMode>
-                                        </Configuration>
-                                        <IsShared>{10:s}</IsShared>
-                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
-                                                        subnet_address, dns_address, dhcp_enabled,
-                                                        dhcp_start_address, dhcp_end_address, available_networks,
-                                                        fence_mode, isshared)
-
-            headers = vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
             try:
-                response = Http.post(url=add_vdc_rest_url,
-                                     headers=headers,
-                                     data=data,
-                                     verify=vca.verify,
-                                     logger=vca.logger)
+                response = self.perform_request(req_type='POST',
+                                                url=add_vdc_rest_url,
+                                                headers=headers,
+                                                data=data)
 
                 if response.status_code != 201:
-                    self.logger.debug("Create Network POST REST API call failed. Return status code {}"
-                                      .format(response.status_code))
+                    self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
+                                      .format(response.status_code,response.content))
                 else:
-                    network = networkType.parseString(response.content, True)
-                    create_nw_task = network.get_Tasks().get_Task()[0]
-
-                    # if we all ok we respond with content after network creation completes
-                    # otherwise by default return None
-                    if create_nw_task is not None:
-                        self.logger.debug("Create Network REST : Waiting for Nw creation complete")
-                        status = vca.block_until_completed(create_nw_task)
-                        if status:
-                            return response.content
-                        else:
-                            self.logger.debug("create_network_rest task failed. Network Create response : {}"
-                                              .format(response.content))
+                    network_task = self.get_task_from_response(response.content)
+                    self.logger.debug("Create Network REST : Waiting for Network creation complete")
+                    time.sleep(5)
+                    result = self.client.get_task_monitor().wait_for_success(task=network_task)
+                    if result.get('status') == 'success':
+                        return response.content
+                    else:
+                        self.logger.debug("create_network_rest task failed. Network Create response : {}"
+                                          .format(response.content))
             except Exception as exp:
                 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
 
@@ -2620,11 +3971,13 @@ class vimconnector(vimconn.vimconnector):
                 The return xml content of respond or None
         """
 
-        url_list = [vca.host, '/api/admin']
-        response = Http.get(url=''.join(url_list),
-                            headers=vca.vcloud_session.get_vcloud_headers(),
-                            verify=vca.verify,
-                            logger=vca.logger)
+        url_list = [self.url, '/api/admin']
+        if vca:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=''.join(url_list),
+                                            headers=headers)
 
         if response.status_code == requests.codes.ok:
             return response.content
@@ -2651,7 +4004,7 @@ class vimconnector(vimconn.vimconnector):
     def create_vdc_from_tmpl_rest(self, vdc_name=None):
         """
         Method create vdc in vCloud director based on VDC template.
-        it uses pre-defined template that must be named openmano
+        It uses a pre-defined template.
 
         Args:
             vdc_name -  name of a new vdc.
@@ -2659,20 +4012,22 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return xml content of respond or None
         """
-
+        # pre-requisite: at least one VDC template must be available in vCD
         self.logger.info("Creating new vdc {}".format(vdc_name))
-        vca = self.connect()
+        vca = self.connect_as_admin()
         if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
         if vdc_name is None:
             return None
 
-        url_list = [vca.host, '/api/vdcTemplates']
+        url_list = [self.url, '/api/vdcTemplates']
         vm_list_rest_call = ''.join(url_list)
-        response = Http.get(url=vm_list_rest_call,
-                            headers=vca.vcloud_session.get_vcloud_headers(),
-                            verify=vca.verify,
-                            logger=vca.logger)
+
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                    'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+        response = self.perform_request(req_type='GET',
+                                        url=vm_list_rest_call,
+                                        headers=headers)
 
         # container url to a template
         vdc_template_ref = None
@@ -2682,8 +4037,7 @@ class vimconnector(vimconn.vimconnector):
                 # application/vnd.vmware.admin.providervdc+xml
                 # we need find a template from witch we instantiate VDC
                 if child.tag.split("}")[1] == 'VdcTemplate':
-                    if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml' and child.attrib.get(
-                            'name') == 'openmano':
+                    if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
                         vdc_template_ref = child.attrib.get('href')
         except:
             self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
@@ -2696,16 +4050,23 @@ class vimconnector(vimconn.vimconnector):
 
         try:
             # instantiate vdc
-            url_list = [vca.host, '/api/org/', self.org_uuid, '/action/instantiate']
+            url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
             vm_list_rest_call = ''.join(url_list)
             data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                                         <Source href="{1:s}"></Source>
                                         <Description>openmano</Description>
                                         </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
-            headers = vca.vcloud_session.get_vcloud_headers()
+
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
-            response = Http.post(url=vm_list_rest_call, headers=headers, data=data, verify=vca.verify,
-                                 logger=vca.logger)
+
+            response = self.perform_request(req_type='POST',
+                                            url=vm_list_rest_call,
+                                            headers=headers,
+                                            data=data)
+
+            vdc_task = self.get_task_from_response(response.content)
+            self.client.get_task_monitor().wait_for_success(task=vdc_task)
+
             # if we all ok we respond with content otherwise by default None
             if response.status_code >= 200 and response.status_code < 300:
                 return response.content
@@ -2721,29 +4082,28 @@ class vimconnector(vimconn.vimconnector):
         Method create network in vCloud director
 
         Args:
-            network_name - is network name to be created.
-            parent_network_uuid - is parent provider vdc network that will be used for mapping.
-            It optional attribute. by default if no parent network indicate the first available will be used.
-
+            vdc_name - vdc name to be created
             Returns:
-                The return network uuid or return None
+                The return response
         """
 
         self.logger.info("Creating new vdc {}".format(vdc_name))
 
         vca = self.connect_as_admin()
         if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
         if vdc_name is None:
             return None
 
-        url_list = [vca.host, '/api/admin/org/', self.org_uuid]
+        url_list = [self.url, '/api/admin/org/', self.org_uuid]
         vm_list_rest_call = ''.join(url_list)
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            response = Http.get(url=vm_list_rest_call,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+
+        if vca._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
 
             provider_vdc_ref = None
             add_vdc_rest_url = None
@@ -2793,10 +4153,12 @@ class vimconnector(vimconn.vimconnector):
                                                                                                   escape(vdc_name),
                                                                                                   provider_vdc_ref)
 
-                    headers = vca.vcloud_session.get_vcloud_headers()
                     headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
-                    response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify,
-                                         logger=vca.logger)
+
+                    response = self.perform_request(req_type='POST',
+                                                    url=add_vdc_rest_url,
+                                                    headers=headers,
+                                                    data=data)
 
                     # if we all ok we respond with content otherwise by default None
                     if response.status_code == 201:
@@ -2820,21 +4182,26 @@ class vimconnector(vimconn.vimconnector):
         if need_admin_access:
             vca = self.connect_as_admin()
         else:
-            vca = self.connect()
+            vca = self.client
 
         if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
         if vapp_uuid is None:
             return None
 
-        url_list = [vca.host, '/api/vApp/vapp-', vapp_uuid]
+        url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
         get_vapp_restcall = ''.join(url_list)
 
-        if vca.vcloud_session and vca.vcloud_session.organization:
-            response = Http.get(url=get_vapp_restcall,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+        if vca._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=get_vapp_restcall,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                if not need_admin_access:
+                    response = self.retry_rest('GET', get_vapp_restcall)
 
             if response.status_code != requests.codes.ok:
                 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
@@ -2940,23 +4307,23 @@ class vimconnector(vimconn.vimconnector):
                 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
         return parsed_respond
 
-    def acuire_console(self, vm_uuid=None):
+    def acquire_console(self, vm_uuid=None):
 
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
         if vm_uuid is None:
             return None
-
-        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
-            vm_dict = self.get_vapp_details_rest(self, vapp_uuid=vm_uuid)
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
             console_dict = vm_dict['acquireTicket']
             console_rest_call = console_dict['href']
 
-            response = Http.post(url=console_rest_call,
-                                 headers=vca.vcloud_session.get_vcloud_headers(),
-                                 verify=vca.verify,
-                                 logger=vca.logger)
+            response = self.perform_request(req_type='POST',
+                                            url=console_rest_call,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('POST', console_rest_call)
 
             if response.status_code == requests.codes.ok:
                 return response.content
@@ -3013,17 +4380,18 @@ class vimconnector(vimconn.vimconnector):
             Returns:
                 The return network uuid or return None
         """
-        vca = self.connect()
-        if not vca:
-            raise vimconn.vimconnConnectionException("self.connect() is failed")
         if disk_href is None or disk_size is None:
             return None
 
-        if vca.vcloud_session and vca.vcloud_session.organization:
-            response = Http.get(url=disk_href,
-                                headers=vca.vcloud_session.get_vcloud_headers(),
-                                verify=vca.verify,
-                                logger=vca.logger)
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=disk_href,
+                                            headers=headers)
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', disk_href)
 
         if response.status_code != requests.codes.ok:
             self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
@@ -3032,6 +4400,8 @@ class vimconnector(vimconn.vimconnector):
         try:
             lxmlroot_respond = lxmlElementTree.fromstring(response.content)
             namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
             namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
 
             for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
@@ -3045,23 +4415,26 @@ class vimconnector(vimconn.vimconnector):
                                              xml_declaration=True)
 
             #Send PUT request to modify disk size
-            headers = vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
 
-            response = Http.put(url=disk_href,
-                                data=data,
-                                headers=headers,
-                                verify=vca.verify, logger=self.logger)
+            response = self.perform_request(req_type='PUT',
+                                            url=disk_href,
+                                            headers=headers,
+                                            data=data)
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', disk_href, add_headers, data)
 
             if response.status_code != 202:
                 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
                                                                             response.status_code))
             else:
-                modify_disk_task = taskType.parseString(response.content, True)
-                if type(modify_disk_task) is GenericTask:
-                    status = vca.block_until_completed(modify_disk_task)
-                    return status
-
+                modify_disk_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
+                if result.get('status') == 'success':
+                    return True
+                else:
+                    return False
             return None
 
         except Exception as exp :
@@ -3081,33 +4454,16 @@ class vimconnector(vimconn.vimconnector):
                 vcenter_conect object
         """
         vm_obj = None
-        vcenter_conect = None
         self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
-        try:
-            vm_vcenter_info = self.get_vm_vcenter_info(vapp_uuid)
-        except Exception as exp:
-            self.logger.error("Error occurred while getting vCenter infromationn"\
-                             " for VM : {}".format(exp))
-            raise vimconn.vimconnException(message=exp)
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
 
-        if vm_vcenter_info["vm_moref_id"]:
-            context = None
-            if hasattr(ssl, '_create_unverified_context'):
-                context = ssl._create_unverified_context()
+        if vm_moref_id:
             try:
                 no_of_pci_devices = len(pci_devices)
                 if no_of_pci_devices > 0:
-                    vcenter_conect = SmartConnect(
-                                            host=vm_vcenter_info["vm_vcenter_ip"],
-                                            user=vm_vcenter_info["vm_vcenter_user"],
-                                            pwd=vm_vcenter_info["vm_vcenter_password"],
-                                            port=int(vm_vcenter_info["vm_vcenter_port"]),
-                                            sslContext=context)
-                    atexit.register(Disconnect, vcenter_conect)
-                    content = vcenter_conect.RetrieveContent()
-
                     #Get VM and its host
-                    host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                     self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
                     if host_obj and vm_obj:
                         #get PCI devies from host on which vapp is currently installed
@@ -3376,7 +4732,7 @@ class vimconnector(vimconn.vimconnector):
                                                                              exp))
         return task
 
-    def get_vm_vcenter_info(self , vapp_uuid):
+    def get_vm_vcenter_info(self):
         """
         Method to get details of vCenter and vm
 
@@ -3409,16 +4765,8 @@ class vimconnector(vimconn.vimconnector):
         else:
             raise vimconn.vimconnException(message="vCenter user password is not provided."\
                                            " Please provide vCenter user password while attaching datacenter to tenant in --config")
-        try:
-            vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
-            if vm_details and "vm_vcenter_info" in vm_details:
-                vm_vcenter_info["vm_moref_id"] = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
 
-            return vm_vcenter_info
-
-        except Exception as exp:
-            self.logger.error("Error occurred while getting vCenter infromationn"\
-                             " for VM : {}".format(exp))
+        return vm_vcenter_info
 
 
     def get_vm_pci_details(self, vmuuid):
@@ -3434,23 +4782,12 @@ class vimconnector(vimconn.vimconnector):
         """
         vm_pci_devices_info = {}
         try:
-            vm_vcenter_info = self.get_vm_vcenter_info(vmuuid)
-            if vm_vcenter_info["vm_moref_id"]:
-                context = None
-                if hasattr(ssl, '_create_unverified_context'):
-                    context = ssl._create_unverified_context()
-                vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"],
-                                        user=vm_vcenter_info["vm_vcenter_user"],
-                                        pwd=vm_vcenter_info["vm_vcenter_password"],
-                                        port=int(vm_vcenter_info["vm_vcenter_port"]),
-                                        sslContext=context
-                                    )
-                atexit.register(Disconnect, vcenter_conect)
-                content = vcenter_conect.RetrieveContent()
-
+            vcenter_conect, content = self.get_vcenter_content()
+            vm_moref_id = self.get_vm_moref_id(vmuuid)
+            if vm_moref_id:
                 #Get VM and its host
                 if content:
-                    host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                     if host_obj and vm_obj:
                         vm_pci_devices_info["host_name"]= host_obj.name
                         vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
@@ -3469,3 +4806,1813 @@ class vimconnector(vimconn.vimconnector):
                              " for VM : {}".format(exp))
             raise vimconn.vimconnException(message=exp)
 
+
+    def reserve_memory_for_all_vms(self, vapp, memory_mb):
+        """
+            Method to reserve memory for all VMs
+            Args :
+                vapp - VApp
+                memory_mb - Memory in MB
+            Returns:
+                None
+        """
+
+        self.logger.info("Reserve memory for all VMs")
+        for vms in vapp.get_all_vms():
+            vm_id = vms.get('id').split(':')[-1]
+
+            url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
+            response = self.perform_request(req_type='GET',
+                                            url=url_rest_call,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {}".format(url_rest_call,
+                                                            response.content,
+                                                            response.status_code))
+                raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
+                                               "memory")
+
+            bytexml = bytes(bytearray(response.content, encoding='utf-8'))
+            contentelem = lxmlElementTree.XML(bytexml)
+            namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.iteritems() if prefix}
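+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.items() if prefix}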
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+
+            # Find the reservation element in the response
+            memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
+            for memelem in memelem_list:
+                memelem.text = str(memory_mb)
+
+            newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
+
+            response = self.perform_request(req_type='PUT',
+                                            url=url_rest_call,
+                                            headers=headers,
+                                            data=newdata)
+
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+
+            if response.status_code != 202:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {} ".format(url_rest_call,
+                                  response.content,
+                                  response.status_code))
+                raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
+                                               "virtual hardware memory section")
+            else:
+                mem_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=mem_task)
+                if result.get('status') == 'success':
+                    self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
+                                      .format(vm_id))
+                else:
+                    self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
+                                      .format(vm_id))
+
+    def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
+        """
+            Configure VApp network config with org vdc network
+            Args :
+                vapp_id - vApp uuid
+                net_name - target org VDC network name
+            Returns:
+                None
+        """
+
+        self.logger.info("Connecting vapp {} to org vdc network {}".
+                         format(vapp_id, net_name))
+
+        url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
+
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+        response = self.perform_request(req_type='GET',
+                                        url=url_rest_call,
+                                        headers=headers)
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', url_rest_call)
+
+        if response.status_code != 200:
+            self.logger.error("REST call {} failed reason : {}"\
+                              "status code : {}".format(url_rest_call,
+                                                        response.content,
+                                                        response.status_code))
+            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
+                                           "network config section")
+
+        data = response.content
+        headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
+        net_id = self.get_network_id_by_name(net_name)
+        if not net_id:
+            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
+                                           "existing network")
+
+        bytexml = bytes(bytearray(data, encoding='utf-8'))
+        newelem = lxmlElementTree.XML(bytexml)
+        namespaces = {prefix: uri for prefix, uri in newelem.nsmap.iteritems() if prefix}
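+        #For python3
+        #namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}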
+        namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
+        nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
+
+        # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
+        parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
+        if parentnetworklist:
+            for pn in parentnetworklist:
+                if "href" not in pn.keys():
+                    id_val = pn.get("id")
+                    href_val = "{}/api/network/{}".format(self.url, id_val)
+                    pn.set("href", href_val)
+
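+        # FenceMode "bridged" attaches the vApp network directly to the parent
+        # org VDC network (no NAT/fencing in between).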
+        newstr = """<NetworkConfig networkName="{}">
+                  <Configuration>
+                       <ParentNetwork href="{}/api/network/{}"/>
+                       <FenceMode>bridged</FenceMode>
+                  </Configuration>
+              </NetworkConfig>
+           """.format(net_name, self.url, net_id)
+        newcfgelem = lxmlElementTree.fromstring(newstr)
+        if nwcfglist:
+            nwcfglist[0].addnext(newcfgelem)
+
+        newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
+
+        response = self.perform_request(req_type='PUT',
+                                        url=url_rest_call,
+                                        headers=headers,
+                                        data=newdata)
+
+        if response.status_code == 403:
+            add_headers = {'Content-Type': headers['Content-Type']}
+            response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+
+        if response.status_code != 202:
+            self.logger.error("REST call {} failed reason : {} "\
+                              "status code : {}".format(url_rest_call,
+                                                        response.content,
+                                                        response.status_code))
+            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
+                                           "network config section")
+        else:
+            vapp_task = self.get_task_from_response(response.content)
+            result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
+            if result.get('status') == 'success':
+                self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
+                                 "network {}".format(vapp_id, net_name))
+            else:
+                self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
+                                  "connect to network {}".format(vapp_id, net_name))
+
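+    # Illustrative usage sketch (not part of the driver flow; the vApp id and
+    # network name below are hypothetical values):
+    #
+    #   vim.connect_vapp_to_org_vdc_network(
+    #       vapp_id="11111111-2222-3333-4444-555555555555",
+    #       net_name="mgmt-net")
+    #
+    # On success the vApp's NetworkConfigSection gains a bridged NetworkConfig
+    # entry pointing at the org VDC network.
+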
+    def remove_primary_network_adapter_from_all_vms(self, vapp):
+        """
+            Method to remove the primary network adapter from all VMs in a vApp
+            Args :
+                vapp - VApp object
+            Returns:
+                None
+        """
+
+        self.logger.info("Removing the primary network adapter from all VMs")
+        for vms in vapp.get_all_vms():
+            vm_id = vms.get('id').split(':')[-1]
+
+            url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=url_rest_call,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {} "\
+                                  "status code : {}".format(url_rest_call,
+                                                            response.content,
+                                                            response.status_code))
+                raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
+                                               "network connection section")
+
+            # (the GET body is not reused; a fresh NetworkConnectionSection
+            # containing only the primary index is PUT below)
+
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+
+            newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                      <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
+                              xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+                              xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
+                              xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
+                              xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+                              xmlns:vmw="http://www.vmware.com/schema/ovf"
+                              xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
+                              xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
+                              xmlns:ns9="http://www.vmware.com/vcloud/versions"
+                              href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
+                              <ovf:Info>Specifies the available VM network connections</ovf:Info>
+                             <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
+                             <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
+                      </NetworkConnectionSection>""".format(url=url_rest_call)
+            response = self.perform_request(req_type='PUT',
+                                            url=url_rest_call,
+                                            headers=headers,
+                                            data=newdata)
+
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+
+            if response.status_code != 202:
+                self.logger.error("REST call {} failed reason : {} "\
+                                  "status code : {}".format(url_rest_call,
+                                                            response.content,
+                                                            response.status_code))
+                raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
+                                               "network connection section")
+            else:
+                nic_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                if result.get('status') == 'success':
+                    self.logger.info("remove_primary_network_adapter(): removed primary "\
+                                     "network adapter from VM {}".format(vm_id))
+                else:
+                    self.logger.error("remove_primary_network_adapter(): failed to remove "\
+                                      "primary network adapter from VM {}".format(vm_id))
+
+    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
+        """
+            Method to add a network adapter to all VMs in a vApp
+            Args :
+                vapp - vApp object
+                network_name - name of the network to connect
+                primary_nic_index - int value for primary nic index
+                nicIndex - int value for nic index
+                net - dict with network details; may carry the optional keys
+                      'floating_ip', 'ip_address' and 'mac_address'
+                nic_type - adapter model to attach to the vm (optional)
+            Returns:
+                None
+        """
+
+        self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
+                         format(network_name, nicIndex, nic_type))
+        try:
+            floating_ip = net.get('floating_ip', False)
+            # Stub for ip_address feature
+            ip_address = net.get('ip_address')
+            mac_address = net.get('mac_address')
+
+            if floating_ip:
+                allocation_mode = "POOL"
+            elif ip_address:
+                allocation_mode = "MANUAL"
+            else:
+                allocation_mode = "DHCP"
+
+            if not nic_type:
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                               'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                    response = self.perform_request(req_type='GET',
+                                                    url=url_rest_call,
+                                                    headers=headers)
+
+                    if response.status_code == 403:
+                        response = self.retry_rest('GET', url_rest_call)
+
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {} "\
+                                          "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                                    response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                       "network connection section")
+
+                    data = response.content
+                    data = data.split('<Link rel="edit"')[0]
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        self.logger.debug("add_network_adapter PrimaryNIC not in data")
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                                         allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item = item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
+                    else:
+                        self.logger.debug("add_network_adapter PrimaryNIC in data")
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                          allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data + new_item + '</NetworkConnectionSection>'
+
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+
+                    response = self.perform_request(req_type='PUT',
+                                                    url=url_rest_call,
+                                                    headers=headers,
+                                                    data=data)
+
+                    if response.status_code == 403:
+                        add_headers = {'Content-Type': headers['Content-Type']}
+                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)
+
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {} "\
+                                          "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                                    response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                       "network connection section")
+                    else:
+                        nic_task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                        if result.get('status') == 'success':
+                            self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
+                                             "default NIC type".format(vm_id))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
+                                              "connect NIC type".format(vm_id))
+            else:
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                               'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                    response = self.perform_request(req_type='GET',
+                                                    url=url_rest_call,
+                                                    headers=headers)
+
+                    if response.status_code == 403:
+                        response = self.retry_rest('GET', url_rest_call)
+
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {} "\
+                                          "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                                    response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                       "network connection section")
+                    data = response.content
+                    data = data.split('<Link rel="edit"')[0]
+                    vcd_netadapter_type = nic_type
+                    if nic_type in ['SR-IOV', 'VF']:
+                        vcd_netadapter_type = "SRIOVETHERNETCARD"
+
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                <NetworkAdapterType>{}</NetworkAdapterType>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                               allocation_mode, vcd_netadapter_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
+                    else:
+                        self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    <NetworkAdapterType>{}</NetworkAdapterType>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                allocation_mode, vcd_netadapter_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item = new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            new_item = new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data + new_item + '</NetworkConnectionSection>'
+
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+
+                    response = self.perform_request(req_type='PUT',
+                                                    url=url_rest_call,
+                                                    headers=headers,
+                                                    data=data)
+
+                    if response.status_code == 403:
+                        add_headers = {'Content-Type': headers['Content-Type']}
+                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)
+
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {} "\
+                                          "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                                    response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                       "network connection section")
+                    else:
+                        nic_task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                        if result.get('status') == 'success':
+                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
+                                             "connected to NIC type {}".format(vm_id, nic_type))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
+                                              "failed to connect NIC type {}".format(vm_id, nic_type))
+        except Exception as exp:
+            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
+                              "while adding network adapter: {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
+
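+    # Minimal sketch of the `net` dict consumed above (all keys optional and
+    # values hypothetical). The IP allocation mode is derived from it:
+    # floating_ip -> POOL, ip_address -> MANUAL, otherwise DHCP.
+    #
+    #   net = {'ip_address': '10.0.0.10',           # forces MANUAL allocation
+    #          'mac_address': '00:50:56:aa:bb:cc'}
+    #   self.add_network_adapter_to_vms(vapp, "mgmt-net", primary_nic_index=0,
+    #                                   nicIndex=0, net=net, nic_type="VMXNET3")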
+
+    def set_numa_affinity(self, vmuuid, paired_threads_id):
+        """
+            Method to assign NUMA affinity in the VM configuration parameters
+            Args :
+                vmuuid - vm uuid
+                paired_threads_id - one or more virtual processor numbers
+            Returns:
+                None
+        """
+        vm_obj = None
+        vm_moref_id = None
+        try:
+            vcenter_conect, content = self.get_vcenter_content()
+            vm_moref_id = self.get_vm_moref_id(vmuuid)
+
+            host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+            if vm_obj:
+                config_spec = vim.vm.ConfigSpec()
+                config_spec.extraConfig = []
+                opt = vim.option.OptionValue()
+                opt.key = 'numa.nodeAffinity'
+                opt.value = str(paired_threads_id)
+                config_spec.extraConfig.append(opt)
+                task = vm_obj.ReconfigVM_Task(config_spec)
+                if task:
+                    self.wait_for_vcenter_task(task, vcenter_conect)
+                    extra_config = vm_obj.config.extraConfig
+                    for opts in extra_config:
+                        if 'numa.nodeAffinity' in opts.key:
+                            self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
+                                             "value {} for vm {}".format(opt.value, vm_obj))
+                            return
+            else:
+                self.logger.error("set_numa_affinity: Failed to assign numa affinity")
+        except Exception as exp:
+            self.logger.error("set_numa_affinity : exception {} occurred while setting numa "\
+                              "affinity for VM {} (moref {})".format(exp, vm_obj, vm_moref_id))
+            raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
+                                           "affinity".format(exp))
+
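+    # For reference, the reconfiguration above adds an extra-config entry of
+    # the form (node numbers hypothetical):
+    #
+    #   numa.nodeAffinity = "0,1"
+    #
+    # i.e. the string form of `paired_threads_id`, which vSphere reads as the
+    # set of NUMA nodes the VM may be scheduled on.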
+
+    def cloud_init(self, vapp, cloud_config):
+        """
+        Method to inject ssh keys into the VMs of a vApp via guest customization
+        vapp - vapp object
+        cloud_config a dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+        """
+        try:
+            if not isinstance(cloud_config, dict):
+                raise Exception("cloud_init : parameter cloud_config is not a dictionary")
+            else:
+                key_pairs = []
+                userdata = []
+                if "key-pairs" in cloud_config:
+                    key_pairs = cloud_config["key-pairs"]
+
+                if "users" in cloud_config:
+                    userdata = cloud_config["users"]
+
+                self.logger.debug("cloud_init : Guest os customization started..")
+                customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
+                customize_script = customize_script.replace("&","&amp;")
+                self.guest_customization(vapp, customize_script)
+
+        except Exception as exp:
+            self.logger.error("cloud_init : exception occurred while injecting "\
+                              "ssh-key: {}".format(exp))
+            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
+                                           "ssh-key".format(exp))
+
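+    # Example cloud_config accepted above (a minimal sketch; user name and key
+    # material are hypothetical). Note that only 'key-pairs' and 'users' are
+    # consumed here; the other documented keys belong to the generic interface:
+    #
+    #   cloud_config = {
+    #       "key-pairs": ["ssh-rsa AAAA... operator@host"],
+    #       "users": [{"name": "osm", "key-pairs": ["ssh-rsa AAAA... osm@host"]}]
+    #   }
+    #   self.cloud_init(vapp, cloud_config)
+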
+    def format_script(self, key_pairs=None, users_list=None):
+        # avoid mutable default arguments
+        key_pairs = key_pairs or []
+        users_list = users_list or []
+        bash_script = """#!/bin/sh
+        echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+        if [ "$1" = "precustomization" ];then
+            echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+        """
+
+        keys = "\n".join(key_pairs)
+        if keys:
+            keys_data = """
+            if [ ! -d /root/.ssh ];then
+                mkdir /root/.ssh
+                chown root:root /root/.ssh
+                chmod 700 /root/.ssh
+                touch /root/.ssh/authorized_keys
+                chown root:root /root/.ssh/authorized_keys
+                chmod 600 /root/.ssh/authorized_keys
+                # make centos with selinux happy
+                which restorecon && restorecon -Rv /root/.ssh
+            else
+                touch /root/.ssh/authorized_keys
+                chown root:root /root/.ssh/authorized_keys
+                chmod 600 /root/.ssh/authorized_keys
+            fi
+            echo '{key}' >> /root/.ssh/authorized_keys
+            """.format(key=keys)
+
+            bash_script+= keys_data
+
+        for user in users_list:
+            user_name = user.get('name')
+            if not user_name:
+                # skip entries without the mandatory user name
+                continue
+            if user.get('key-pairs'):
+                user_keys = "\n".join(user['key-pairs'])
+            else:
+                user_keys = None
+
+            add_user_name = """
+                useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
+                """.format(user_name=user_name)
+
+            bash_script+= add_user_name
+
+            if user_keys:
+                user_keys_data = """
+                mkdir /home/{user_name}/.ssh
+                chown {user_name}:{user_name} /home/{user_name}/.ssh
+                chmod 700 /home/{user_name}/.ssh
+                touch /home/{user_name}/.ssh/authorized_keys
+                chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                chmod 600 /home/{user_name}/.ssh/authorized_keys
+                # make centos with selinux happy
+                which restorecon && restorecon -Rv /home/{user_name}/.ssh
+                echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                """.format(user_name=user_name,user_key=user_keys)
+
+                bash_script+= user_keys_data
+
+        return bash_script + "\n\tfi"
+
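+    # Sketch of the script produced for a single default-user key (trimmed; the
+    # key is hypothetical). All key injection sits inside the "precustomization"
+    # branch, so it runs once, when vCD performs guest customization:
+    #
+    #   #!/bin/sh
+    #   echo performing customization tasks with param $1 ... >> /root/customization.log
+    #   if [ "$1" = "precustomization" ];then
+    #       ...
+    #       echo 'ssh-rsa AAAA...' >> /root/.ssh/authorized_keys
+    #   fi
+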
+    def guest_customization(self, vapp, customize_script):
+        """
+        Method to customize guest os
+        vapp - Vapp object
+        customize_script - Customize script to be run at first boot of VM.
+        """
+        for vm in vapp.get_all_vms():
+            vm_id = vm.get('id').split(':')[-1]
+            vm_name = vm.get('name')
+            # guest hostnames (ComputerName) may not contain underscores
+            vm_name = vm_name.replace('_','-')
+
+            vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
+
+            data = """<GuestCustomizationSection
+                           xmlns="http://www.vmware.com/vcloud/v1.5"
+                           xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+                           ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
+                           <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
+                           <Enabled>true</Enabled>
+                           <ChangeSid>false</ChangeSid>
+                           <VirtualMachineId>{}</VirtualMachineId>
+                           <JoinDomainEnabled>false</JoinDomainEnabled>
+                           <UseOrgSettings>false</UseOrgSettings>
+                           <AdminPasswordEnabled>false</AdminPasswordEnabled>
+                           <AdminPasswordAuto>true</AdminPasswordAuto>
+                           <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
+                           <AdminAutoLogonCount>0</AdminAutoLogonCount>
+                           <ResetPasswordRequired>false</ResetPasswordRequired>
+                           <CustomizationScript>{}</CustomizationScript>
+                           <ComputerName>{}</ComputerName>
+                           <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
+                       </GuestCustomizationSection>
+                   """.format(vm_customization_url,
+                                             vm_id,
+                                  customize_script,
+                                           vm_name,
+                              vm_customization_url)
+
+            response = self.perform_request(req_type='PUT',
+                                             url=vm_customization_url,
+                                             headers=headers,
+                                             data=data)
+            if response.status_code == 202:
+                guest_task = self.get_task_from_response(response.content)
+                self.client.get_task_monitor().wait_for_success(task=guest_task)
+                self.logger.info("guest_customization : customized guest os task "\
+                                             "completed for VM {}".format(vm_name))
+            else:
+                self.logger.error("guest_customization : task for customized guest os "\
+                                  "failed for VM {}".format(vm_name))
+                raise vimconn.vimconnException("guest_customization : failed to perform "\
+                                               "guest os customization on VM {}".format(vm_name))
+
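+    # Flow note: the PUT above rewrites the VM's GuestCustomizationSection with
+    # <Enabled>true</Enabled>, so vCD runs `customize_script` inside the guest
+    # during customization (the script gates its work on the "precustomization"
+    # argument, see format_script above).
+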
+    def add_new_disk(self, vapp_uuid, disk_size):
+        """
+            Method to create an empty vm disk
+
+            Args:
+                vapp_uuid - is vapp identifier.
+                disk_size - size of disk to be created in GB
+
+            Returns:
+                None
+        """
+        status = False
+        vm_details = None
+        try:
+            #Disk size in GB, convert it into MB
+            if disk_size is not None:
+                disk_size_mb = int(disk_size) * 1024
+                vm_details = self.get_vapp_details_rest(vapp_uuid)
+
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+                disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+                status = self.add_new_disk_rest(disk_href, disk_size_mb)
+
+        except Exception as exp:
+            msg = "Error occurred while creating new disk {}.".format(exp)
+            self.rollback_newvm(vapp_uuid, msg)
+
+        if status:
+            self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+        else:
+            #If failed to add disk, delete VM
+            vm_name = vm_details["name"] if vm_details else vapp_uuid
+            msg = "add_new_disk: Failed to add new disk to {}".format(vm_name)
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def add_new_disk_rest(self, disk_href, disk_size_mb):
+        """
+        Retrives vApp Disks section & add new empty disk
+
+        Args:
+            disk_href: Disk section href to addd disk
+            disk_size_mb: Disk size in MB
+
+            Returns: Status of add new disk task
+        """
+        status = False
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=disk_href,
+                                            headers=headers)
+        else:
+            # no active session; nothing to do
+            return status
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', disk_href)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
+                              .format(disk_href, response.status_code))
+            return status
+        try:
+            #Find bus type & max of instance IDs assigned to disks
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+            instance_id = 0
+            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
+                if item.find("rasd:Description",namespaces).text == "Hard disk":
+                    inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
+                    if inst_id > instance_id:
+                        instance_id = inst_id
+                        disk_item = item.find("rasd:HostResource" ,namespaces)
+                        bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
+                        bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
+
+            instance_id = instance_id + 1
+            new_item =   """<Item>
+                                <rasd:Description>Hard disk</rasd:Description>
+                                <rasd:ElementName>New disk</rasd:ElementName>
+                                <rasd:HostResource
+                                    xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                                    vcloud:capacity="{}"
+                                    vcloud:busSubType="{}"
+                                    vcloud:busType="{}"></rasd:HostResource>
+                                <rasd:InstanceID>{}</rasd:InstanceID>
+                                <rasd:ResourceType>17</rasd:ResourceType>
+                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
+
+            new_data = response.content
+            #Add new item at the bottom
+            new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
+
+            # Send PUT request to modify virtual hardware section with new disk
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = self.perform_request(req_type='PUT',
+                                            url=disk_href,
+                                            data=new_data,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', disk_href, add_headers, new_data)
+
+            if response.status_code != 202:
+                self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
+                                  .format(disk_href, response.status_code, response.content))
+            else:
+                add_disk_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
+                if result.get('status') == 'success':
+                    status = True
+                else:
+                    self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
+
+        except Exception as exp:
+            self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
+
+        return status
+
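+    # Hypothetical usage sketch: add a 10 GB empty disk given the disk-section
+    # href reported by get_vapp_details_rest():
+    #
+    #   disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+    #   ok = self.add_new_disk_rest(disk_href, disk_size_mb=10 * 1024)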
+
+    def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
+        """
+            Method to add an existing disk to a VM
+            Args :
+                catalogs - List of VDC catalogs
+                image_id - Catalog ID
+                size - disk size in GB (optional)
+                template_name - Name of template in catalog
+                vapp_uuid - UUID of vApp
+            Returns:
+                None
+        """
+        disk_info = None
+        vcenter_conect, content = self.get_vcenter_content()
+        #find moref-id of vm in image
+        catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
+                                                         image_id=image_id,
+                                                        )
+
+        if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
+            if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
+                catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
+                if catalog_vm_moref_id:
+                    self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
+                    host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
+                    if catalog_vm_obj:
+                        #find existing disk
+                        disk_info = self.find_disk(catalog_vm_obj)
+                    else:
+                        exp_msg = "No VM with image id {} found".format(image_id)
+                        self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+        else:
+            exp_msg = "No Image found with image ID {} ".format(image_id)
+            self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+
+        if disk_info:
+            self.logger.info("Existing disk_info : {}".format(disk_info))
+            #get VM
+            vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+            host, vm_obj = self.get_vm_obj(content, vm_moref_id)
+            status = False
+            if vm_obj:
+                status = self.add_disk(vcenter_conect=vcenter_conect,
+                                       vm=vm_obj,
+                                       disk_info=disk_info,
+                                       size=size,
+                                       vapp_uuid=vapp_uuid
+                                       )
+            if status:
+                self.logger.info("Disk from image id {} added to {}".format(image_id,
+                                                                            vm_obj.config.name))
+        else:
+            msg = "No disk found with image id {} to add to VM {}".format(image_id,
+                                                                          vapp_uuid)
+            self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
+
+
+    def find_disk(self, vm_obj):
+        """
+         Method to find details of the existing disk in a VM
+            Args :
+                vm_obj - vCenter object of VM
+            Returns:
+                disk_info : dict of disk details
+        """
+        disk_info = {}
+        if vm_obj:
+            try:
+                devices = vm_obj.config.hardware.device
+                for device in devices:
+                    if type(device) is vim.vm.device.VirtualDisk:
+                        if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
+                            disk_info["full_path"] = device.backing.fileName
+                            disk_info["datastore"] = device.backing.datastore
+                            disk_info["capacityKB"] = device.capacityInKB
+                            break
+            except Exception as exp:
+                self.logger.error("find_disk() : exception occurred while "\
+                                  "getting existing disk details :{}".format(exp))
+        return disk_info
+
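+    # Shape of the dict returned above when a flat (FlatVer2) VMDK-backed disk
+    # is found; values are hypothetical:
+    #
+    #   {"full_path": "[datastore1] template/template.vmdk",
+    #    "datastore": <vim.Datastore object>,
+    #    "capacityKB": 10485760}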
+
+    def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
+        """
+         Method to add an existing disk to a VM
+            Args :
+                vcenter_conect - vCenter connection object
+                vm - vCenter vm object
+                size - disk size in GB (optional)
+                vapp_uuid - UUID of vApp
+                disk_info : dict of disk details
+            Returns:
+                status : status of add disk task
+        """
+        datastore = disk_info["datastore"] if "datastore" in disk_info else None
+        fullpath = disk_info["full_path"] if "full_path" in disk_info else None
+        capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
+        if size is not None:
+            #Convert size from GB to KB
+            sizeKB = int(size) * 1024 * 1024
+            #compare size of existing disk and user given size; assign whichever is greater
+            self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
+                                                                    sizeKB, capacityKB))
+            if sizeKB > capacityKB:
+                capacityKB = sizeKB
+
+        if datastore and fullpath and capacityKB:
+            try:
+                spec = vim.vm.ConfigSpec()
+                # get all disks on a VM, set unit_number to the next available
+                unit_number = 0
+                controller_key = None
+                for dev in vm.config.hardware.device:
+                    if hasattr(dev.backing, 'fileName'):
+                        unit_number = int(dev.unitNumber) + 1
+                        # unit_number 7 is reserved for the SCSI controller
+                        if unit_number == 7:
+                            unit_number += 1
+                    if isinstance(dev, vim.vm.device.VirtualDisk):
+                        #vim.vm.device.VirtualSCSIController
+                        controller_key = dev.controllerKey
+
+                self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
+                                                                    unit_number, controller_key))
+                # add disk here
+                dev_changes = []
+                disk_spec = vim.vm.device.VirtualDeviceSpec()
+                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+                disk_spec.device = vim.vm.device.VirtualDisk()
+                disk_spec.device.backing = \
+                    vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+                disk_spec.device.backing.thinProvisioned = True
+                disk_spec.device.backing.diskMode = 'persistent'
+                disk_spec.device.backing.datastore = datastore
+                disk_spec.device.backing.fileName = fullpath
+
+                disk_spec.device.unitNumber = unit_number
+                disk_spec.device.capacityInKB = capacityKB
+                disk_spec.device.controllerKey = controller_key
+                dev_changes.append(disk_spec)
+                spec.deviceChange = dev_changes
+                task = vm.ReconfigVM_Task(spec=spec)
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+                return status
+            except Exception as exp:
+                exp_msg = "add_disk() : exception {} occurred while adding disk "\
+                          "{} to vm {}".format(exp,
+                                               fullpath,
+                                               vm.config.name)
+                self.rollback_newvm(vapp_uuid, exp_msg)
+        else:
+            msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def get_vcenter_content(self):
+        """
+         Get the vsphere content object
+        """
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info()
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter infromationn"\
+                             " for VM : {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
+
+        context = None
+        if hasattr(ssl, '_create_unverified_context'):
+            context = ssl._create_unverified_context()
+
+        vcenter_conect = SmartConnect(
+                    host=vm_vcenter_info["vm_vcenter_ip"],
+                    user=vm_vcenter_info["vm_vcenter_user"],
+                    pwd=vm_vcenter_info["vm_vcenter_password"],
+                    port=int(vm_vcenter_info["vm_vcenter_port"]),
+                    sslContext=context
+                )
+        atexit.register(Disconnect, vcenter_conect)
+        content = vcenter_conect.RetrieveContent()
+        return vcenter_conect, content
+
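+    # Note: an unverified SSL context is used above because vCenter instances
+    # backing vCD commonly run with self-signed certificates; the Disconnect
+    # handler registered with atexit closes the session on interpreter exit.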
+
+    def get_vm_moref_id(self, vapp_uuid):
+        """
+        Get the moref_id of given VM
+        """
+        try:
+            vm_moref_id = None
+            if vapp_uuid:
+                vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+                if vm_details and "vm_vcenter_info" in vm_details:
+                    vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+            return vm_moref_id
+
+        except Exception as exp:
+            self.logger.error("Error occurred while getting VM moref ID "\
+                             " for VM : {}".format(exp))
+            return None
+
+
+    def get_vapp_template_details(self, catalogs=None, image_id=None, template_name=None):
+        """
+            Method to get vApp template details
+                Args :
+                    catalogs - list of VDC catalogs
+                    image_id - Catalog ID to find
+                    template_name : template name in catalog
+                Returns:
+                    parsed_response : dict of vApp template details
+        """
+        parsed_response = {}
+
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+
+        try:
+            org, vdc = self.get_vdc_details()
+            catalog = self.get_catalog_obj(image_id, catalogs)
+            if catalog:
+                items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
+                catalog_items = [items.attrib]
+
+                if len(catalog_items) == 1:
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                               'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+
+                    response = self.perform_request(req_type='GET',
+                                                    url=catalog_items[0].get('href'),
+                                                    headers=headers)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_template_href = entity.get("href")
+                    #get vapp details and parse moref id
+
+                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                                  'vmw': 'http://www.vmware.com/schema/ovf',
+                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
+                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
+                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
+                                }
+
+                    if vca._session:
+                        response = self.perform_request(req_type='GET',
+                                                        url=vapp_template_href,
+                                                        headers=headers)
+
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug("REST API call {} failed. Return status code {}".format(
+                                                vapp_template_href, response.status_code))
+                        else:
+                            xmlroot_respond = XmlElementTree.fromstring(response.content)
+                            vCloud_extension_section = None
+                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                            if children_section is not None:
+                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                            if vCloud_extension_section is not None:
+                                vm_vcenter_info = {}
+                                vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                                vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                                if vmext is not None:
+                                    vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                                parsed_response["vm_vcenter_info"] = vm_vcenter_info
+
+        except Exception as exp:
+            self.logger.error("Error occurred calling rest api for getting vApp details {}".format(exp))
+
+        return parsed_response
+
+
+    def rollback_newvm(self, vapp_uuid, msg, exp_type="Generic"):
+        """
+            Method to delete a vApp after a failure and raise the matching exception
+                Args :
+                    vapp_uuid - vApp UUID
+                    msg - Error message to be logged
+                    exp_type : Exception type ("Generic" or "NotFound")
+                Returns:
+                    None
+        """
+        if vapp_uuid:
+            self.delete_vminstance(vapp_uuid)
+        else:
+            msg = "No vApp ID"
+        self.logger.error(msg)
+        if exp_type == "NotFound":
+            raise vimconn.vimconnNotFoundException(message=msg)
+        else:
+            raise vimconn.vimconnException(msg)
+
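+    # Behaviour sketch (messages hypothetical): the vApp is deleted when an id
+    # is given, then the matching connector exception is raised:
+    #
+    #   self.rollback_newvm(vapp_uuid, "add_new_disk: failed")                 # vimconnException
+    #   self.rollback_newvm(vapp_uuid, "No Image found", exp_type="NotFound")  # vimconnNotFoundException
+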
+    def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
+        """
+            Method to attach SRIOV adapters to a VM
+
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                sriov_nets - SRIOV device information as specified in the VNFD (flavor)
+                vmname_andid - vmname
+
+            Returns:
+                The status of the add SRIOV adapter task, the vm object and the
+                vcenter_conect object
+        """
+        vm_obj = None
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+
+        if vm_moref_id:
+            try:
+                no_of_sriov_devices = len(sriov_nets)
+                if no_of_sriov_devices > 0:
+                    #Get VM and its host
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                    if host_obj and vm_obj:
+                        #get SRIOV devices from the host on which the vapp is currently installed
+                        avilable_sriov_devices = self.get_sriov_devices(host_obj,
+                                                                no_of_sriov_devices,
+                                                                )
+
+                        if len(avilable_sriov_devices) == 0:
+                            #find other hosts with active pci devices
+                            new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
+                                                                content,
+                                                                no_of_sriov_devices,
+                                                                )
+
+                            if new_host_obj is not None and len(avilable_sriov_devices)> 0:
+                                #Migrate vm to the host where SRIOV devices are available
+                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
+                                                                                    new_host_obj))
+                                task = self.relocate_vm(new_host_obj, vm_obj)
+                                if task is not None:
+                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    self.logger.info("Migrate VM status: {}".format(result))
+                                    host_obj = new_host_obj
+                                else:
+                                    self.logger.error("Failed to migrate VM {} to host {}".format(
+                                                                    vmname_andid,
+                                                                    new_host_obj))
+                                    raise vimconn.vimconnNotFoundException(
+                                        "Failed to migrate VM {} to host {}".format(
+                                                        vmname_andid,
+                                                        new_host_obj)
+                                            )
+
+                        if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
+                            #Add SRIOV devices one by one
+                            for sriov_net in sriov_nets:
+                                network_name = sriov_net.get('net_id')
+                                dvs_portgr_name = self.create_dvPort_group(network_name)
+                                if sriov_net.get('type') in ("VF", "SR-IOV"):
+                                    #add vlan ID, modify portgroup for vlan ID
+                                    self.configure_vlanID(content, vcenter_conect, network_name)
+
+                                task = self.add_sriov_to_vm(content,
+                                                            vm_obj,
+                                                            host_obj,
+                                                            network_name,
+                                                            avilable_sriov_devices[0]
+                                                            )
+                                if task:
+                                    status = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    if status:
+                                        self.logger.info("Added SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                else:
+                                    self.logger.error("Failed to add SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                    raise vimconn.vimconnUnexpectedResponse(
+                                        "Failed to add SRIOV adapter to VM {}".format(str(vm_obj)))
+                            return True, vm_obj, vcenter_conect
+                        else:
+                            self.logger.error("Currently there is no host with"\
+                                              " the {} available SRIOV "\
+                                              "VFs required for VM {}".format(
+                                                                no_of_sriov_devices,
+                                                                vmname_andid)
+                                              )
+                            raise vimconn.vimconnNotFoundException(
+                                    "Currently there is no host with the {} "\
+                                    "available SRIOV devices required for VM {}".format(
+                                                                            no_of_sriov_devices,
+                                                                            vmname_andid))
+                else:
+                    self.logger.debug("No information about SRIOV devices: {}".format(sriov_nets))
+
+            except vmodl.MethodFault as error:
+                self.logger.error("Error occurred while adding SRIOV: {}".format(error))
+        return None, vm_obj, vcenter_conect
+
+
+    def get_sriov_devices(self, host, no_of_vfs):
+        """
+            Method to get the details of SRIOV devices on given host
+             Args:
+                host - vSphere host object
+                no_of_vfs - number of VFs needed on host
+
+             Returns:
+                array of SRIOV devices
+        """
+        sriovInfo = []
+        if host:
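+            # Scan the host's PCI passthrough entries for active SR-IOV devices;
+            # the first device exposing at least no_of_vfs virtual functions is returned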
+            for device in host.config.pciPassthruInfo:
+                if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
+                    if device.numVirtualFunction >= no_of_vfs:
+                        sriovInfo.append(device)
+                        break
+        return sriovInfo
+
+
+    def get_host_and_sriov_devices(self, content, no_of_vfs):
+        """
+         Method to get information about SRIOV devices on all hosts
+
+            Args:
+                content - vCenter content object
+                no_of_vfs - number of PCI VFs needed on host
+
+            Returns:
+                 array of SRIOV devices and host object
+        """
+        host_obj = None
+        sriov_device_objs = None
+        try:
+            if content:
+                container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                            [vim.HostSystem], True)
+                for host in container.view:
+                    devices = self.get_sriov_devices(host, no_of_vfs)
+                    if devices:
+                        host_obj = host
+                        sriov_device_objs = devices
+                        break
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
+
+        return host_obj, sriov_device_objs
+
+
+    def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
+        """
+         Method to add SRIOV adapter to vm
+
+            Args:
+                host_obj - vSphere host object
+                vm_obj - vSphere vm object
+                content - vCenter content object
+                network_name - name of distributed virtual portgroup
+                sriov_device - SRIOV device info
+
+            Returns:
+                 task object
+        """
+        devices = []
+        vnic_label = "sriov nic"
+        try:
+            dvs_portgr = self.get_dvport_group(network_name)
+            network_name = dvs_portgr.name
+            nic = vim.vm.device.VirtualDeviceSpec()
+            # VM device
+            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+            nic.device = vim.vm.device.VirtualSriovEthernetCard()
+            nic.device.addressType = 'assigned'
+            nic.device.deviceInfo = vim.Description()
+            nic.device.deviceInfo.label = vnic_label
+            nic.device.deviceInfo.summary = network_name
+            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+
+            nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
+            nic.device.backing.deviceName = network_name
+            nic.device.backing.useAutoDetect = False
+            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+            nic.device.connectable.startConnected = True
+            nic.device.connectable.allowGuestControl = True
+
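+            # Bind the vNIC to the physical function (PF) of the selected SR-IOV device;
+            # the hypervisor allocates a virtual function (VF) from this PF when the VM powers on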
+            nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
+
+            devices.append(nic)
+            vmconf = vim.vm.ConfigSpec(deviceChange=devices)
+            task = vm_obj.ReconfigVM_Task(vmconf)
+            return task
+        except Exception as exp:
+            self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
+            return None
+
+
+    def create_dvPort_group(self, network_name):
+        """
+         Method to create distributed virtual portgroup
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                portgroup key
+        """
+        try:
+            network_name = "{}-{}".format(network_name, uuid.uuid4())
+            vcenter_conect, content = self.get_vcenter_content()
+
+            dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
+            if dv_switch:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.name = network_name
+
+                dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
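+                # earlyBinding: a dvPort is assigned to the vNIC as soon as it connects to the portgroup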
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
+
+                task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
+                self.wait_for_vcenter_task(task, vcenter_conect)
+
+                dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
+                if dvPort_group:
+                    self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
+                    return dvPort_group.key
+            else:
+                self.logger.debug("No distributed virtual switch found with name {}".format(self.dvs_name))
+
+        except Exception as exp:
+            self.logger.error("Error occurred while creating distributed virtual port group {}"\
+                             " : {}".format(network_name, exp))
+        return None
+
+    def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
+        """
+         Method to reconfigure distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of distributed virtual portgroup
+                content - vCenter content object
+                config_info - distributed virtual portgroup configuration
+
+            Returns:
+                task object
+        """
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.configVersion = dvPort_group.config.configVersion
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                if "vlanID" in config_info:
+                    dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+                    dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
+
+                task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
+                return task
+            else:
+                return None
+        except Exception as exp:
+            self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
+                             " : {}".format(dvPort_group_name, exp))
+            return None
+
+
+    def destroy_dvport_group(self, dvPort_group_name):
+        """
+         Method to destroy distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of distributed virtual portgroup
+
+            Returns:
+                True if the portgroup was successfully deleted, else None
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        try:
+            status = None
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                task = dvPort_group.Destroy_Task()
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+            return status
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
+                                                                    exp, dvPort_group_name))
+            return None
+
+
+    def get_dvport_group(self, dvPort_group_name):
+        """
+        Method to get distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - key of the distributed virtual portgroup
+
+            Returns:
+                portgroup object
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        dvPort_group = None
+        try:
+            container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
+            for item in container.view:
+                if item.key == dvPort_group_name:
+                    dvPort_group = item
+                    break
+            return dvPort_group
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
+                                                                            exp, dvPort_group_name))
+            return None
+
+    def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
+        """
+         Method to get the vlan ID of a distributed virtual portgroup
+
+            Args:
+                dvPort_group_name - key of the distributed virtual portgroup
+
+            Returns:
+                vlan ID
+        """
+        vlanId = None
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
+                                                                            exp, dvPort_group_name))
+        return vlanId
+
+
+    def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
+        """
+         Method to configure a vlan ID on a distributed virtual portgroup
+
+            Args:
+                content - vCenter content object
+                vcenter_conect - vCenter connection object
+                dvPort_group_name - key of the distributed virtual portgroup
+
+            Returns:
+                None
+        """
+        vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
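+        # A vlanId of 0 means the portgroup is currently untagged; assign an unused ID from the configured range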
+        if vlanID == 0:
+            #configure vlanID
+            vlanID = self.genrate_vlanID(dvPort_group_name)
+            config = {"vlanID":vlanID}
+            task = self.reconfig_portgroup(content, dvPort_group_name,
+                                    config_info=config)
+            if task:
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+                if status:
+                    self.logger.info("Reconfigured portgroup {} for vlan ID {}".format(
+                                                        dvPort_group_name, vlanID))
+            else:
+                self.logger.error("Failed to reconfigure portgroup {} for vlan ID {}".format(
+                                        dvPort_group_name, vlanID))
+
+
+    def genrate_vlanID(self, network_name):
+        """
+         Method to get unused vlanID
+            Args:
+                network_name - name of network/portgroup
+            Returns:
+                vlanID
+        """
+        vlan_id = None
+        used_ids = []
+        if self.config.get('vlanID_range') is None:
+            raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
+                        "in the config before creating an SRIOV network with a vlan tag")
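+        # 'vlanID_range' entries are "start-end" strings, e.g. ["3000-3100", "3200-3300"]
+        # (illustrative values); unused IDs are allocated from these ranges below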
+        if "used_vlanIDs" not in self.persistent_info:
+            self.persistent_info["used_vlanIDs"] = {}
+        else:
+            used_ids = self.persistent_info["used_vlanIDs"].values()
+            #For python3
+            #used_ids = list(self.persistent_info["used_vlanIDs"].values())
+
+        for vlanID_range in self.config.get('vlanID_range'):
+            start_vlanid, end_vlanid = map(int, vlanID_range.split("-"))
+            if start_vlanid > end_vlanid:
+                raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
+                                                                        vlanID_range))
+
+            for id in xrange(start_vlanid, end_vlanid + 1):
+            #For python3
+            #for id in range(start_vlanid, end_vlanid + 1):
+                if id not in used_ids:
+                    vlan_id = id
+                    self.persistent_info["used_vlanIDs"][network_name] = vlan_id
+                    return vlan_id
+        raise vimconn.vimconnConflictException("All Vlan IDs are in use")
+
+
+    def get_obj(self, content, vimtype, name):
+        """
+         Get the vSphere object associated with a given text name
+        """
+        obj = None
+        container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
+        for item in container.view:
+            if item.name == name:
+                obj = item
+                break
+        return obj
+
+
+    def insert_media_to_vm(self, vapp, image_id):
+        """
+        Method to insert media CD-ROM (ISO image) from catalog to vm.
+        vapp - vApp object used to get the vm id
+        image_id - image id of the CD-ROM to be inserted into the vm
+        """
+        # create connection object
+        vca = self.connect()
+        try:
+            # fetching catalog details
+            rest_url = "{}/api/catalog/{}".format(self.url, image_id)
+            if vca._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=rest_url,
+                                                headers=headers)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {} "\
+                             "status code : {}".format(rest_url,
+                                                    response.content,
+                                               response.status_code))
+                raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
+                                                                    "catalog details")
+            # searching iso name and id
+            iso_name, media_id = self.get_media_details(vca, response.content)
+
+            if iso_name and media_id:
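+                # Build the vCloud MediaInsertOrEjectParams payload that references the ISO by its media urn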
+                data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                     <ns6:MediaInsertOrEjectParams
+                     xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" 
+                     xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" 
+                     xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" 
+                     xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" 
+                     xmlns:ns6="http://www.vmware.com/vcloud/v1.5" 
+                     xmlns:ns7="http://www.vmware.com/schema/ovf" 
+                     xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" 
+                     xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
+                     <ns6:Media
+                        type="application/vnd.vmware.vcloud.media+xml"
+                        name="{}"
+                        id="urn:vcloud:media:{}"
+                        href="https://{}/api/media/{}"/>
+                     </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
+                                                                self.url,media_id)
+
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
+
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
+                    rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
+
+                    response = self.perform_request(req_type='POST',
+                                                       url=rest_url,
+                                                          data=data,
+                                                    headers=headers)
+
+                    if response.status_code != 202:
+                        error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
+                                    "Status code {}".format(response.text, response.status_code)
+                        self.logger.error(error_msg)
+                        raise vimconn.vimconnException(error_msg)
+                    else:
+                        task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=task)
+                        if result.get('status') == 'success':
+                            self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
+                                                                    " image to vm {}".format(vm_id))
+
+        except Exception as exp:
+            self.logger.error("insert_media_to_vm() : exception occurred "\
+                                            "while inserting media CD-ROM")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def get_media_details(self, vca, content):
+        """
+        Method to get catalog item details
+        vca - connection object
+        content - Catalog details
+        Return - Media name, media id
+        """
+        cataloghref_list = []
+        try:
+            if content:
+                vm_list_xmlroot = XmlElementTree.fromstring(content)
+                for child in vm_list_xmlroot.iter():
+                    if 'CatalogItem' in child.tag:
+                        cataloghref_list.append(child.attrib.get('href'))
+                if cataloghref_list:
+                    for href in cataloghref_list:
+                        if href:
+                            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                            response = self.perform_request(req_type='GET',
+                                                                  url=href,
+                                                           headers=headers)
+                            if response.status_code != 200:
+                                self.logger.error("REST call {} failed reason : {} "\
+                                             "status code : {}".format(href,
+                                                           response.content,
+                                                      response.status_code))
+                                raise vimconn.vimconnException("get_media_details : Failed to get "\
+                                                                         "catalogitem details")
+                            list_xmlroot = XmlElementTree.fromstring(response.content)
+                            for child in list_xmlroot.iter():
+                                if 'Entity' in child.tag:
+                                    if 'media' in child.attrib.get('href'):
+                                        name = child.attrib.get('name')
+                                        media_id = child.attrib.get('href').split('/').pop()
+                                        return name, media_id
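+                            # for/else: the else branch runs only when the loop finds no media Entity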
+                            else:
+                                self.logger.debug("Media name and id not found")
+                                return False, False
+        except Exception as exp:
+            self.logger.error("get_media_details : exception occurred "\
+                                               "getting media details")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def retry_rest(self, method, url, add_headers=None, data=None):
+        """ Method to get Token & retry respective REST request
+            Args:
+                method - HTTP method: one of 'GET', 'PUT', 'POST' or 'DELETE'
+                url - request url to be used
+                add_headers - Additional headers (optional)
+                data - Request payload data to be passed in request
+            Returns:
+                response - Response of request
+        """
+        response = None
+
+        #Get token
+        self.get_token()
+
+        headers = {'Accept': 'application/*+xml;version=' + API_VERSION}
+        if self.client._session:
+            headers['x-vcloud-authorization'] = self.client._session.headers['x-vcloud-authorization']
+
+        if add_headers:
+            headers.update(add_headers)
+
+        if method == 'GET':
+            response = self.perform_request(req_type='GET',
+                                            url=url,
+                                            headers=headers)
+        elif method == 'PUT':
+            response = self.perform_request(req_type='PUT',
+                                            url=url,
+                                            headers=headers,
+                                            data=data)
+        elif method == 'POST':
+            response = self.perform_request(req_type='POST',
+                                            url=url,
+                                            headers=headers,
+                                            data=data)
+        elif method == 'DELETE':
+            response = self.perform_request(req_type='DELETE',
+                                            url=url,
+                                            headers=headers)
+        return response
+
+
+    def get_token(self):
+        """ Generate a new token if expired
+
+            Stores in self.client a client object that can later be used to
+            connect to vCloud Director as admin for the VDC
+        """
+        try:
+            self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
+                                                                                      self.user,
+                                                                                      self.org_name))
+            host = self.url
+            client = Client(host, verify_ssl_certs=False)
+            client.set_highest_supported_version()
+            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
+            # connection object
+            self.client = client
+
+        except Exception:
+            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+                                                     "{} as user: {}".format(self.org_name, self.user))
+
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
+
+
+    def get_vdc_details(self):
+        """ Get VDC details using pyVcloud Lib
+
+            Returns org and vdc object
+        """
+        vdc = None
+        try:
+            org = Org(self.client, resource=self.client.get_org())
+            vdc = org.get_vdc(self.tenant_name)
+        except Exception as e:
+            # pyvcloud does not raise a specific exception here; refresh the token and retry
+            self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
+
+        # Retry once after refreshing the token
+        if vdc is None:
+            self.get_token()
+            org = Org(self.client, resource=self.client.get_org())
+            vdc = org.get_vdc(self.tenant_name)
+
+        return org, vdc
+
+
+    def perform_request(self, req_type, url, headers=None, data=None):
+        """Perform the POST/PUT/GET/DELETE request."""
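+        # Note: TLS verification is disabled (verify=False) because vCD setups commonly
+        # use self-signed certificates; the corresponding urllib3 warnings are muted at connector setup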
+
+        #Log REST request details
+        self.log_request(req_type, url=url, headers=headers, data=data)
+        # perform request and return its result
+        if req_type == 'GET':
+            response = requests.get(url=url,
+                                headers=headers,
+                                verify=False)
+        elif req_type == 'PUT':
+            response = requests.put(url=url,
+                                headers=headers,
+                                data=data,
+                                verify=False)
+        elif req_type == 'POST':
+            response = requests.post(url=url,
+                                 headers=headers,
+                                 data=data,
+                                 verify=False)
+        elif req_type == 'DELETE':
+            response = requests.delete(url=url,
+                                 headers=headers,
+                                 verify=False)
+        #Log the REST response
+        self.log_response(response)
+
+        return response
+
+
+    def log_request(self, req_type, url=None, headers=None, data=None):
+        """Logs REST request details"""
+
+        if req_type is not None:
+            self.logger.debug("Request type: {}".format(req_type))
+
+        if url is not None:
+            self.logger.debug("Request url: {}".format(url))
+
+        if headers is not None:
+            for header in headers:
+                self.logger.debug("Request header: {}: {}".format(header, headers[header]))
+
+        if data is not None:
+            self.logger.debug("Request data: {}".format(data))
+
+
+    def log_response(self, response):
+        """Logs REST response details"""
+
+        self.logger.debug("Response status code: {} ".format(response.status_code))
+
+
+    def get_task_from_response(self, content):
+        """
+        content - API response content(response.content)
+        return task object
+        """
+        xmlroot = XmlElementTree.fromstring(content)
+        if xmlroot.tag.split('}')[1] == "Task":
+            return xmlroot
+        else:
+            task = None
+            for ele in xmlroot:
+                if ele.tag.split("}")[1] == "Tasks":
+                    task = ele[0]
+                    break
+            return task
+
+
+    def power_on_vapp(self, vapp_id, vapp_name):
+        """
+        vapp_id - vApp uuid
+        vapp_name - vApp name
+        return - Task object
+        """
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+        poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
+                                                                          vapp_id)
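+        # vCloud Director answers asynchronous power operations with 202 Accepted plus a Task element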
+        response = self.perform_request(req_type='POST',
+                                        url=poweron_href,
+                                        headers=headers)
+
+        if response.status_code != 202:
+            self.logger.error("REST call {} failed reason : {} "\
+                         "status code : {} ".format(poweron_href,
+                                                response.content,
+                                           response.status_code))
+            raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
+                                                      "vApp {}".format(vapp_name))
+        else:
+            poweron_task = self.get_task_from_response(response.content)
+            return poweron_task
+
+