Feature 8029: change RO to Python3, using VIM plugins

Change-Id: I1e7bf61db9c39c66e0233c81bd8b4caa6650d389
Signed-off-by: tierno <alfonso.tiernosepulveda@telefonica.com>
diff --git a/RO-VIM-vmware/Makefile b/RO-VIM-vmware/Makefile
new file mode 100644
index 0000000..283afdf
--- /dev/null
+++ b/RO-VIM-vmware/Makefile
@@ -0,0 +1,26 @@
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+all: clean package
+
+clean:
+	rm -rf dist deb_dist osm_rovim_vmware-*.tar.gz osm_rovim_vmware.egg-info .eggs
+
+package:
+	python3 setup.py --command-packages=stdeb.command sdist_dsc
+	cp debian/python3-osm-rovim-vmware.postinst deb_dist/osm-rovim-vmware*/debian/
+	cd deb_dist/osm-rovim-vmware*/ && dpkg-buildpackage -rfakeroot -uc -us
+
diff --git a/RO-VIM-vmware/debian/python3-osm-rovim-vmware.postinst b/RO-VIM-vmware/debian/python3-osm-rovim-vmware.postinst
new file mode 100755
index 0000000..e7ce877
--- /dev/null
+++ b/RO-VIM-vmware/debian/python3-osm-rovim-vmware.postinst
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact with: OSM_TECH@list.etsi.org
+##
+
+echo "POST INSTALL OSM-ROVIM-VMWARE"
+
+# Pip packages required for the VMware connector
+python3 -m pip install --upgrade pip
+python3 -m pip install --upgrade pyvcloud==19.1.1
+python3 -m pip install --upgrade progressbar
+python3 -m pip install --upgrade pyvmomi
+# python3 -m pip install --upgrade prettytable
+# python3 -m pip install --upgrade pyang pyangbind
diff --git a/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware.py b/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware.py
new file mode 100755
index 0000000..f2ab68c
--- /dev/null
+++ b/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware.py
@@ -0,0 +1,980 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact:  osslegalrouting@vmware.com
+##
+
+
+from vimconn_vmware import vimconnector
+from osm_ro.vimconn import vimconnUnexpectedResponse,vimconnNotFoundException,vimconnException
+from pyvcloud.vcd.client import Client
+from lxml import etree as lxmlElementTree
+from pyvcloud.vcd.org import Org
+from pyvcloud.vcd.vdc import VDC
+from pyvcloud.vcd.vapp import VApp
+import os
+import unittest
+import mock
+import test_vimconn_vmware_xml_response as xml_resp
+from os import path
+
+__author__ = "Prakash Kasar"
+
+class TestVimconn_VMware(unittest.TestCase):
+    def setUp(self):
+        config = { "admin_password": "admin",
+                  "admin_username":"user",
+                  "nsx_user": "nsx",
+                  "nsx_password": "nsx",
+                  "nsx_manager":"https://test-nsx" }
+
+        self.client = Client('test', verify_ssl_certs=False)
+
+        # get vcd org object
+        org_resp = xml_resp.org_xml_response
+        get_org = lxmlElementTree.fromstring(org_resp)
+        self.org = Org(self.client, resource=get_org)
+
+        self.vim = vimconnector(uuid='12354',
+                                 name='test',
+                         tenant_id='abc1234',
+                          tenant_name='test',
+                          url='https://test',
+                               config=config)
+
+
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_get_network_not_found(self, perform_request, connect, get_vdc_details):
+        """
+        Testcase to get network with invalid network id
+        """
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        self.vim.client = self.vim.connect()
+        perform_request.return_value.status_code = 200
+        perform_request.return_value.content = xml_resp.vdc_xml_response
+
+        # call to VIM connector method with invalid id
+        self.assertRaises(vimconnNotFoundException,self.vim.get_network,'mgmt-net')
+
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    def test_get_network(self, connect, get_vdc_details, perform_request):
+        """
+        Testcase to get network with valid network id
+        """
+        net_id = '5c04dc6d-6096-47c6-b72b-68f19013d491'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        self.vim.client = self.vim.connect()
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.vdc_xml_response),
+                                       mock.Mock(status_code = 200,
+                                       content = xml_resp.network_xml_response)]
+        # call to VIM connector method with network_id
+        result = self.vim.get_network(net_id)
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(net_id, result['id'])
+
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    def test_get_network_list_not_found(self, connect, get_vdc_details, perform_request):
+        """
+        Testcase to get list of available networks by invalid network id
+        """
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        network_xml_resp = xml_resp.network_xml_response
+        # created vdc object
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        self.vim.client = self.vim.connect()
+        perform_request.return_value.status_code = 200
+        perform_request.return_value.content = network_xml_resp
+
+        # call to VIM connector method with network_id
+        result = self.vim.get_network_list({'id':'45hdfg-345nb-345'})
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(list(), result)
+
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    def test_get_network_list(self, connect, get_vdc_details, perform_request):
+        """
+        Testcase to get list of available networks by valid network id
+        """
+        # NOTE: interactive debugging (pdb.set_trace) does not work under this test runner
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        net_id = '5c04dc6d-6096-47c6-b72b-68f19013d491'
+        # created vdc object
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # created network object
+        network_xml_resp = xml_resp.network_xml_response
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        self.vim.client = self.vim.connect()
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.vdc_xml_response),
+                                       mock.Mock(status_code = 200,
+                                       content = network_xml_resp)]
+        perform_request.reset_mock()
+        perform_request()
+
+        # call to VIM connector method with network_id
+        result = self.vim.get_network_list({'id': net_id})
+
+        # assert verified expected and return result from VIM connector
+        for item in result:
+            self.assertEqual(item.get('id'), net_id)
+            self.assertEqual(item.get('status'), 'ACTIVE')
+            self.assertEqual(item.get('shared'), False)
+
+    @mock.patch.object(vimconnector,'create_network_rest')
+    def test_new_network(self, create_network_rest):
+        """
+        Testcase to create new network by passing network name and type
+        """
+        # create network response
+        create_net_xml_resp = xml_resp.create_network_xml_response
+        net_name = 'Test_network'
+        net_type = 'bridge'
+        # assumed return value from VIM connector
+        create_network_rest.return_value = create_net_xml_resp
+        # call to VIM connector method with network name and type
+        result = self.vim.new_network(net_name, net_type)
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, 'df1956fa-da04-419e-a6a2-427b6f83788f')
+
+    @mock.patch.object(vimconnector, 'create_network_rest')
+    def test_new_network_not_created(self, create_network_rest):
+        """
+        Testcase to create new network by assigning empty xml data
+        """
+        # assumed return value from VIM connector
+        create_network_rest.return_value = """<?xml version="1.0" encoding="UTF-8"?>
+                                              <OrgVdcNetwork></OrgVdcNetwork>"""
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnUnexpectedResponse,self.vim.new_network,
+                                                              'test_net',
+                                                                'bridge')
+
+    @mock.patch.object(vimconnector, 'connect')
+    @mock.patch.object(vimconnector, 'get_network_action')
+    @mock.patch.object(vimconnector, 'delete_network_action')
+    def test_delete_network(self, delete_network_action, get_network_action, connect):
+        """
+        Testcase to delete network by network id
+        """
+        net_uuid = '0a55e5d1-43a2-4688-bc92-cb304046bf87'
+        # delete network response
+        delete_net_xml_resp = xml_resp.delete_network_xml_response
+
+        # assumed return value from VIM connector
+        self.vim.client = self.vim.connect()
+        get_network_action.return_value = delete_net_xml_resp
+        delete_network_action.return_value = True
+        # call to VIM connector method with network_id
+        result = self.vim.delete_network(net_uuid)
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, net_uuid)
+
+    @mock.patch.object(vimconnector, 'get_vcd_network')
+    def test_delete_network_not_found(self, get_vcd_network):
+        """
+        Testcase to delete network by invalid network id
+        """
+        # assumed return value from VIM connector
+        get_vcd_network.return_value = False
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException,self.vim.delete_network,
+                                    '2a23e5d1-42a2-0648-bc92-cb508046bf87')
+
+    def test_get_flavor(self):
+        """
+        Testcase to get flavor data
+        """
+        flavor_data = {'a646eb8a-95bd-4e81-8321-5413ee72b62e': {'disk': 10,
+                                                                'vcpus': 1,
+                                                               'ram': 1024}}
+        vimconnector.flavorlist = flavor_data
+        result = self.vim.get_flavor('a646eb8a-95bd-4e81-8321-5413ee72b62e')
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, flavor_data['a646eb8a-95bd-4e81-8321-5413ee72b62e'])
+
+    def test_get_flavor_not_found(self):
+        """
+        Testcase to get flavor data with invalid id
+        """
+        vimconnector.flavorlist = {}
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException,self.vim.get_flavor,
+                                'a646eb8a-95bd-4e81-8321-5413ee72b62e')
+
+    def test_new_flavor(self):
+        """
+        Testcase to create new flavor data
+        """
+        flavor_data = {'disk': 10, 'vcpus': 1, 'ram': 1024}
+        result = self.vim.new_flavor(flavor_data)
+        # assert verified expected and return result from VIM connector
+        self.assertIsNotNone(result)
+
+    def test_delete_flavor(self):
+        """
+        Testcase to delete flavor data
+        """
+        flavor_data = {'2cb3dffb-5c51-4355-8406-28553ead28ac': {'disk': 10,
+                                                                'vcpus': 1,
+                                                               'ram': 1024}}
+        vimconnector.flavorlist = flavor_data
+        # return value from VIM connector
+        result = self.vim.delete_flavor('2cb3dffb-5c51-4355-8406-28553ead28ac')
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, '2cb3dffb-5c51-4355-8406-28553ead28ac')
+
+    @mock.patch.object(vimconnector,'connect_as_admin')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_delete_image_not_found(self, perform_request, connect_as_admin):
+        """
+        Testcase to delete image by invalid image id
+        """
+        # creating conn object
+        self.vim.client = self.vim.connect_as_admin()
+
+        # assumed return value from VIM connector
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.delete_catalog_xml_response),
+                                       mock.Mock(status_code = 201,
+                                       content = xml_resp.delete_catalog_item_xml_response)
+                                       ]
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException, self.vim.delete_image, 'invali3453')
+
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(Org,'list_catalogs')
+    def test_get_image_list(self, list_catalogs, connect, get_vdc_details):
+        """
+        Testcase to get image list by valid image id
+        """
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        self.vim.client = self.vim.connect()
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '1', 'creationDate': '2018-02-15T02:16:58.300-08:00', 'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a', 'name': 'cirros034'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'true', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2018-01-26T02:09:12.387-08:00', 'id': 'b139ed82-7ca4-49fb-9882-5f841f59c890', 'name': 'Ubuntu_plugtest-1'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org2', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-06-18T21:33:16.430-07:00', 'id': 'b31e6973-86d2-404b-a522-b16846d099dc', 'name': 'Ubuntu_Cat'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '0', 'creationDate': '2018-02-15T22:26:28.910-08:00', 'id': 'c3b56180-f980-4256-9109-a93168d73ff2', 'name': 'de4ffcf2ad21f1a5d0714d6b868e2645'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-08-23T05:54:56.780-07:00', 'id': 'd0eb0b02-718d-42e0-b889-56575000b52d', 'name': 'Test_Cirros'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-03-08T21:25:05.923-08:00', 'id': 'd3fa3df2-b311-4571-9138-4c66541d7f46', 'name': 'cirros_10'}, {'isShared': 'false', 'numberOfVAppTemplates': '0', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 
'numberOfMedia': '0', 'creationDate': '2017-07-12T22:45:20.537-07:00', 'id': 'd64b2617-ea4b-4b90-910b-102c99dd2031', 'name': 'Ubuntu16'}, {'isShared': 'true', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'true', 'ownerName': 'system', 'numberOfMedia': '1', 'creationDate': '2017-10-14T23:52:37.260-07:00', 'id': 'e8d953db-8dc9-46d5-9cab-329774cd2ad9', 'name': 'Ubuntu_no_nic'}]
+
+        result = self.vim.get_image_list({'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a'})
+
+        # assert verified expected and return result from VIM connector
+        for item in result:
+            self.assertEqual(item['id'], '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a')
+
+    @mock.patch.object(vimconnector,'get_vapp_details_rest')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_get_vminstance(self, get_vdc_details, get_vapp_details_rest):
+        """
+        Testcase to get vminstance by valid vm id
+        """
+        vapp_info = {'status': '4',
+                   'acquireMksTicket': {'href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireMksTicket',
+                   'type': 'application/vnd.vmware.vcloud.mksTicket+xml', 'rel': 'screen:acquireMksTicket'},
+                   'vm_virtual_hardware': {'disk_edit_href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks', 'disk_size': '40960'},
+                   'name': 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa',
+                   'created': '2017-09-21T01:15:31.627-07:00',
+                    'IsEnabled': 'true',
+                   'EndAddress': '12.16.24.199',
+                   'interfaces': [{'MACAddress': '00:50:56:01:12:a2',
+                                   'NetworkConnectionIndex': '0',
+                                   'network': 'testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d',
+                                   'IpAddressAllocationMode': 'DHCP',
+                                   'IsConnected': 'true',
+                                   'IpAddress': '12.16.24.200'}],
+                   'ovfDescriptorUploaded': 'true',
+                   'nestedHypervisorEnabled': 'false',
+                   'Gateway': '12.16.24.1',
+                   'acquireTicket': {'href': 'https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireTicket',
+                   'rel': 'screen:acquireTicket'},
+                   'vmuuid': '47d12505-5968-4e16-95a7-18743edb0c8b',
+                   'Netmask': '255.255.255.0',
+                   'StartAddress': '12.16.24.100',
+                   'primarynetwork': '0',
+                   'networkname': 'External-Network-1074',
+                   'IsInherited': 'false',
+                   'deployed': 'true'} 
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        get_vapp_details_rest.return_value = vapp_info
+
+        result = self.vim.get_vminstance('47d12505-5968-4e16-95a7-18743edb0c8b')
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result['status'], 'ACTIVE')
+        self.assertEqual(result['hostId'], '47d12505-5968-4e16-95a7-18743edb0c8b')
+
+
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(VDC,'get_vapp')
+    @mock.patch.object(VApp,'power_off')
+    @mock.patch.object(VApp,'undeploy')
+    @mock.patch.object(VDC,'delete_vapp')
+    @mock.patch.object(Client,'get_task_monitor')
+    def test_delete_vminstance(self, get_task_monitor, delete_vapp,
+                                               undeploy, poweroff,
+                                         get_vapp, get_vdc_details,
+                                        get_namebyvappid, connect):
+        """
+        Testcase to delete vminstance by valid vm id
+        """
+        vm_id = '4f6a9b49-e92d-4935-87a1-0e4dc9c3a069'
+        vm_name = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        self.vim.client = self.vim.connect()
+        get_vdc_details.return_value = self.org, vdc
+        get_namebyvappid.return_name = vm_name
+
+        vapp_resp = xml_resp.vapp_xml_response
+        vapp = lxmlElementTree.fromstring(vapp_resp)
+        get_vapp.return_value = vapp
+
+        power_off_resp = xml_resp.poweroff_task_xml
+        power_off = lxmlElementTree.fromstring(power_off_resp)
+        poweroff.return_value = power_off
+
+        status_resp = xml_resp.status_task_xml
+        status = lxmlElementTree.fromstring(status_resp)
+        self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
+
+        # call to VIM connector method
+        result = self.vim.delete_vminstance(vm_id)
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, vm_id)
+
+    @mock.patch.object(vimconnector,'get_network_id_by_name')
+    @mock.patch.object(vimconnector,'get_vm_pci_details')
+    @mock.patch.object(VDC,'get_vapp')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'perform_request')
+    @mock.patch.object(VApp,'get_all_vms')
+    def test_refresh_vms_status(self, get_all_vms, perform_request, get_vdc_details,
+                                                          get_namebyvappid, connect,
+                                                       get_vapp, get_vm_pci_details,
+                                                            get_network_id_by_name):
+        """
+        Testcase to refresh vms status by valid vm id
+        """
+        vm_id = '53a529b2-10d8-4d56-a7ad-8182acdbe71c'
+
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        self.vim.client = self.vim.connect()
+        get_vdc_details.return_value = self.org, vdc
+
+        get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        get_vm_pci_details.return_value = {'host_name': 'test-esx-1.corp.local', 'host_ip': '12.19.24.31'}
+        vapp_resp = xml_resp.vapp_xml_response
+        vapp = lxmlElementTree.fromstring(vapp_resp)
+        get_vapp.return_value = vapp
+        get_network_id_by_name.return_value = '47d12505-5968-4e16-95a7-18743edb0c8b'
+
+        vm_resp = xml_resp.vm_xml_response
+        vm_list = lxmlElementTree.fromstring(vm_resp)
+        get_all_vms.return_value = vm_list
+
+        perform_request.return_value.status_code = 200
+        perform_request.return_value.content = vm_resp
+        # call to VIM connector method
+        result = self.vim.refresh_vms_status([vm_id])
+        for attr in result[vm_id]:
+            if attr == 'status':
+                # assert verified expected and return result from VIM connector
+                self.assertEqual(result[vm_id][attr], 'ACTIVE')
+
+    @mock.patch.object(vimconnector,'get_vcd_network')
+    def test_refresh_nets_status(self, get_vcd_network):
+        net_id = 'c2d0f28f-d38b-4588-aecc-88af3d4af58b'
+        network_dict = {'status': '1','isShared': 'false','IpScope': '',
+                        'EndAddress':'12.19.21.15',
+                        'name': 'testing_gwyRXlvWYL1-9ebb6d7b-5c74-472f-be77-963ed050d44d',
+                        'Dns1': '12.19.21.10', 'IpRanges': '',
+                        'Gateway': '12.19.21.23', 'Netmask': '255.255.255.0',
+                        'RetainNetInfoAcrossDeployments': 'false',
+                        'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local',
+                        'StartAddress': '12.19.21.11', 'IpRange': '',
+                        'Configuration': '', 'FenceMode': 'bridged',
+                        'IsInherited': 'true', 'uuid': 'c2d0f28f-d38b-4588-aecc-88af3d4af58b'}
+        # assumed return value from VIM connector
+        get_vcd_network.return_value = network_dict
+        result = self.vim.refresh_nets_status([net_id])
+        # assert verified expected and return result from VIM connector
+        for attr in result[net_id]:
+            if attr == 'status':
+                self.assertEqual(result[net_id][attr], 'ACTIVE')
+
+    @mock.patch.object(VDC,'get_vapp')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_action_vminstance(self, get_vdc_details, get_namebyvappid,
+                                                               connect,
+                                                             get_vapp):
+        """
+        Testcase for action vm instance by vm id
+        """
+        task_resp = xml_resp.poweroff_task_xml
+        vm_id = '05e6047b-6938-4275-8940-22d1ea7245b8'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        self.vim.client = self.vim.connect()
+        power_off_resp = xml_resp.poweroff_task_xml
+        power_off = lxmlElementTree.fromstring(power_off_resp)
+        get_vapp.return_value.undeploy.return_value = power_off
+
+        status_resp = xml_resp.status_task_xml
+        status = lxmlElementTree.fromstring(status_resp)
+        self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
+
+        # call to VIM connector method
+        result = self.vim.action_vminstance(vm_id,{'shutdown': None})
+
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(result, vm_id)
+
+    @mock.patch.object(vimconnector,'get_org')
+    def test_get_tenant_list(self, get_org):
+        """
+        Test case for get tenant list
+        """
+        org_dict = {'catalogs': {'4c4fdb5d-0c7d-4fee-9efd-cb061f327a01': '80d8488f67ba1de98b7f485fba6abbd2', '1b98ca02-b0a6-4ca7-babe-eadc0ae59677': 'Ubuntu', 'e7f27dfe-14b7-49e1-918e-173bda02683a': '834bdd1f28fd15dcbe830456ec58fbca', '9441ee69-0486-4438-ac62-8d8082c51302': 'centos', 'e660cce0-47a6-4315-a5b9-97a39299a374': 'cirros01', '0fd96c61-c3d1-4abf-9a34-0dff8fb65743': 'cirros034', '1c703be3-9bd2-46a2-854c-3e678d5cdda8': 'Ubuntu_plugtest-1', 'bc4e342b-f84c-41bd-a93a-480f35bacf69': 'Cirros', '8a206fb5-3ef9-4571-9bcc-137615f4d930': '255eb079a62ac155e7f942489f14b0c4'}, 'vdcs': {'e6436c6a-d922-4b39-9c1c-b48e766fce5e': 'osm', '3852f762-18ae-4833-a229-42684b6e7373': 'cloud-1-vdc'}, 'networks': {'e203cacd-9320-4422-9be0-12c7def3ab56': 'testing_lNejr37B-38e4ca67-1e26-486f-ad2f-f14bb099e068', 'a6623349-2bef-4367-9fda-d33f9ab927f8': 'Vlan_3151', 'adf780cb-358c-47c2-858d-ae5778ccaf17': 'testing_xwBultc-99b8a2ae-c091-4dd3-bbf7-762a51612385', '721f9efc-11fe-4c13-936d-252ba0ed93c8': 'testing_tLljy8WB5e-a898cb28-e75b-4867-a22e-f2bad285c144', '1512d97a-929d-4b06-b8af-cf5ac42a2aee': 'Managment', 'd9167301-28af-4b89-b9e0-09f612e962fa': 'testing_prMW1VThk-063cb428-eaee-44b8-9d0d-df5fb77a5b4d', '004ae853-f899-43fd-8981-7513a3b40d6b': 'testing_RTtKVi09rld-fab00b16-7996-49af-8249-369c6bbfa02d'}}
+        tenant_name = 'osm'
+        get_org.return_value = org_dict
+
+        # call to VIM connector method
+        results = self.vim.get_tenant_list({'name' : tenant_name})
+        # assert verified expected and return result from VIM connector
+        for result in results:
+            self.assertEqual(tenant_name,result['name'])
+
+    @mock.patch.object(vimconnector,'get_org')
+    def test_get_tenant_list_negative(self, get_org):
+        """
+        Test case for get tenant list negative
+        """
+        org_dict = {'vdcs': {}}
+        tenant_name = 'testosm'
+        get_org.return_value = org_dict
+
+        # call to VIM connector method
+        results = self.vim.get_tenant_list({'name' : tenant_name})
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(results, [])
+
+    @mock.patch.object(vimconnector,'create_vdc')
+    def test_new_tenant(self, create_vdc):
+        """
+        Test case for create new tenant
+        """
+        tenant_name = 'test'
+        vdc = {'a493aa2c-3104-4d63-969b-fc9e72304c9f': 'https://localhost/api/task/e658d84c-007d-4fd8-9590-3a8f93cc0de4'}
+        create_vdc.return_value = vdc
+
+        # call to VIM connector method
+        result = self.vim.new_tenant(tenant_name)
+        # assert verified expected and return result from VIM connector
+        self.assertEqual('a493aa2c-3104-4d63-969b-fc9e72304c9f', result)
+
+    @mock.patch.object(vimconnector,'create_vdc')
+    def test_new_tenant_negative(self, create_vdc):
+        """
+        Test case for create new tenant
+        """
+        tenant_name = 'test'
+        create_vdc.return_value = None
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnException,self.vim.new_tenant,tenant_name)
+
+    @mock.patch.object(vimconnector,'connect_as_admin')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_delete_tenant(self, perform_request, connect, connect_as_admin):
+        """
+        Test case to delete tenant
+        """
+        tenant_id = '753227f5-d6c6-4478-9546-acc5cfff21e9'
+        delete_tenant_resp = xml_resp.delete_tenant
+
+        self.vim.client = self.vim.connect()
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = delete_tenant_resp),
+                                       mock.Mock(status_code = 202,
+                                       content = None)
+                                       ]
+
+        # call to VIM connector method
+        result = self.vim.delete_tenant(tenant_id)
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(tenant_id, result)
+
+    @mock.patch.object(vimconnector,'connect_as_admin')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_delete_tenant_negative(self, perform_request, connect, connect_as_admin):
+        """
+        Test case to delete tenant
+        """
+        tenant_id = 'ten45klsjdf'
+
+        self.vim.client = self.vim.connect()
+        perform_request.return_value.status_code = 201
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException,self.vim.delete_tenant,tenant_id)
+
    @mock.patch.object(vimconnector,'get_vdc_details')
    @mock.patch.object(Org,'list_catalogs')
    @mock.patch.object(vimconnector,'get_vcd_network')
    @mock.patch.object(Org,'get_vdc')
    @mock.patch.object(Org,'get_catalog_item')
    @mock.patch.object(vimconnector,'connect')
    @mock.patch.object(vimconnector,'perform_request')
    @mock.patch.object(Client,'get_task_monitor')
    @mock.patch.object(VDC,'get_vapp')
    @mock.patch.object(vimconnector,'get_network_list')
    @mock.patch.object(vimconnector,'power_on_vapp')
    def test_new_vminstance(self, power_on, get_network_list, get_vapp,
                            get_task_monitor, perform_request, connect,
                            get_catalog_item, get_vdc, get_vcd_network,
                                       list_catalogs, get_vdc_details):
        """
        Test case for new vm instance.

        Happy path: every vCD interaction (catalog item fetch, vApp template
        fetch, deployment, task monitoring, power-on) is mocked to succeed,
        so new_vminstance() must return a non-None result.
        """
        image_id = '34925a30-0f4a-4018-9759-0d6799063b51'
        # Seed the connector's class-level flavor cache so flavor_id resolves
        # without a vCD round trip.
        vimconnector.flavorlist = {'123347db-536b-4936-8b62-1fcdc721865d': {'vcpus': 1,
                                                                            'disk': 10,
                                                                            'ram': 1024}}

        flavor_id = '123347db-536b-4936-8b62-1fcdc721865d'
        # Single bridged virtual NIC attached to an existing vCD network.
        net_list = [{'use': 'bridge', 'name': 'eth0', 'floating_ip': False, 'vpci': '0000:00:11.0', 'port_security': True, 'type': 'virtual', 'net_id': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}]

        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}]

        network_dict = {'status': '1', 'isShared': 'false', 'IpScope': '', 'EndAddress': '192.169.241.150', 'name': 'testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec', 'Dns1': '192.169.241.102', 'IpRanges': '', 'Gateway': '192.169.241.253', 'Netmask': '255.255.255.0', 'RetainNetInfoAcrossDeployments': 'false', 'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local', 'StartAddress': '192.169.241.115', 'IpRange': '', 'Configuration': '', 'FenceMode': 'bridged', 'IsInherited': 'true', 'uuid': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}

        network_list = [{'status': 'ACTIVE', 'name': 'default', 'admin_state_up': True, 'shared': False, 'tenant_id': '2584137f-6541-4c04-a2a2-e56bfca14c69', 'type': 'bridge', 'id': '1fd6421e-929a-4576-bc19-a0c48aea1969'}]

        # created vdc object
        vdc_xml_resp = xml_resp.vdc_xml_response
        vdc = lxmlElementTree.fromstring(vdc_xml_resp)

        catalog_list = lxmlElementTree.fromstring(xml_resp.catalog_list_xml)
        # assumed return value from VIM connector
        get_vdc_details.return_value = self.org, vdc
        list_catalogs.return_value = cat_list
        get_vcd_network.return_value = network_dict
        get_vdc.return_value = vdc
        get_catalog_item.return_value = catalog_list
        self.vim.client = self.vim.connect()
        # REST call sequence: catalog item fetch (200), vApp template fetch
        # (200), vApp instantiation (201).
        perform_request.side_effect = [mock.Mock(status_code = 200,
                                       content = xml_resp.catalogItem_xml),
                                       mock.Mock(status_code = 200,
                                       content = xml_resp.vapp_template_xml),
                                       mock.Mock(status_code = 201,
                                       content = xml_resp.deployed_vapp_xml)]

        # Task monitor reports the deployment task as successful.
        status_resp = xml_resp.status_task_xml
        status = lxmlElementTree.fromstring(status_resp)
        self.vim.connect.return_value.get_task_monitor.return_value.wait_for_success.return_value = status
        vapp_resp = xml_resp.vapp_xml_response
        vapp = lxmlElementTree.fromstring(vapp_resp)
        get_vapp.return_value = vapp
        get_network_list.return_value = network_list
        # NOTE(review): the power-on fixture reuses the poweroff task XML --
        # presumably only the task envelope matters here; confirm if a
        # dedicated power-on payload exists.
        power_on_resp = xml_resp.poweroff_task_xml
        poweron = lxmlElementTree.fromstring(power_on_resp)
        power_on.return_value = poweron

        # call to VIM connector method
        result = self.vim.new_vminstance(name='Test1_vm', image_id=image_id,
                                                        flavor_id=flavor_id,
                                                          net_list=net_list)
        # assert verified expected and return result from VIM connector
        self.assertIsNotNone(result)
+
+
    @mock.patch.object(vimconnector,'get_vdc_details')
    @mock.patch.object(Org,'list_catalogs')
    @mock.patch.object(vimconnector,'get_vcd_network')
    @mock.patch.object(Org,'get_vdc')
    @mock.patch.object(Org,'get_catalog_item')
    @mock.patch.object(vimconnector,'connect')
    @mock.patch.object(vimconnector,'perform_request')
    @mock.patch.object(Client,'get_task_monitor')
    @mock.patch.object(VDC,'get_vapp')
    @mock.patch.object(vimconnector,'get_network_list')
    @mock.patch.object(vimconnector,'power_on_vapp')
    def test_new_vminstance_negative(self, power_on, get_network_list, get_vapp,
                            get_task_monitor, perform_request, connect,
                            get_catalog_item, get_vdc, get_vcd_network,
                                       list_catalogs, get_vdc_details):
        """
        Test case for new vm instance, negative scenario.

        The vApp instantiation request answers with HTTP 400, so
        new_vminstance() must raise vimconnUnexpectedResponse.
        """
        image_id = '34925a30-0f4a-4018-9759-0d6799063b51'
        # Seed the connector's class-level flavor cache so flavor_id resolves
        # without a vCD round trip.
        vimconnector.flavorlist = {'123347db-536b-4936-8b62-1fcdc721865d': {'vcpus': 1,
                                                                            'disk': 10,
                                                                            'ram': 1024}}
        flavor_id = '123347db-536b-4936-8b62-1fcdc721865d'
        # Single bridged virtual NIC attached to an existing vCD network.
        net_list = [{'use': 'bridge', 'name': 'eth0', 'floating_ip': False, 'vpci': '0000:00:11.0', 'port_security': True, 'type': 'virtual', 'net_id': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}]

        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}]

        network_dict = {'status': '1', 'isShared': 'false', 'IpScope': '', 'EndAddress': '192.169.241.150', 'name': 'testing_6n5mJwUyx-ad9d62fc-8223-4dbe-88c4-9f16458ebeec', 'Dns1': '192.169.241.102', 'IpRanges': '', 'Gateway': '192.169.241.253', 'Netmask': '255.255.255.0', 'RetainNetInfoAcrossDeployments': 'false', 'IpScopes': '', 'IsEnabled': 'true', 'DnsSuffix': 'corp.local', 'StartAddress': '192.169.241.115', 'IpRange': '', 'Configuration': '', 'FenceMode': 'bridged', 'IsInherited': 'true', 'uuid': '69c713cb-3eec-452c-9a32-0e95c8ffe567'}

        # created vdc object
        vdc_xml_resp = xml_resp.vdc_xml_response
        vdc = lxmlElementTree.fromstring(vdc_xml_resp)

        catalog_list = lxmlElementTree.fromstring(xml_resp.catalog_list_xml)
        # assumed return value from VIM connector
        get_vdc_details.return_value = self.org, vdc
        list_catalogs.return_value = cat_list
        get_vcd_network.return_value = network_dict
        get_vdc.return_value = vdc
        get_catalog_item.return_value = catalog_list
        self.vim.client = self.vim.connect()
        # REST call sequence: catalog item fetch (200), vApp template fetch
        # (200), then the instantiation request fails with HTTP 400.
        perform_request.side_effect = [mock.Mock(status_code = 200,
                                       content = xml_resp.catalogItem_xml),
                                       mock.Mock(status_code = 200,
                                       content = xml_resp.vapp_template_xml),
                                       mock.Mock(status_code = 400,
                                       content = "Bad request error")]

        # call to VIM connector method
        self.assertRaises(vimconnUnexpectedResponse,self.vim.new_vminstance,
                                                                 name='Test1_vm',
                                                                 image_id=image_id,
                                                                 flavor_id=flavor_id,
                                                                 net_list=net_list)
+
+    @mock.patch.object(vimconnector,'get_catalogid')
+    @mock.patch.object(vimconnector,'upload_vimimage')
+    @mock.patch.object(Org,'create_catalog')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(path,'isfile')
+    @mock.patch.object(os,'access')
+    def test_new_image(self, access, isfile,
+                              get_vdc_details,
+                                list_catalogs,
+                               create_catalog,
+                               upload_vimimage,
+                                get_catalogid):
+        """
+        Test case for create new image
+        """
+        path = '/tmp/cirros/cirros.ovf'
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '9759-0d6799063b51', 'name': 'cirros'}]
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
+
+        # assumed return value from VIM connector
+        isfile.return_value = True
+        access.return_value = True
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        create_catalog.return_value = catalog
+        upload_vimimage.return_value = True
+        get_catalogid.return_value = '9759-0d6799063b51'
+        result = self.vim.new_image({'name': 'TestImage', 'location' : path})
+
+        # assert verified expected and return result from VIM connector
+        self.assertIsNotNone(result)
+
+    @mock.patch.object(vimconnector,'get_catalogid')
+    @mock.patch.object(vimconnector,'upload_vimimage')
+    @mock.patch.object(Org,'create_catalog')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_new_image_negative(self, get_vdc_details, list_catalogs,
+                                              create_catalog,
+                                              upload_vimimage,
+                                              get_catalogid):
+        """
+        Test case for create new image with negative scenario
+        """
+        path = '/tmp/cirros/cirros.ovf'
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org1', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'test'}]
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        create_catalog.return_value = catalog
+        upload_vimimage.return_value = False
+        get_catalogid.return_value = '34925a30-0f4a-4018-9759-0d6799063b51'
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnException,self.vim.new_image,{'name':'TestImage', 'location':path})
+
+    @mock.patch.object(vimconnector,'connect_as_admin')
+    @mock.patch.object(vimconnector,'perform_request')
+    def test_delete_image(self, perform_request, connect_as_admin):
+        """
+        Testcase to delete image by image id
+        """
+        image_id = 'f3bf3733-465b-419f-b675-52f91d18edbb'
+        # creating conn object
+        self.vim.client = self.vim.connect_as_admin()
+
+        # assumed return value from VIM connector
+        perform_request.side_effect = [mock.Mock(status_code = 200,
+                                       content = xml_resp.delete_catalog_xml_response),
+                                       mock.Mock(status_code = 200,
+                                       content = xml_resp.delete_catalog_item_xml_response),
+                                       mock.Mock(status_code = 204,
+                                       content = ''),
+                                       mock.Mock(status_code = 204,
+                                       content = '')
+                                       ]
+
+        # call to vim connctor method
+        result = self.vim.delete_image(image_id)
+        # assert verified expected and return result from VIM connector
+        self.assertEqual(image_id, result)
+
+    @mock.patch.object(vimconnector,'get_catalogid')
+    @mock.patch.object(vimconnector,'upload_vimimage')
+    @mock.patch.object(Org,'create_catalog')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(path,'isfile')
+    @mock.patch.object(os,'access')
+    def test_get_image_id_from_path(self, access, isfile,
+                                              get_vdc_details,
+                                              list_catalogs,
+                                              create_catalog,
+                                              upload_vimimage,
+                                              get_catalogid):
+        """
+        Test case to get image id from image path
+        """
+        path = '/tmp/ubuntu/ubuntu.ovf'
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '7208-0f6777052c30', 'name': 'ubuntu'}]
+
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
+
+        # assumed return value from VIM connector
+        isfile.return_value = True
+        access.return_value = True
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        create_catalog.return_value = catalog
+        upload_vimimage.return_value = True
+        get_catalogid.return_value = '7208-0f6777052c30'
+        result = self.vim.get_image_id_from_path(path=path)
+
+        # assert verified expected and return result from VIM connector
+        self.assertIsNotNone(result)
+
+    @mock.patch.object(vimconnector,'get_catalogid')
+    @mock.patch.object(vimconnector,'upload_vimimage')
+    @mock.patch.object(Org,'create_catalog')
+    @mock.patch.object(Org,'list_catalogs')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(path,'isfile')
+    @mock.patch.object(os,'access')
+    def test_get_image_id_from_path_negative(self, access, isfile,
+                                              get_vdc_details,
+                                              list_catalogs,
+                                              create_catalog,
+                                              upload_vimimage,
+                                              get_catalogid):
+        """
+        Test case to get image id from image path with negative scenario
+        """
+        path = '/tmp/ubuntu/ubuntu.ovf'
+        cat_list = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '7208-0f6777052c30', 'name': 'ubuntu'}]
+
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        catalog = lxmlElementTree.fromstring(xml_resp.catalog1_xml_response)
+
+        # assumed return value from VIM connector
+        isfile.return_value = True
+        access.return_value = True
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = cat_list
+        create_catalog.return_value = catalog
+        upload_vimimage.return_value = False
+        get_catalogid.return_value = '7208-0f6777052c30'
+        self.assertRaises(vimconnException, self.vim.get_image_id_from_path, path)
+
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(Org,'list_catalogs')
+    def test_get_image_list_negative(self, list_catalogs, connect, get_vdc_details):
+        """
+        Testcase to get image list by invalid image id
+        """
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        self.vim.client = self.vim.connect()
+
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        list_catalogs.return_value = [{'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'system', 'numberOfMedia': '0', 'creationDate': '2017-10-15T02:03:59.403-07:00', 'id': '34925a30-0f4a-4018-9759-0d6799063b51', 'name': 'Ubuntu_1nic'}, {'isShared': 'false', 'numberOfVAppTemplates': '1', 'orgName': 'Org3', 'isPublished': 'false', 'ownerName': 'orgadmin', 'numberOfMedia': '1', 'creationDate': '2018-02-15T02:16:58.300-08:00', 'id': '4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a', 'name': 'cirros034'}]
+
+        # call to vim connector method with invalid image id
+        self.vim.get_image_list({'id': 'b46c-3f35ba45ca4a'})
+
+    @mock.patch.object(vimconnector,'get_vapp_details_rest')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_get_vminstance_negative(self, get_vdc_details, get_vapp_details_rest):
+        """
+        Testcase to get vminstance by invalid vm id
+        """
+
+        invalid_vmid = '18743edb0c8b-sdfsf-fg'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        get_vapp_details_rest.return_value = False
+
+        # assert verified expected and return result from VIM connector
+        self.assertRaises(vimconnNotFoundException, self.vim.get_vminstance,invalid_vmid)
+
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    @mock.patch.object(VDC,'get_vapp')
+    def test_delete_vminstance_negative(self, get_vapp, get_vdc_details,
+                                             get_namebyvappid, connect):
+        """
+        Testcase to delete vminstance by invalid vm id
+        """
+        vm_id = 'sdfrtt4935-87a1-0e4dc9c3a069'
+        vm_name = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+
+        # assumed return value from VIM connector
+        self.vim.client = self.vim.connect()
+        get_vdc_details.return_value = self.org, vdc
+        get_namebyvappid.return_name = vm_name
+
+        get_vapp.return_value = None
+
+        # call to VIM connector method
+        self.assertRaises(vimconnException, self.vim.delete_vminstance,vm_id)
+
+    @mock.patch.object(vimconnector,'get_vcd_network')
+    def test_refresh_nets_status_negative(self, get_vcd_network):
+        """
+        Testcase for refresh nets status by invalid vm id
+        """
+        net_id = 'sjkldf-456mfd-345'
+
+        # assumed return value from VIM connector
+        get_vcd_network.return_value = None
+        result = self.vim.refresh_nets_status([net_id])
+
+        # assert verified expected and return result from VIM connector
+        for attr in result[net_id]:
+            if attr == 'status':
+                self.assertEqual(result[net_id][attr], 'DELETED')
+
+    @mock.patch.object(vimconnector,'connect')
+    @mock.patch.object(vimconnector,'get_namebyvappid')
+    @mock.patch.object(vimconnector,'get_vdc_details')
+    def test_action_vminstance_negative(self, get_vdc_details,
+                                             get_namebyvappid,
+                                                     connect):
+        """
+        Testcase for action vm instance by invalid action
+        """
+        vm_id = '8413-4cb8-bad7-b5afaec6f9fa'
+        # created vdc object
+        vdc_xml_resp = xml_resp.vdc_xml_response
+        vdc = lxmlElementTree.fromstring(vdc_xml_resp)
+        # assumed return value from VIM connector
+        get_vdc_details.return_value = self.org, vdc
+        get_namebyvappid.return_value = 'Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa'
+        self.vim.client = self.vim.connect()
+
+        # call to VIM connector method
+        self.assertRaises(vimconnException, self.vim.action_vminstance, vm_id,{'invalid': None})
diff --git a/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware_xml_response.py b/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware_xml_response.py
new file mode 100644
index 0000000..968cb1f
--- /dev/null
+++ b/RO-VIM-vmware/osm_rovim_vmware/tests/test_vimconn_vmware_xml_response.py
@@ -0,0 +1,637 @@
+# -*- coding: utf-8 -*-

+

+##

+# Copyright 2016-2017 VMware Inc.

+# This file is part of ETSI OSM

+# All Rights Reserved.

+#

+# Licensed under the Apache License, Version 2.0 (the "License"); you may

+# not use this file except in compliance with the License. You may obtain

+# a copy of the License at

+#

+#         http://www.apache.org/licenses/LICENSE-2.0

+#

+# Unless required by applicable law or agreed to in writing, software

+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT

+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the

+# License for the specific language governing permissions and limitations

+# under the License.

+#

+# For those usages not covered by the Apache License, Version 2.0 please

+# contact:  osslegalrouting@vmware.com

+##

+

# Canned vCloud Director 1.5 REST payload: GET /api/vdc/<id> response for a
# single Org VDC (links, compute capacity, resource entities, available
# networks, hardware capabilities and storage profiles).  Used as a fixture
# by test_vimconn_vmware.py; hostnames are normalised to "localhost".
vdc_xml_response = """<?xml version="1.0" encoding="UTF-8"?>
        <Vdc xmlns="http://www.vmware.com/vcloud/v1.5" status="1" name="Org3-VDC-PVDC1" id="urn:vcloud:vdc:2584137f-6541-4c04-a2a2-e56bfca14c69" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">
		<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>
		<Link rel="down" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>
		<Link rel="edit" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/uploadVAppTemplate" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/media" type="application/vnd.vmware.vcloud.media+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/instantiateOvf" type="application/vnd.vmware.vcloud.instantiateOvfParams+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/instantiateVAppTemplate" type="application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/cloneVApp" type="application/vnd.vmware.vcloud.cloneVAppParams+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/cloneVAppTemplate" type="application/vnd.vmware.vcloud.cloneVAppTemplateParams+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/cloneMedia" type="application/vnd.vmware.vcloud.cloneMediaParams+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/action/composeVApp" type="application/vnd.vmware.vcloud.composeVAppParams+xml"/>
		<Link rel="add" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/disk" type="application/vnd.vmware.vcloud.diskCreateParams+xml"/>
		<Link rel="edgeGateways" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/edgeGateways" type="application/vnd.vmware.vcloud.query.records+xml"/>
		<Link rel="add" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/networks" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml"/>
		<Link rel="orgVdcNetworks" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69/networks" type="application/vnd.vmware.vcloud.query.records+xml"/>
		<Link rel="alternate" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.admin.vdc+xml"/>
		<Description>Org3-VDC-PVDC1</Description>
		<AllocationModel>AllocationVApp</AllocationModel>
		<ComputeCapacity>
		<Cpu>
		<Units>MHz</Units>
		<Allocated>0</Allocated>
		<Limit>0</Limit>
		<Reserved>0</Reserved>
		<Used>2000</Used>
		<Overhead>0</Overhead>
		</Cpu>
		<Memory>
		<Units>MB</Units>
		<Allocated>0</Allocated>
		<Limit>0</Limit>
		<Reserved>0</Reserved>
		<Used>2048</Used>
		<Overhead>71</Overhead>
		</Memory>
		</ComputeCapacity>
		<ResourceEntities>
		<ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-2999a787-ca96-4d1c-8b7c-9d0a8bd14bce" name="cirros" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>
        <ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-324649a3-d263-4446-aace-4e2c801a85bd" name="cirros_10" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>
		<ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-8ea35d43-0c72-4267-bac9-42e4a5248c32" name="Test_Cirros" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>
		<ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-9bf292a2-58c4-4d4b-995b-623e88b74226" name="Ubuntu-vm" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>
		<ResourceEntity href="https://localhost/api/vAppTemplate/vappTemplate-be93140e-da0d-4b8c-8ab4-06d132bf47c0" name="Ubuntu16" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>
		<ResourceEntity href="https://localhost/api/vApp/vapp-0da5344d-4d65-4362-bac6-e8524c97edb1" name="Inst10.linux1.a-e9f75c31-eadf-4b48-9a5e-d957314530d7" type="application/vnd.vmware.vcloud.vApp+xml"/>
		<ResourceEntity href="https://localhost/api/vApp/vapp-3e0df975-1380-4544-9f25-0683f9eb41f0" name="Inst12.linux1.a-93854e6d-d87c-4f0a-ba10-eaf59d7555bf" type="application/vnd.vmware.vcloud.vApp+xml"/>
		<ResourceEntity href="https://localhost/api/vApp/vapp-6f5848b8-5498-4854-a35e-45cb25b8fdb0" name="Inst11.linux1.a-5ca666e8-e077-4268-aff2-99960af28eb5" type="application/vnd.vmware.vcloud.vApp+xml"/>
		<ResourceEntity href="https://localhost/api/vApp/vapp-76510a06-c949-4bea-baad-629daaccb84a" name="cirros_nsd.cirros_vnfd__1.a-a9c957c4-29a5-4559-a630-00ae028592f7" type="application/vnd.vmware.vcloud.vApp+xml"/>
		</ResourceEntities><AvailableNetworks><Network href="https://localhost/api/network/1627b438-68bf-44be-800c-8f48029761f6" name="default-17c27654-2a45-4713-a799-94cb91de2610" type="application/vnd.vmware.vcloud.network+xml"/>
		<Network href="https://localhost/api/network/190e9e04-a904-412b-877e-92d8e8699abd" name="cirros_nsd.cirros_nsd_vld1-86c861a9-d985-4e31-9c20-21de1e8a619d" type="application/vnd.vmware.vcloud.network+xml"/>
		<Network href="https://localhost/api/network/3838c23e-cb0e-492f-a91f-f3352918ff8b" name="cirros_nsd.cirros_nsd_vld1-75ce0375-b2e6-4b7f-b821-5b395276bcd8" type="application/vnd.vmware.vcloud.network+xml"/>
		<Network href="https://localhost/api/network/5aca5c32-c0a2-4e1b-980e-8fd906a49f4e" name="default-60a54140-66dd-4806-8ca3-069d34530478" type="application/vnd.vmware.vcloud.network+xml"/>
		<Network href="https://localhost/api/network/de854aa2-0b77-4ace-a696-85494a3dc3c4" name="default-971acee6-0298-4085-b107-7601bc8c8712" type="application/vnd.vmware.vcloud.network+xml"/>
		</AvailableNetworks>
		<Capabilities>
		<SupportedHardwareVersions>
		<SupportedHardwareVersion>vmx-04</SupportedHardwareVersion>
		<SupportedHardwareVersion>vmx-07</SupportedHardwareVersion>
		<SupportedHardwareVersion>vmx-08</SupportedHardwareVersion>
		<SupportedHardwareVersion>vmx-09</SupportedHardwareVersion>
		<SupportedHardwareVersion>vmx-10</SupportedHardwareVersion>
		<SupportedHardwareVersion>vmx-11</SupportedHardwareVersion>
		</SupportedHardwareVersions>
		</Capabilities>
		<NicQuota>0</NicQuota>
		<NetworkQuota>1000</NetworkQuota>
		<UsedNetworkCount>0</UsedNetworkCount>
		<VmQuota>0</VmQuota>
		<IsEnabled>true</IsEnabled>
		<VdcStorageProfiles>
		<VdcStorageProfile href="https://localhost/api/vdcStorageProfile/3b82941c-11ed-407e-ada0-42d282fcd425" name="NFS Storage Policy" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>
		<VdcStorageProfile href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>
		</VdcStorageProfiles>
        <VCpuInMhz2>1000</VCpuInMhz2>
        </Vdc>"""

+

# Canned vCloud Director 1.5 REST payload: GET /api/network/<id> response for
# an Org VDC network with a single inherited, bridged IP scope.  Used as a
# fixture by test_vimconn_vmware.py.
network_xml_response = """<?xml version="1.0" encoding="UTF-8"?>
             <OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" status="1" name="testing_6XXftDTroat1-03b18565-de01-4154-af51-8dbea42f0d84" id="urn:vcloud:network:5c04dc6d-6096-47c6-b72b-68f19013d491" href="https://localhost/api/network/5c04dc6d-6096-47c6-b72b-68f19013d491" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">
             <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>
             <Link rel="down" href="https://localhost/api/network/5c04dc6d-6096-47c6-b72b-68f19013d491/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>
             <Link rel="down" href="https://localhost/api/network/5c04dc6d-6096-47c6-b72b-68f19013d491/allocatedAddresses/" type="application/vnd.vmware.vcloud.allocatedNetworkAddress+xml"/>
             <Description>Openmano created</Description>
             <Configuration>
             <IpScopes>
             <IpScope>
             <IsInherited>true</IsInherited>
             <Gateway>12.169.24.23</Gateway>
             <Netmask>255.255.255.0</Netmask>
             <Dns1>12.169.24.102</Dns1>
             <DnsSuffix>corp.local</DnsSuffix>
             <IsEnabled>true</IsEnabled>
             <IpRanges>
             <IpRange>
             <StartAddress>12.169.24.115</StartAddress>
             <EndAddress>12.169.241.150</EndAddress>
             </IpRange>
             </IpRanges>
             </IpScope>
             </IpScopes>
             <FenceMode>bridged</FenceMode>
             <RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments>
             </Configuration>
             <IsShared>false</IsShared>
             </OrgVdcNetwork>"""

+

+delete_network_xml_response = """<?xml version="1.0" encoding="UTF-8"?>

+            <OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" status="1" name="testing_negjXxdlB-7fdcf9f3-de32-4ae6-b9f9-fb725a80a74f" id="urn:vcloud:network:0a55e5d1-43a2-4688-bc92-cb304046bf87" href="https://localhost/api/network/0a55e5d1-43a2-4688-bc92-cb304046bf87" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">

+			<Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>

+			<Link rel="down" href="https://localhost/api/network/0a55e5d1-43a2-4688-bc92-cb304046bf87/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>

+			<Link rel="down" href="https://localhost/api/network/0a55e5d1-43a2-4688-bc92-cb304046bf87/allocatedAddresses/"  type="application/vnd.vmware.vcloud.allocatedNetworkAddress+xml"/>

+			<Description>Openmano created</Description>

+			<Configuration>

+			<IpScopes>

+			<IpScope>

+			<IsInherited>true</IsInherited>

+			<Gateway>12.169.24.23</Gateway>

+			<Netmask>255.255.255.0</Netmask>

+			<Dns1>12.169.24.102</Dns1>

+			<DnsSuffix>corp.local</DnsSuffix>

+			<IsEnabled>true</IsEnabled>

+			<IpRanges>

+			<IpRange>

+			<StartAddress>12.169.241.115</StartAddress>

+			<EndAddress>12.169.241.150</EndAddress>

+			</IpRange></IpRanges></IpScope>

+			</IpScopes>

+			<FenceMode>bridged</FenceMode>

+			<RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments>

+			</Configuration>

+			<IsShared>false</IsShared>

+			</OrgVdcNetwork>"""

+

+create_network_xml_response = """<?xml version="1.0" encoding="UTF-8"?>

+            <OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" name="Test_network-25cb63aa-30e9-4de5-be76-1d6e00a2781a" id="urn:vcloud:network:df1956fa-da04-419e-a6a2-427b6f83788f" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">

+            <Link rel="edit" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml"/>

+            <Link rel="remove" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f"/>

+            <Link rel="repair" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f/action/reset"/>

+            <Link rel="up" href="https://localhost/api/admin/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.admin.vdc+xml"/>

+            <Link rel="down" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>

+            <Link rel="down" href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f/allocatedAddresses/" type="application/vnd.vmware.vcloud.allocatedNetworkAddress+xml"/>

+            <Description>Openmano created</Description>

+            <Tasks>

+                  <Task cancelRequested="false" expiryTime="2017-12-14T02:00:39.865-08:00" operation="Creating Network Test_network-25cb63aa-30e9-4de5-be76-1d6e00a2781a(df1956fa-da04-419e-a6a2-427b6f83788f)" operationName="networkCreateOrgVdcNetwork" serviceNamespace="com.vmware.vcloud" startTime="2017-09-15T02:00:39.865-07:00" status="queued" name="task" id="urn:vcloud:task:0600f592-42ce-4d58-85c0-212c569ba6e6" href="https://localhost/api/task/0600f592-42ce-4d58-85c0-212c569ba6e6" type="application/vnd.vmware.vcloud.task+xml">

+                  <Owner href="https://localhost/api/admin/network/df1956fa-da04-419e-a6a2-427b6f83788f" name="Test_network-25cb63aa-30e9-4de5-be76-1d6e00a2781a" type="application/vnd.vmware.admin.network+xml"/>

+                  <User href="https://localhost/api/admin/user/f49f28e0-7172-4b17-aaee-d171ce2b60da" name="administrator" type="application/vnd.vmware.admin.user+xml"/>

+                  <Organization href="https://localhost/api/org/a93c9db9-7471-3192-8d09-a8f7eeda85f9" name="System" type="application/vnd.vmware.vcloud.org+xml"/>

+                  <Details/>

+                  </Task>

+            </Tasks>

+            <Configuration>

+            <IpScopes><IpScope>

+            <IsInherited>false</IsInherited>

+            <Gateway>12.16.113.1</Gateway>

+            <Netmask>255.255.255.0</Netmask>

+            <Dns1>12.16.113.2</Dns1>

+            <IsEnabled>true</IsEnabled>

+            <IpRanges><IpRange>

+            <StartAddress>12.168.113.3</StartAddress>

+            <EndAddress>12.168.113.52</EndAddress>

+            </IpRange></IpRanges>

+            </IpScope></IpScopes>

+            <ParentNetwork href="https://localhost/api/admin/network/19b01b42-c862-4d0f-bcbf-d053e7396fc0" name="" type="application/vnd.vmware.admin.network+xml"/>

+            <FenceMode>bridged</FenceMode>

+            <RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments>

+            </Configuration><IsShared>false</IsShared>

+            </OrgVdcNetwork>"""

+

+catalog1_xml_response = """<?xml version="1.0" encoding="UTF-8"?>

+<Catalog xmlns="http://www.vmware.com/vcloud/v1.5" name="Ubuntu-vm" id="urn:vcloud:catalog:d0a11b12-780e-4681-babb-2b1fd6693f62" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62" type="application/vnd.vmware.vcloud.catalog+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">

+<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>

+<Link rel="down" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>

+<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/catalogItems" type="application/vnd.vmware.vcloud.catalogItem+xml"/>

+<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/upload" type="application/vnd.vmware.vcloud.media+xml"/>

+<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/upload" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>

+<Link rel="copy" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/copy" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>

+<Link rel="move" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/move" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>

+<Link rel="add" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>

+<Link rel="down" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>

+<Link rel="controlAccess" href="https://localhost/api/catalog/d0a11b12-780e-4681-babb-2b1fd6693f62/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/> <Description>Ubuntu-vm</Description>

+<CatalogItems><CatalogItem href="https://localhost/api/catalogItem/04fc0041-8e40-4e37-b072-7dba3e1c6a30" id="04fc0041-8e40-4e37-b072-7dba3e1c6a30" name="Ubuntu-vm" type="application/vnd.vmware.vcloud.catalogItem+xml"/></CatalogItems><IsPublished>false</IsPublished><DateCreated>2017-03-17T03:17:11.293-07:00</DateCreated><VersionNumber>5</VersionNumber>

+</Catalog>"""

+

+catalog2_xml_response = """<?xml version="1.0" encoding="UTF-8"?>

+<Catalog xmlns="http://www.vmware.com/vcloud/v1.5" name="cirros" id="urn:vcloud:catalog:32ccb082-4a65-41f6-bcd6-38942e8a3829" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829" type="application/vnd.vmware.vcloud.catalog+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">

+<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>

+<Link rel="down" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>

+<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/catalogItems" type="application/vnd.vmware.vcloud.catalogItem+xml"/>

+<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/upload" type="application/vnd.vmware.vcloud.media+xml"/>

+<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/upload" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>

+<Link rel="copy" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/copy" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>

+<Link rel="move" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/move" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>

+<Link rel="add" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>

+<Link rel="down" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>

+<Link rel="controlAccess" href="https://localhost/api/catalog/32ccb082-4a65-41f6-bcd6-38942e8a3829/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/> <Description>cirros</Description>

+<CatalogItems><CatalogItem href="https://localhost/api/catalogItem/98316d41-e38c-40c2-ac28-5462e8aada8c" id="98316d41-e38c-40c2-ac28-5462e8aada8c" name="cirros" type="application/vnd.vmware.vcloud.catalogItem+xml"/></CatalogItems><IsPublished>false</IsPublished><DateCreated>2017-03-08T02:06:07.003-08:00</DateCreated><VersionNumber>5</VersionNumber>

+</Catalog>"""

+

+vapp_xml_response = """<?xml version="1.0" encoding="UTF-8"?>

+<VApp xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" ovfDescriptorUploaded="true" deployed="true" status="4" name="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" id="urn:vcloud:vapp:4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" type="application/vnd.vmware.vcloud.vApp+xml" xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8023_1.1.0.xsd http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/schema/ovf http://www.vmware.com/schema/ovf http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_ResourceAllocationSettingData.xsd http://schemas.dmtf.org/ovf/environment/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8027_1.1.0.xsd http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_VirtualSystemSettingData.xsd">

+<Link rel="power:powerOff" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/powerOff"/>

+<Link rel="power:reboot" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/reboot"/>

+<Link rel="power:reset" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/reset"/>

+<Link rel="power:shutdown" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/shutdown"/>

+<Link rel="power:suspend" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/power/action/suspend"/>

+<Link rel="deploy" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/deploy" type="application/vnd.vmware.vcloud.deployVAppParams+xml"/>

+<Link rel="undeploy" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/undeploy" type="application/vnd.vmware.vcloud.undeployVAppParams+xml"/>

+<Link rel="down" href="https://localhost/api/network/9489a59a-0339-4151-9667-f5b90296c36d" name="External-Network-1074" type="application/vnd.vmware.vcloud.vAppNetwork+xml"/>

+<Link rel="down" href="https://localhost/api/network/379f083b-4057-4724-a128-ed5bc6672591" name="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d" type="application/vnd.vmware.vcloud.vAppNetwork+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>

+<Link rel="controlAccess" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>

+<Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>

+<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" type="application/vnd.vmware.vcloud.vApp+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/owner" type="application/vnd.vmware.vcloud.owner+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>

+<Link rel="ovf" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/ovf" type="text/xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>

+<Link rel="snapshot:create" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/action/createSnapshot" type="application/vnd.vmware.vcloud.createSnapshotParams+xml"/>

+<LeaseSettingsSection href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml" ovf:required="false">

+<ovf:Info>Lease settings section</ovf:Info>

+<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml"/> <DeploymentLeaseInSeconds>0</DeploymentLeaseInSeconds><StorageLeaseInSeconds>7776000</StorageLeaseInSeconds></LeaseSettingsSection>

+<ovf:StartupSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.startupSection+xml" vcloud:href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/startupSection/"><ovf:Info>VApp startup section</ovf:Info>

+<ovf:Item ovf:id="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" ovf:order="0" ovf:startAction="powerOn" ovf:startDelay="0" ovf:stopAction="powerOff" ovf:stopDelay="0"/>

+<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/startupSection/" type="application/vnd.vmware.vcloud.startupSection+xml"/> </ovf:StartupSection><ovf:NetworkSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.networkSection+xml" vcloud:href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/networkSection/"><ovf:Info>The list of logical networks</ovf:Info>

+<ovf:Network ovf:name="External-Network-1074"><ovf:Description>External-Network-1074</ovf:Description></ovf:Network>

+<ovf:Network ovf:name="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d"><ovf:Description/></ovf:Network></ovf:NetworkSection>

+<NetworkConfigSection href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/networkConfigSection/" type="application/vnd.vmware.vcloud.networkConfigSection+xml" ovf:required="false"><ovf:Info>The configuration parameters for logical networks</ovf:Info>

+<Link rel="edit" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/networkConfigSection/"   type="application/vnd.vmware.vcloud.networkConfigSection+xml"/><NetworkConfig networkName="External-Network-1074"><Link rel="repair" href="https://localhost/api/admin/network/9489a59a-0339-4151-9667-f5b90296c36d/action/reset"/>

+<Description>External-Network-1074</Description><Configuration><IpScopes><IpScope><IsInherited>false</IsInherited><Gateway>192.168.254.1</Gateway><Netmask>255.255.255.0</Netmask>

+<IsEnabled>true</IsEnabled><IpRanges><IpRange><StartAddress>192.168.254.100</StartAddress><EndAddress>192.168.254.199</EndAddress></IpRange></IpRanges></IpScope></IpScopes>

+<FenceMode>isolated</FenceMode><RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments></Configuration><IsDeployed>true</IsDeployed></NetworkConfig>

+<NetworkConfig networkName="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d">

+<Link rel="repair" href="https://localhost/api/admin/network/379f083b-4057-4724-a128-ed5bc6672591/action/reset"/><Description/><Configuration><IpScopes><IpScope><IsInherited>true</IsInherited>

+<Gateway>192.169.241.253</Gateway><Netmask>255.255.255.0</Netmask><Dns1>192.169.241.102</Dns1><DnsSuffix>corp.local</DnsSuffix><IsEnabled>true</IsEnabled><IpRanges><IpRange>

+<StartAddress>192.169.241.115</StartAddress><EndAddress>192.169.241.150</EndAddress></IpRange></IpRanges></IpScope></IpScopes>

+<ParentNetwork href="https://localhost/api/admin/network/d4307ff7-0e34-4d41-aab0-4c231a045088" id="d4307ff7-0e34-4d41-aab0-4c231a045088" name="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d"/><FenceMode>bridged</FenceMode><RetainNetInfoAcrossDeployments>false</RetainNetInfoAcrossDeployments></Configuration>

+<IsDeployed>true</IsDeployed></NetworkConfig></NetworkConfigSection><SnapshotSection href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069/snapshotSection" type="application/vnd.vmware.vcloud.snapshotSection+xml" ovf:required="false"><ovf:Info>Snapshot information section</ovf:Info></SnapshotSection><DateCreated>2017-09-21T01:15:31.627-07:00</DateCreated><Owner type="application/vnd.vmware.vcloud.owner+xml">

+<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>

+</Owner><InMaintenanceMode>false</InMaintenanceMode><Children>

+<Vm needsCustomization="false" nestedHypervisorEnabled="false" deployed="true" status="4" name="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" id="urn:vcloud:vm:47d12505-5968-4e16-95a7-18743edb0c8b" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b" type="application/vnd.vmware.vcloud.vm+xml">

+<Link rel="power:powerOff" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/powerOff"/>

+<Link rel="power:reboot" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/reboot"/>

+<Link rel="power:reset" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/reset"/>

+<Link rel="power:shutdown" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/shutdown"/>

+<Link rel="power:suspend" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/power/action/suspend"/>

+<Link rel="undeploy" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/undeploy" type="application/vnd.vmware.vcloud.undeployVAppParams+xml"/>

+<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b" type="application/vnd.vmware.vcloud.vm+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>

+<Link rel="metrics" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>

+<Link rel="metrics" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>

+<Link rel="screen:thumbnail" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen"/>

+<Link rel="screen:acquireTicket" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireTicket"/>

+<Link rel="screen:acquireMksTicket" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/screen/action/acquireMksTicket" type="application/vnd.vmware.vcloud.mksTicket+xml"/>

+<Link rel="media:insertMedia" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/media/action/insertMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>

+<Link rel="media:ejectMedia" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/media/action/ejectMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>

+<Link rel="disk:attach" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/disk/action/attach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>

+<Link rel="disk:detach" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/disk/action/detach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>

+<Link rel="installVmwareTools" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/installVMwareTools"/>

+<Link rel="customizeAtNextPowerOn" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/customizeAtNextPowerOn"/>

+<Link rel="snapshot:create" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/createSnapshot" type="application/vnd.vmware.vcloud.createSnapshotParams+xml"/>

+<Link rel="reconfigureVm" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/action/reconfigureVm" name="Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa" type="application/vnd.vmware.vcloud.vm+xml"/>

+<Link rel="up" href="https://localhost/api/vApp/vapp-4f6a9b49-e92d-4935-87a1-0e4dc9c3a069" type="application/vnd.vmware.vcloud.vApp+xml"/><Description>Ubuntu-vm</Description>  <ovf:VirtualHardwareSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:transport="" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/"><ovf:Info>Virtual hardware requirements</ovf:Info><ovf:System><vssd:ElementName>Virtual Hardware Family</vssd:ElementName><vssd:InstanceID>0</vssd:InstanceID>    <vssd:VirtualSystemIdentifier>Test1_vm-69a18104-8413-4cb8-bad7-b5afaec6f9fa</vssd:VirtualSystemIdentifier><vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType></ovf:System><ovf:Item>    <rasd:Address>00:50:56:01:12:a2</rasd:Address><rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>    <rasd:Connection vcloud:ipAddressingMode="DHCP" vcloud:ipAddress="12.19.21.20" vcloud:primaryNetworkConnection="true">testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d</rasd:Connection>    <rasd:Description>Vmxnet3 ethernet adapter on "testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d"</rasd:Description>    <rasd:ElementName>Network adapter 0</rasd:ElementName>    <rasd:InstanceID>1</rasd:InstanceID>    <rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>    <rasd:ResourceType>10</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:Address>0</rasd:Address>    <rasd:Description>SCSI Controller</rasd:Description>    <rasd:ElementName>SCSI Controller 0</rasd:ElementName>    <rasd:InstanceID>2</rasd:InstanceID>    <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>    <rasd:ResourceType>6</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:Description>Hard disk</rasd:Description>    <rasd:ElementName>Hard disk 1</rasd:ElementName>    <rasd:HostResource 
vcloud:storageProfileHref="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" vcloud:busType="6" vcloud:busSubType="lsilogic" vcloud:capacity="40960" vcloud:storageProfileOverrideVmDefault="false"/>    <rasd:InstanceID>2000</rasd:InstanceID>    <rasd:Parent>2</rasd:Parent>    <rasd:ResourceType>17</rasd:ResourceType>    <rasd:VirtualQuantity>42949672960</rasd:VirtualQuantity>    <rasd:VirtualQuantityUnits>byte</rasd:VirtualQuantityUnits></ovf:Item><ovf:Item>    <rasd:Address>0</rasd:Address>    <rasd:Description>SATA Controller</rasd:Description>    <rasd:ElementName>SATA Controller 0</rasd:ElementName>    <rasd:InstanceID>3</rasd:InstanceID>    <rasd:ResourceSubType>vmware.sata.ahci</rasd:ResourceSubType>    <rasd:ResourceType>20</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>    <rasd:Description>CD/DVD Drive</rasd:Description>    <rasd:ElementName>CD/DVD Drive 1</rasd:ElementName>    <rasd:HostResource/>    <rasd:InstanceID>16000</rasd:InstanceID>    <rasd:Parent>3</rasd:Parent>    <rasd:ResourceType>15</rasd:ResourceType></ovf:Item><ovf:Item>    <rasd:AddressOnParent>0</rasd:AddressOnParent>    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>    <rasd:Description>Floppy Drive</rasd:Description>    <rasd:ElementName>Floppy Drive 1</rasd:ElementName>    <rasd:HostResource/>    <rasd:InstanceID>8000</rasd:InstanceID>    <rasd:ResourceType>14</rasd:ResourceType></ovf:Item><ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu">    <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>    <rasd:Description>Number of Virtual CPUs</rasd:Description>    <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>    <rasd:InstanceID>4</rasd:InstanceID>    <rasd:Reservation>0</rasd:Reservation>    
<rasd:ResourceType>3</rasd:ResourceType>    <rasd:VirtualQuantity>1</rasd:VirtualQuantity>    <rasd:Weight>0</rasd:Weight>    <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>    <Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/></ovf:Item><ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory">    <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>    <rasd:Description>Memory Size</rasd:Description>    <rasd:ElementName>1024 MB of memory</rasd:ElementName>    <rasd:InstanceID>5</rasd:InstanceID>    <rasd:Reservation>0</rasd:Reservation>    <rasd:ResourceType>4</rasd:ResourceType>    <rasd:VirtualQuantity>1024</rasd:VirtualQuantity>    <rasd:Weight>0</rasd:Weight>    <Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/></ovf:Item><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/" type="application/vnd.vmware.vcloud.virtualHardwareSection+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>

+<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>

+<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>

+<Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/media" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="down" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/></ovf:VirtualHardwareSection><ovf:OperatingSystemSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:id="94" vcloud:type="application/vnd.vmware.vcloud.operatingSystemSection+xml" vmw:osType="ubuntu64Guest" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/operatingSystemSection/"><ovf:Info>Specifies the operating system installed</ovf:Info><ovf:Description>Ubuntu Linux (64-bit)</ovf:Description><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/operatingSystemSection/" type="application/vnd.vmware.vcloud.operatingSystemSection+xml"/></ovf:OperatingSystemSection><NetworkConnectionSection href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false"><ovf:Info>Specifies the available VM network 
connections</ovf:Info><PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex><NetworkConnection needsCustomization="false" network="testing_T6nODiW4-68f68d93-0350-4d86-b40b-6e74dedf994d">    <NetworkConnectionIndex>0</NetworkConnectionIndex>    <IpAddress>12.19.21.20</IpAddress>    <IsConnected>true</IsConnected>    <MACAddress>00:50:56:01:12:a2</MACAddress>    <IpAddressAllocationMode>DHCP</IpAddressAllocationMode></NetworkConnection><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/></NetworkConnectionSection><GuestCustomizationSection href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" ovf:required="false"><ovf:Info>Specifies Guest OS Customization Settings</ovf:Info><Enabled>true</Enabled><ChangeSid>false</ChangeSid><VirtualMachineId>47d12505-5968-4e16-95a7-18743edb0c8b</VirtualMachineId><JoinDomainEnabled>false</JoinDomainEnabled><UseOrgSettings>false</UseOrgSettings><AdminPasswordEnabled>false</AdminPasswordEnabled><AdminPasswordAuto>true</AdminPasswordAuto><AdminAutoLogonEnabled>false</AdminAutoLogonEnabled><AdminAutoLogonCount>0</AdminAutoLogonCount><ResetPasswordRequired>false</ResetPasswordRequired><ComputerName>Ubuntu-vm-001</ComputerName><Link rel="edit" href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml"/></GuestCustomizationSection><RuntimeInfoSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/runtimeInfoSection"><ovf:Info>Specifies Runtime info</ovf:Info><VMWareTools version="2147483647"/></RuntimeInfoSection><SnapshotSection 
href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/snapshotSection" type="application/vnd.vmware.vcloud.snapshotSection+xml" ovf:required="false"><ovf:Info>Snapshot information section</ovf:Info></SnapshotSection><DateCreated>2017-09-21T01:15:53.863-07:00</DateCreated><VAppScopedLocalId>Ubuntu-vm</VAppScopedLocalId><ovfenv:Environment xmlns:ns11="http://www.vmware.com/schema/ovfenv" ovfenv:id="" ns11:vCenterId="vm-7833"><ovfenv:PlatformSection>    <ovfenv:Kind>VMware ESXi</ovfenv:Kind>    <ovfenv:Version>6.0.0</ovfenv:Version>    <ovfenv:Vendor>VMware, Inc.</ovfenv:Vendor>    <ovfenv:Locale>en</ovfenv:Locale></ovfenv:PlatformSection><ovfenv:PropertySection>    <ovfenv:Property ovfenv:key="vCloud_UseSysPrep" ovfenv:value="None"/>    <ovfenv:Property ovfenv:key="vCloud_bitMask" ovfenv:value="1"/>    <ovfenv:Property ovfenv:key="vCloud_bootproto_0" ovfenv:value="dhcp"/>    <ovfenv:Property ovfenv:key="vCloud_computerName" ovfenv:value="Ubuntu-vm-001"/>    <ovfenv:Property ovfenv:key="vCloud_macaddr_0" ovfenv:value="00:50:56:01:12:a2"/>    <ovfenv:Property ovfenv:key="vCloud_markerid" ovfenv:value="c743cbe8-136e-4cf8-9e42-b291646b8058"/>    <ovfenv:Property ovfenv:key="vCloud_numnics" ovfenv:value="1"/>    <ovfenv:Property ovfenv:key="vCloud_primaryNic" ovfenv:value="0"/>    <ovfenv:Property ovfenv:key="vCloud_reconfigToken" ovfenv:value="246124151"/>    <ovfenv:Property ovfenv:key="vCloud_resetPassword" ovfenv:value="0"/></ovfenv:PropertySection><ve:EthernetAdapterSection xmlns:ve="http://www.vmware.com/schema/ovfenv" xmlns="http://schemas.dmtf.org/ovf/environment/1" xmlns:oe="http://schemas.dmtf.org/ovf/environment/1">    <ve:Adapter ve:mac="00:50:56:01:12:a2" ve:network="DPG-MGMT-3151" ve:unitNumber="7"/></ve:EthernetAdapterSection></ovfenv:Environment><VmCapabilities href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml"><Link rel="edit" 
href="https://localhost/api/vApp/vm-47d12505-5968-4e16-95a7-18743edb0c8b/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml"/><MemoryHotAddEnabled>false</MemoryHotAddEnabled><CpuHotAddEnabled>false</CpuHotAddEnabled></VmCapabilities><StorageProfile href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/></Vm></Children></VApp>"""

+

# Canned vCloud Director Task XML: a vApp power-off operation
# (operationName="vappPowerOff") in its initial "queued" state.
# Served as a mock REST response body by the vimconn_vmware unit tests.
poweroff_task_xml = """<?xml version="1.0" encoding="UTF-8"?>
                <Task xmlns="http://www.vmware.com/vcloud/v1.5" cancelRequested="false" expiryTime="2017-12-22T23:18:23.040-08:00" operation="Powering Off Virtual Application Test1_vm-f370dafc-4aad-4415-bad9-68509dda67c9(f26ebf0a-f675-4622-83a6-64c6401769ac)" operationName="vappPowerOff" serviceNamespace="com.vmware.vcloud" startTime="2017-09-23T23:18:23.040-07:00" status="queued" name="task" id="urn:vcloud:task:26975b6e-310e-4ed9-914e-ba7051eaabcb" href="https://localhost/api/task/26975b6e-310e-4ed9-914e-ba7051eaabcb" type="application/vnd.vmware.vcloud.task+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">
		<Owner href="https://localhost/api/vApp/vapp-f26ebf0a-f675-4622-83a6-64c6401769ac" name="Test1_vm-f370dafc-4aad-4415-bad9-68509dda67c9" type="application/vnd.vmware.vcloud.vApp+xml"/>
		<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>
		<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>
		<Details/>
		</Task>"""

+

# Canned vCloud Director Org XML for organization "Org3": lists the org's
# VDCs, catalogs (with their controlAccess links), org networks and misc
# admin/template links. Used as a mock REST response body in the tests.
org_xml_response = """<Org xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" name="Org3" id="urn:vcloud:org:2cb3dffb-5c51-4355-8406-28553ead28ac" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">
<Link rel="down" href="https://localhost/api/vdc/216648ae-1b91-412b-b821-e4c301ff27d2" name="osm" type="application/vnd.vmware.vcloud.vdc+xml"/>
<Link rel="down" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" name="Org3-VDC-PVDC1" type="application/vnd.vmware.vcloud.vdc+xml"/>
<Link rel="down" href="https://localhost/api/vdc/414fdda9-3556-478c-a496-2deeec39cd30" name="osm1" type="application/vnd.vmware.vcloud.vdc+xml"/>
<Link rel="down" href="https://localhost/api/tasksList/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.tasksList+xml"/>
<Link rel="down" href="https://localhost/api/catalog/4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a" name="cirros034" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalog/4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="controlAccess" href="https://localhost/api/catalog/4b94b67e-c2c6-49ec-b46c-3f35ba45ca4a/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="down" href="https://localhost/api/catalog/34925a30-0f4a-4018-9759-0d6799063b51" name="Ubuntu_1nic" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalog/34925a30-0f4a-4018-9759-0d6799063b51/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="controlAccess" href="https://localhost/api/catalog/34925a30-0f4a-4018-9759-0d6799063b51/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="down" href="https://localhost/api/catalog/d3fa3df2-b311-4571-9138-4c66541d7f46" name="cirros_10" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalog/d3fa3df2-b311-4571-9138-4c66541d7f46/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="controlAccess" href="https://localhost/api/catalog/d3fa3df2-b311-4571-9138-4c66541d7f46/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="down" href="https://localhost/api/catalog/d0eb0b02-718d-42e0-b889-56575000b52d" name="Test_Cirros" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalog/d0eb0b02-718d-42e0-b889-56575000b52d/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="controlAccess" href="https://localhost/api/catalog/d0eb0b02-718d-42e0-b889-56575000b52d/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="down" href="https://localhost/api/catalog/c3b56180-f980-4256-9109-a93168d73ff2" name="de4ffcf2ad21f1a5d0714d6b868e2645" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalog/c3b56180-f980-4256-9109-a93168d73ff2/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="controlAccess" href="https://localhost/api/catalog/c3b56180-f980-4256-9109-a93168d73ff2/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="down" href="https://localhost/api/catalog/b139ed82-7ca4-49fb-9882-5f841f59c890" name="Ubuntu_plugtest-1" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalog/b139ed82-7ca4-49fb-9882-5f841f59c890/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="controlAccess" href="https://localhost/api/catalog/b139ed82-7ca4-49fb-9882-5f841f59c890/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="down" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="controlAccess" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="down" href="https://localhost/api/catalog/b31e6973-86d2-404b-a522-b16846d099dc" name="Ubuntu_Cat" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalog/b31e6973-86d2-404b-a522-b16846d099dc/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="down" href="https://localhost/api/catalog/d64b2617-ea4b-4b90-910b-102c99dd2031" name="Ubuntu16" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalog/d64b2617-ea4b-4b90-910b-102c99dd2031/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="controlAccess" href="https://localhost/api/catalog/d64b2617-ea4b-4b90-910b-102c99dd2031/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="add" href="https://localhost/api/admin/org/2cb3dffb-5c51-4355-8406-28553ead28ac/catalogs" type="application/vnd.vmware.admin.catalog+xml"/>
<Link rel="down" href="https://localhost/api/network/090ffa68-9be6-4d74-af45-9a071544a633" name="default.cirros_ns.cirros_nsd_vld1-73a7d683-af17-49ff-95d3-72f8feb25537" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/17f3a12f-16f8-44a1-99e9-9a0122a7ac41" name="default.ass.management-3979591d-ea4e-4254-b4c4-4052107e4aca" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/326aee79-4f5c-439c-8ead-1bbfa42d2e51" name="default.Testvm11.management-fe46ba91-3b36-4964-9ad2-e91b475b3d23" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/420c24c7-89e9-49e5-ba6d-d21bfb9af94b" name="cirros_nsd_vld1-ea8aec47-0a6c-4fdb-814f-7a743e31407a" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/42cba4fd-7baa-4f53-bda0-b36dada672d0" name="default.cirros_ns.cirros_nsd_vld1-44dff01a-2bdb-4096-a916-7e9826bfa401" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/4ae9fec5-7ed0-4d5e-b0f3-f5289bdf6471" name="default.cirros_ns.cirros_nsd_vld1-9f547589-37b7-4d7d-8890-8d3dd479ff5b" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/4b2ecfa9-6a70-4fe4-9d79-b3f74df91e85" name="default.cirros_ns.cirros_nsd_vld1-43852bce-6109-4949-b63a-deec9d7daab2" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/693f72af-ae42-42e5-956e-25723628bf26" name="default.cirros_ns.cirros_nsd_vld1-8cd70d26-ba81-4a04-aa82-67a994b3e21c" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/6d9fbd4c-f0b9-4033-a13f-a7c8990b01de" name="default.vcd.management-f05b9ad3-7480-4ee6-ab8d-92b1f3c0b265" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/6e3e9f57-cee4-433a-883b-0bbe9760e99d" name="default" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/7787cdd7-9577-4966-ba72-8fbbff5d2553" name="default.cirros_ns.cirros_nsd_vld1-ab1f2288-ff59-488c-af02-c8d5e34e0847" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/7fa723e3-cd6c-4680-9522-e644eb31a188" name="default.cirros_ns.cirros_nsd_vld1-285865bb-736c-4b3d-8618-d755928daf5c" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/9030a222-4562-43a0-abc6-aa60c7c1aae0" name="default.cirros_ns.cirros_nsd_vld1-57248151-de72-4313-a84f-b090d8c3feb8" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/a4bd508c-1325-41b0-8c25-61cb7b83cde7" name="default.cirros_ns.cirros_nsd_vld1-491dfb8d-6b4b-41ab-b3e8-a5148e110bba" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/a719292f-0a7f-4e03-a346-183f23f3e60c" name="default.cirros_ns.cirros_nsd_vld1-7ba57204-eed1-4dc8-8698-60a71bbae715" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/a73574ef-16d4-4357-adbf-a0997eb5eb75" name="default.cirros_ns.cirros_nsd_vld1-4430f367-3fc8-4367-9bf1-96dbc244abe6" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/a94e9ba0-e959-47d6-87c0-70e8cb1b485a" name="default.cirros_ns.cirros_nsd_vld1-c56c51c5-e5a8-44fe-9d36-1f2cbd9a7137" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/ab88a587-ff82-4fa7-8225-c0e3eddbf6e6" name="cirros_nsd_vld1-0ed4b7e9-dd56-4f8b-b92f-829b9de95f66" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/b141d722-c96b-4ac5-90da-3d407d376431" name="cirros_nsd_vld1-ad2ebea3-7a0b-4995-91bb-c16bc6fd4b0e" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/c4d61fd6-4d1e-446c-949f-9eb42e0ccc63" name="default.cirros_ns.cirros_nsd_vld1-021a0669-1833-4a0b-a782-30ceed2cca7a" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/cd466f6f-fdc5-404a-9136-320aaa9e3c16" name="default.cirros_ns.cirros_nsd_vld1-22e6962e-6488-47ad-bfad-41bc599abfcd" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/cde04227-8f87-4956-b1f1-9f1be1241b8b" name="default.cirros_ns.cirros_nsd_vld1-629da038-a216-48c5-9ae2-aa4d5dea057c" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/e9812bec-ded8-423d-9807-354adc5720aa" name="default.cirros_ns.cirros_nsd_vld1-ba7fcc4f-fa76-49b1-8fa0-2b0791141fdd" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/eec8ef17-e379-4e40-a743-4ecec6afe616" name="cirros_nsd_vld1-aa9832d6-7d7a-4ac9-be56-cd171063818b" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/ef16d51c-3a54-4eea-bc15-9aa1e92b140f" name="default.cirros_ns.cirros_nsd_vld1-fe7170ad-0b0a-491d-b585-4de31e758ad7" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/f1554f21-4a7b-40be-9a34-a1b640c13398" name="default.Test21.cirros_nsd_vld1-c8f2b860-6794-4c8e-9a5b-3f107f23bbc4" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/fcbbf40a-6578-4054-b496-f10504b94b21" name="default.cirros_ns.cirros_nsd_vld1-a3021c0f-a0fe-413d-9067-cb9182e1f614" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/1fd6421e-929a-4576-bc19-a0c48aea1969" name="default" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/41b8a539-6927-4ec4-a411-aedae8129c45" name="test001.vld2-name-e34e32fd-6d3f-4d24-9d29-e8dab46e515a" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/437258c7-a221-48cd-b889-d24b2fc15087" name="Mgmt-Network-3151" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/bfd56159-9178-4021-a5d8-9ec050569b0c" name="test001.net_internal_name-34602686-3619-4356-98e9-27f6e13e84ad" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/08a0276e-d0fb-4223-92ae-003857ccd38f" name="pytest-09/20/17-05:26:01-cirros_nsd.cirros_nsd_vld1-d6688412-e82a-4cf7-aa77-400beb70dbbf" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/294f2cba-9a81-49c5-bb73-fdaa6644c6ec" name="pytest-09/20/17-03:47:31-cirros_nsd.cirros_nsd_vld1-bd7e8e04-d075-4851-b550-0cf9737c7c8d" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/network/d660e25b-8049-4e8f-a4b8-6811465197d7" name="Ns1.mgmt-dee74b34-51a5-4caa-aafe-d0c896e53828" type="application/vnd.vmware.vcloud.orgNetwork+xml"/>
<Link rel="down" href="https://localhost/api/supportedSystemsInfo/" type="application/vnd.vmware.vcloud.supportedSystemsInfo+xml"/>
<Link rel="down" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>
<Link rel="down" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac/hybrid" type="application/vnd.vmware.vcloud.hybridOrg+xml"/>
<Link rel="alternate" href="https://localhost/api/admin/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.admin.organization+xml"/>
<Link rel="down" href="https://localhost/api/vdcTemplates" type="application/vnd.vmware.admin.vdcTemplates+xml"/>
<Link rel="instantiate" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac/action/instantiate" type="application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml"/>
<Description/><FullName>Organization 3</FullName>
</Org>
"""

+

# Canned vCloud Director Catalog XML (one CatalogItem, IsPublished false).
# Mocks the GET-catalog response consumed by the delete-catalog test path.
delete_catalog_xml_response = """<?xml version="1.0" encoding="UTF-8"?>\n<Catalog xmlns="http://www.vmware.com/vcloud/v1.5" name="80d8488f67ba1de98b7f485fba6abbd2" id="urn:vcloud:catalog:f3bf3733-465b-419f-b675-52f91d18edbb" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb" type="application/vnd.vmware.vcloud.catalog+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">
<Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>
<Link rel="down" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>
<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/catalogItems" type="application/vnd.vmware.vcloud.catalogItem+xml"/>
<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/upload" type="application/vnd.vmware.vcloud.media+xml"/>
<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/upload" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>
<Link rel="copy" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/copy" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>
<Link rel="move" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/move" type="application/vnd.vmware.vcloud.copyOrMoveCatalogItemParams+xml"/>
<Link rel="add" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>
<Link rel="down" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Link rel="controlAccess" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb/action/controlAccess" type="application/vnd.vmware.vcloud.controlAccess+xml"/>
<Description>80d8488f67ba1de98b7f485fba6abbd2</Description>
<CatalogItems>
    <CatalogItem href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded" id="8a984fdd-d2cb-4d58-a739-2ea12560aded" name="80d8488f67ba1de98b7f485fba6abbd2" type="application/vnd.vmware.vcloud.catalogItem+xml"/>
</CatalogItems>
<IsPublished>
    false
</IsPublished>
<DateCreated>2017-09-24T02:30:23.623-07:00</DateCreated>
<VersionNumber>2</VersionNumber>
</Catalog>"""

+

# Canned vCloud Director CatalogItem XML whose Entity points at a
# vAppTemplate. Mocks the GET-catalogItem response used by the
# delete-catalog-item test path.
delete_catalog_item_xml_response = """<?xml version="1.0" encoding="UTF-8"?>
<CatalogItem xmlns="http://www.vmware.com/vcloud/v1.5" size="0" name="80d8488f67ba1de98b7f485fba6abbd2" id="urn:vcloud:catalogitem:8a984fdd-d2cb-4d58-a739-2ea12560aded" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded" type="application/vnd.vmware.vcloud.catalogItem+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">
<Link rel="up" href="https://localhost/api/catalog/f3bf3733-465b-419f-b675-52f91d18edbb" type="application/vnd.vmware.vcloud.catalog+xml"/>
<Link rel="down" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>
<Link rel="edit" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded" type="application/vnd.vmware.vcloud.catalogItem+xml"/>
<Link rel="remove" href="https://localhost/api/catalogItem/8a984fdd-d2cb-4d58-a739-2ea12560aded"/>    <Description>medial_file_name vApp Template</Description>
<Entity href="https://localhost/api/vAppTemplate/vappTemplate-2731194b-637a-45f5-8e6d-dc65690302f7" name="80d8488f67ba1de98b7f485fba6abbd2" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>   <DateCreated>2017-09-24T02:30:26.380-07:00</DateCreated>
<VersionNumber>1</VersionNumber>
</CatalogItem>"""

+

# Canned vCloud Director Task XML: vApp undeploy/power-off
# (operationName="vappUndeployPowerOff") in its initial "queued" state.
undeploy_task_xml = """<Task xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" cancelRequested="false" expiryTime="2018-07-17T23:53:10.781-07:00" operation="Stopping Virtual Application Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46(86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed)" operationName="vappUndeployPowerOff" serviceNamespace="com.vmware.vcloud" startTime="2018-04-18T23:53:10.781-07:00" status="queued" name="task" id="urn:vcloud:task:5ca0a79f-c025-47b9-9f20-b6a04fd67ea3" href="https://localhost/api/task/5ca0a79f-c025-47b9-9f20-b6a04fd67ea3" type="application/vnd.vmware.vcloud.task+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">
<Owner href="https://localhost/api/vApp/vapp-86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed" name="Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46" type="application/vnd.vmware.vcloud.vApp+xml"/>
<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>
<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>
<Details/>
</Task>
"""

+

# Canned vCloud Director Task XML: vApp deletion
# (operationName="vdcDeleteVapp") in its initial "queued" state.
delete_task_xml = """<Task xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" cancelRequested="false" expiryTime="2018-07-17T23:54:11.696-07:00" operation="Deleting Virtual Application Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46(86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed)" operationName="vdcDeleteVapp" serviceNamespace="com.vmware.vcloud" startTime="2018-04-18T23:54:11.696-07:00" status="queued" name="task" id="urn:vcloud:task:f0399f4e-ddd5-4050-959f-5970ba0a63e6" href="https://localhost/api/task/f0399f4e-ddd5-4050-959f-5970ba0a63e6" type="application/vnd.vmware.vcloud.task+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">
<Owner href="https://localhost/api/vApp/vapp-86d9dd50-9c07-4fc5-84ce-aefb5de7b8ed" name="Test1_vm-5e6dbb30-41ea-4290-951d-6ce2a7412d46" type="application/vnd.vmware.vcloud.vApp+xml"/>
<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>
<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>
<Details/>
</Task>"""

+

# Canned vCloud Director Task XML: a completed vApp power-off
# (operationName="vappPowerOff", status="success", endTime set).
# Mocks the task-status polling response in the tests.
status_task_xml = """<Task xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" cancelRequested="false" endTime="2018-04-19T01:24:46.643-07:00" expiryTime="2018-07-18T01:24:39.363-07:00" operation="Powered Off Virtual Application Test1_vm-fa13aee3-fb79-456f-8ce9-17f029ec4324(e9765c7a-b0de-4663-9db9-028bf0031f4d)" operationName="vappPowerOff" serviceNamespace="com.vmware.vcloud" startTime="2018-04-19T01:24:39.363-07:00" status="success" name="task" id="urn:vcloud:task:17ebe394-b419-4612-ab55-cad3000d780a" href="https://localhost/api/task/17ebe394-b419-4612-ab55-cad3000d780a" type="application/vnd.vmware.vcloud.task+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">
<Owner href="https://localhost/api/vApp/vapp-e9765c7a-b0de-4663-9db9-028bf0031f4d" name="Test1_vm-fa13aee3-fb79-456f-8ce9-17f029ec4324" type="application/vnd.vmware.vcloud.vApp+xml"/>
<User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>
<Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>
<Details/>
</Task>
"""

+

+vm_xml_response = """<?xml version="1.0" encoding="UTF-8"?>

+<Vm xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" needsCustomization="false" nestedHypervisorEnabled="false" deployed="true" status="4" name="Ubuntu_no_nic" id="urn:vcloud:vm:53a529b2-10d8-4d56-a7ad-8182acdbe71c" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c" type="application/vnd.vmware.vcloud.vm+xml" xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8023_1.1.0.xsd http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/schema/ovf http://www.vmware.com/schema/ovf http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_ResourceAllocationSettingData.xsd http://schemas.dmtf.org/ovf/environment/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8027_1.1.0.xsd http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_VirtualSystemSettingData.xsd">

+<Link rel="power:powerOff" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/powerOff"/>

+<Link rel="power:reboot" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/reboot"/>    <Link rel="power:reset" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/reset"/>

+<Link rel="power:shutdown" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/shutdown"/>

+<Link rel="power:suspend" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/power/action/suspend"/> <Link rel="undeploy" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/undeploy" type="application/vnd.vmware.vcloud.undeployVAppParams+xml"/>

+<Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c" type="application/vnd.vmware.vcloud.vm+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>

+<Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>

+<Link rel="metrics" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/current" type="application/vnd.vmware.vcloud.metrics.currentUsageSpec+xml"/>

+<Link rel="metrics" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/metrics/historic" type="application/vnd.vmware.vcloud.metrics.historicUsageSpec+xml"/>

+<Link rel="screen:thumbnail" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/screen"/>

+<Link rel="screen:acquireTicket" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/screen/action/acquireTicket"/>

+<Link rel="screen:acquireMksTicket" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/screen/action/acquireMksTicket" type="application/vnd.vmware.vcloud.mksTicket+xml"/>

+<Link rel="media:insertMedia" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/media/action/insertMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>

+<Link rel="media:ejectMedia" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/media/action/ejectMedia" type="application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml"/>

+<Link rel="disk:attach" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/disk/action/attach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>

+<Link rel="disk:detach" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/disk/action/detach" type="application/vnd.vmware.vcloud.diskAttachOrDetachParams+xml"/>

+<Link rel="installVmwareTools" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/installVMwareTools"/>

+<Link rel="customizeAtNextPowerOn" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/customizeAtNextPowerOn"/>

+<Link rel="snapshot:create" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/createSnapshot" type="application/vnd.vmware.vcloud.createSnapshotParams+xml"/>

+<Link rel="reconfigureVm" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/action/reconfigureVm" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.vm+xml"/>

+<Link rel="up" href="https://localhost/api/vApp/vapp-5a5ca3da-3826-4fe4-83c5-c018ad1765fa" type="application/vnd.vmware.vcloud.vApp+xml"/>

+<Description/>

+<ovf:VirtualHardwareSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:transport="" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/">

+<ovf:Info>Virtual hardware requirements</ovf:Info>

+<ovf:System>

+<vssd:ElementName>Virtual Hardware Family</vssd:ElementName>

+<vssd:InstanceID>0</vssd:InstanceID>

+<vssd:VirtualSystemIdentifier>Ubuntu_no_nic</vssd:VirtualSystemIdentifier>

+<vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType>

+</ovf:System><ovf:Item>

+<rasd:Address>00:50:56:01:14:1a</rasd:Address>

+<rasd:AddressOnParent>0</rasd:AddressOnParent>

+<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>

+<rasd:Connection vcloud:ipAddressingMode="DHCP" vcloud:ipAddress="172.16.27.72" vcloud:primaryNetworkConnection="true">testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce</rasd:Connection>

+<rasd:Description>Vmxnet3 ethernet adapter on "testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce"

+</rasd:Description><rasd:ElementName>Network adapter 0</rasd:ElementName>

+<rasd:InstanceID>1</rasd:InstanceID>

+<rasd:ResourceSubType>VMXNET3</rasd:ResourceSubType>

+<rasd:ResourceType>10</rasd:ResourceType></ovf:Item><ovf:Item>

+<rasd:Address>0</rasd:Address><rasd:Description>SCSI Controller</rasd:Description>

+<rasd:ElementName>SCSI Controller 0</rasd:ElementName>

+<rasd:InstanceID>2</rasd:InstanceID>

+<rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>

+<rasd:ResourceType>6</rasd:ResourceType></ovf:Item><ovf:Item>

+<rasd:AddressOnParent>0</rasd:AddressOnParent>

+<rasd:Description>Hard disk</rasd:Description>

+<rasd:ElementName>Hard disk 1</rasd:ElementName>

+<rasd:HostResource vcloud:storageProfileHref="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" vcloud:busType="6" vcloud:busSubType="lsilogic" vcloud:capacity="10240" vcloud:storageProfileOverrideVmDefault="false"/>      <rasd:InstanceID>2000</rasd:InstanceID>

+<rasd:Parent>2</rasd:Parent><rasd:ResourceType>17</rasd:ResourceType>

+<rasd:VirtualQuantity>10737418240</rasd:VirtualQuantity>

+<rasd:VirtualQuantityUnits>byte</rasd:VirtualQuantityUnits>

+</ovf:Item><ovf:Item><rasd:Address>1</rasd:Address>

+<rasd:Description>IDE Controller</rasd:Description>

+<rasd:ElementName>IDE Controller 1</rasd:ElementName>

+<rasd:InstanceID>3</rasd:InstanceID>

+<rasd:ResourceType>5</rasd:ResourceType>

+</ovf:Item><ovf:Item><rasd:AddressOnParent>0</rasd:AddressOnParent>

+<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>

+<rasd:Description>CD/DVD Drive</rasd:Description>

+<rasd:ElementName>CD/DVD Drive 1</rasd:ElementName><rasd:HostResource/>

+<rasd:InstanceID>3002</rasd:InstanceID>

+<rasd:Parent>3</rasd:Parent>

+<rasd:ResourceType>15</rasd:ResourceType></ovf:Item><ovf:Item>

+<rasd:AddressOnParent>0</rasd:AddressOnParent>

+<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>

+<rasd:Description>Floppy Drive</rasd:Description>

+<rasd:ElementName>Floppy Drive 1</rasd:ElementName>

+<rasd:HostResource/><rasd:InstanceID>8000</rasd:InstanceID>

+<rasd:ResourceType>14</rasd:ResourceType>

+</ovf:Item>

+<ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu">

+<rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>

+<rasd:Description>Number of Virtual CPUs</rasd:Description>

+<rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>

+<rasd:InstanceID>4</rasd:InstanceID>

+<rasd:Reservation>0</rasd:Reservation>

+<rasd:ResourceType>3</rasd:ResourceType>

+<rasd:VirtualQuantity>1</rasd:VirtualQuantity>

+<rasd:Weight>0</rasd:Weight>

+<vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>

+<Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/></ovf:Item>

+<ovf:Item vcloud:type="application/vnd.vmware.vcloud.rasdItem+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory">

+<rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>

+<rasd:Description>Memory Size</rasd:Description>

+<rasd:ElementName>1024 MB of memory</rasd:ElementName>

+<rasd:InstanceID>5</rasd:InstanceID>

+<rasd:Reservation>0</rasd:Reservation>

+<rasd:ResourceType>4</rasd:ResourceType>

+<rasd:VirtualQuantity>1024</rasd:VirtualQuantity>

+<rasd:Weight>0</rasd:Weight>

+<Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>

+        </ovf:Item>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/" type="application/vnd.vmware.vcloud.virtualHardwareSection+xml"/>

+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>

+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>

+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>

+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/media" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>

+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>

+        <Link rel="down" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>

+    </ovf:VirtualHardwareSection>

+    <ovf:OperatingSystemSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:id="94" vcloud:type="application/vnd.vmware.vcloud.operatingSystemSection+xml" vmw:osType="ubuntu64Guest" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/operatingSystemSection/">

+        <ovf:Info>Specifies the operating system installed</ovf:Info>

+        <ovf:Description>Ubuntu Linux (64-bit)</ovf:Description>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/operatingSystemSection/" type="application/vnd.vmware.vcloud.operatingSystemSection+xml"/>

+    </ovf:OperatingSystemSection>

+    <NetworkConnectionSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">

+        <ovf:Info>Specifies the available VM network connections</ovf:Info>

+        <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>

+        <NetworkConnection needsCustomization="false" network="testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce">

+            <NetworkConnectionIndex>0</NetworkConnectionIndex>

+            <IpAddress>172.16.27.72</IpAddress>

+            <IsConnected>true</IsConnected>

+            <MACAddress>00:50:56:01:14:1a</MACAddress>

+            <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>

+            <NetworkAdapterType>VMXNET3</NetworkAdapterType>

+        </NetworkConnection>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>

+    </NetworkConnectionSection>  

+    <NetworkConnectionSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">

+        <ovf:Info>Specifies the available VM network connections</ovf:Info>

+        <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>

+        <NetworkConnection needsCustomization="false" network="testing_6SNBKa9pz62P-63e13553-ebf9-4518-a33d-6ea922a6d2ce">

+            <NetworkConnectionIndex>0</NetworkConnectionIndex>

+            <IpAddress>172.16.27.72</IpAddress>

+            <IsConnected>true</IsConnected>

+            <MACAddress>00:50:56:01:14:1a</MACAddress>

+            <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>

+        </NetworkConnection>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>

+    </NetworkConnectionSection>

+    <GuestCustomizationSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" ovf:required="false">

+        <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>

+        <Enabled>true</Enabled>

+        <ChangeSid>false</ChangeSid>

+        <VirtualMachineId>53a529b2-10d8-4d56-a7ad-8182acdbe71c</VirtualMachineId>

+        <JoinDomainEnabled>false</JoinDomainEnabled>

+        <UseOrgSettings>false</UseOrgSettings>

+        <AdminPasswordEnabled>false</AdminPasswordEnabled>

+        <AdminPasswordAuto>true</AdminPasswordAuto>

+        <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>

+        <AdminAutoLogonCount>0</AdminAutoLogonCount>

+        <ResetPasswordRequired>false</ResetPasswordRequired>

+        <ComputerName>Ubuntunonic-001</ComputerName>

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml"/>

+    </GuestCustomizationSection>

+    <RuntimeInfoSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" vcloud:href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/runtimeInfoSection">

+        <ovf:Info>Specifies Runtime info</ovf:Info>

+        <VMWareTools version="2147483647"/>

+    </RuntimeInfoSection>

+    <SnapshotSection href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/snapshotSection" type="application/vnd.vmware.vcloud.snapshotSection+xml" ovf:required="false">

+        <ovf:Info>Snapshot information section</ovf:Info>

+    </SnapshotSection>

+    <DateCreated>2018-04-19T04:19:28.150-07:00</DateCreated>

+    <VAppScopedLocalId>Ubuntu_no_nic</VAppScopedLocalId>

+    <ovfenv:Environment xmlns:ns11="http://www.vmware.com/schema/ovfenv" ovfenv:id="" ns11:vCenterId="vm-8971">

+        <ovfenv:PlatformSection>

+<ovfenv:Kind>VMware ESXi</ovfenv:Kind>

+<ovfenv:Version>6.0.0</ovfenv:Version>

+<ovfenv:Vendor>VMware, Inc.</ovfenv:Vendor>

+<ovfenv:Locale>en</ovfenv:Locale>

+        </ovfenv:PlatformSection>

+        <ovfenv:PropertySection>

+<ovfenv:Property ovfenv:key="vCloud_UseSysPrep" ovfenv:value="None"/>

+<ovfenv:Property ovfenv:key="vCloud_bitMask" ovfenv:value="1"/>

+<ovfenv:Property ovfenv:key="vCloud_bootproto_0" ovfenv:value="dhcp"/>

+<ovfenv:Property ovfenv:key="vCloud_computerName" ovfenv:value="Ubuntunonic-001"/>

+<ovfenv:Property ovfenv:key="vCloud_macaddr_0" ovfenv:value="00:50:56:01:14:1a"/>

+<ovfenv:Property ovfenv:key="vCloud_markerid" ovfenv:value="ec8b90ea-cb5d-43b4-8910-91380ff29d97"/>

+<ovfenv:Property ovfenv:key="vCloud_numnics" ovfenv:value="1"/>

+<ovfenv:Property ovfenv:key="vCloud_primaryNic" ovfenv:value="0"/>

+<ovfenv:Property ovfenv:key="vCloud_reconfigToken" ovfenv:value="132681259"/>

+<ovfenv:Property ovfenv:key="vCloud_resetPassword" ovfenv:value="0"/>

+        </ovfenv:PropertySection>

+        <ve:EthernetAdapterSection xmlns:ve="http://www.vmware.com/schema/ovfenv" xmlns="http://schemas.dmtf.org/ovf/environment/1" xmlns:oe="http://schemas.dmtf.org/ovf/environment/1">

+<ve:Adapter ve:mac="00:50:56:01:14:1a" ve:network="DPG-MGMT-3151" ve:unitNumber="7"/>

+   

+        </ve:EthernetAdapterSection>

+    </ovfenv:Environment>

+    <VmCapabilities href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml">

+        <Link rel="edit" href="https://localhost/api/vApp/vm-53a529b2-10d8-4d56-a7ad-8182acdbe71c/vmCapabilities/" type="application/vnd.vmware.vcloud.vmCapabilitiesSection+xml"/>

+        <MemoryHotAddEnabled>false</MemoryHotAddEnabled>

+        <CpuHotAddEnabled>false</CpuHotAddEnabled>

+    </VmCapabilities>

+    <StorageProfile href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>

+</Vm>"""

+

+delete_tenant = """<?xml version="1.0" encoding="UTF-8"?>\n<Vdc xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5" status="1" name="testing_Cqm5fiZ" id="urn:vcloud:vdc:753227f5-d6c6-4478-9546-acc5cfff21e9" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9" type="application/vnd.vmware.vcloud.vdc+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/vcloud/extension/v1.5 http://localhost/api/v1.5/schema/vmwextensions.xsd">\n    <VCloudExtension required="false">\n        <vmext:VimObjectRef>\n            <vmext:VimServerRef href="https://localhost/api/admin/extension/vimServer/cc82baf9-9f80-4468-bfe9-ce42b3f9dde5" name="VC" type="application/vnd.vmware.admin.vmwvirtualcenter+xml"/>\n            <vmext:MoRef>resgroup-9025</vmext:MoRef>\n            <vmext:VimObjectType>RESOURCE_POOL</vmext:VimObjectType>\n        </vmext:VimObjectRef>\n    </VCloudExtension>\n    <Link rel="up" href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" type="application/vnd.vmware.vcloud.org+xml"/>\n    <Link rel="down" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="edit" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9" type="application/vnd.vmware.vcloud.vdc+xml"/>\n    <Link rel="remove" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/uploadVAppTemplate" type="application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/media" type="application/vnd.vmware.vcloud.media+xml"/>\n    <Link rel="add" 
href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/instantiateOvf" type="application/vnd.vmware.vcloud.instantiateOvfParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/instantiateVAppTemplate" type="application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/cloneVApp" type="application/vnd.vmware.vcloud.cloneVAppParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/cloneVAppTemplate" type="application/vnd.vmware.vcloud.cloneVAppTemplateParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/cloneMedia" type="application/vnd.vmware.vcloud.cloneMediaParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/captureVApp" type="application/vnd.vmware.vcloud.captureVAppParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/action/composeVApp" type="application/vnd.vmware.vcloud.composeVAppParams+xml"/>\n    <Link rel="add" href="https://localhost/api/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/disk" type="application/vnd.vmware.vcloud.diskCreateParams+xml"/>\n    <Link rel="edgeGateways" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/edgeGateways" type="application/vnd.vmware.vcloud.query.records+xml"/>\n    <Link rel="add" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/networks" type="application/vnd.vmware.vcloud.orgVdcNetwork+xml"/>\n    <Link rel="orgVdcNetworks" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9/networks" type="application/vnd.vmware.vcloud.query.records+xml"/>\n    <Link rel="alternate" href="https://localhost/api/admin/vdc/753227f5-d6c6-4478-9546-acc5cfff21e9" 
type="application/vnd.vmware.admin.vdc+xml"/>\n    <Description>opnemano</Description>\n    <AllocationModel>AllocationVApp</AllocationModel>\n    <ComputeCapacity>\n        <Cpu>\n            <Units>MHz</Units>\n            <Allocated>0</Allocated>\n            <Limit>2048</Limit>\n            <Reserved>0</Reserved>\n            <Used>0</Used>\n            <Overhead>0</Overhead>\n        </Cpu>\n        <Memory>\n            <Units>MB</Units>\n            <Allocated>0</Allocated>\n            <Limit>2048</Limit>\n            <Reserved>0</Reserved>\n            <Used>0</Used>\n            <Overhead>0</Overhead>\n        </Memory>\n    </ComputeCapacity>\n    <ResourceEntities/>\n    <AvailableNetworks/>\n    <Capabilities>\n        <SupportedHardwareVersions>\n            <SupportedHardwareVersion>vmx-04</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-07</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-08</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-09</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-10</SupportedHardwareVersion>\n            <SupportedHardwareVersion>vmx-11</SupportedHardwareVersion>\n        </SupportedHardwareVersions>\n    </Capabilities>\n    <NicQuota>100</NicQuota>\n    <NetworkQuota>100</NetworkQuota>\n    <UsedNetworkCount>0</UsedNetworkCount>\n    <VmQuota>50</VmQuota>\n    <IsEnabled>true</IsEnabled>\n    <VdcStorageProfiles>\n        <VdcStorageProfile href="https://localhost/api/vdcStorageProfile/37ec8982-e6c3-4fba-a107-0fa36fe292d0" name="NFS Storage Policy" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\n    </VdcStorageProfiles>\n    <VCpuInMhz2>1000</VCpuInMhz2>\n</Vdc>\n"""

+

+catalog_list_xml = """<CatalogItem xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" size="0" name="Ubuntu_no_nic" id="urn:vcloud:catalogitem:d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd"><Link rel="up" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9" type="application/vnd.vmware.vcloud.catalog+xml"/><Link rel="down" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/><Link rel="edit" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml"/><Link rel="remove" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad"/><Description/><Entity href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/><DateCreated>2017-10-14T23:52:58.097-07:00</DateCreated><VersionNumber>1</VersionNumber></CatalogItem>"""

+

+catalogItem_xml = """<?xml version="1.0" encoding="UTF-8"?>\n<CatalogItem xmlns="http://www.vmware.com/vcloud/v1.5" size="0" name="Ubuntu_no_nic" id="urn:vcloud:catalogitem:d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\n    <Link rel="up" href="https://localhost/api/catalog/e8d953db-8dc9-46d5-9cab-329774cd2ad9" type="application/vnd.vmware.vcloud.catalog+xml"/>\n    <Link rel="down" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="edit" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\n    <Link rel="remove" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad"/>\n    <Description/>\n    <Entity href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" name="Ubuntu_no_nic" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\n    <DateCreated>2017-10-14T23:52:58.097-07:00</DateCreated>\n    <VersionNumber>1</VersionNumber>\n</CatalogItem>"""

+

+vapp_template_xml = """<?xml version="1.0" encoding="UTF-8"?>\n<VAppTemplate xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1" xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" xmlns:vmw="http://www.vmware.com/schema/ovf" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" goldMaster="false" ovfDescriptorUploaded="true" status="8" name="Ubuntu_no_nic" id="urn:vcloud:vapptemplate:593e3130-ac0b-44f1-8289-14329dcc5435" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" type="application/vnd.vmware.vcloud.vAppTemplate+xml" xsi:schemaLocation="http://schemas.dmtf.org/ovf/envelope/1 http://schemas.dmtf.org/ovf/envelope/1/dsp8023_1.1.0.xsd http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd http://www.vmware.com/schema/ovf http://www.vmware.com/schema/ovf http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_ResourceAllocationSettingData.xsd http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2.22.0/CIM_VirtualSystemSettingData.xsd">\n    <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\n    <Link rel="catalogItem" href="https://localhost/api/catalogItem/d79fb542-6ad4-4c09-8cfc-f6104cbf67ad" type="application/vnd.vmware.vcloud.catalogItem+xml"/>\n    <Link rel="remove" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435"/>\n    <Link rel="edit" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\n    <Link rel="enable" 
href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/action/enableDownload"/>\n    <Link rel="disable" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/action/disableDownload"/>\n    <Link rel="ovf" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/ovf" type="text/xml"/>\n    <Link rel="storageProfile" href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" name="*" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\n    <Link rel="down" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/owner" type="application/vnd.vmware.vcloud.owner+xml"/>\n    <Link rel="down" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="down" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\n    <Description/>\n    <Owner type="application/vnd.vmware.vcloud.owner+xml">\n        <User href="https://localhost/api/admin/user/4e1905dc-7c0b-4013-b763-d01960853f49" name="system" type="application/vnd.vmware.admin.user+xml"/>\n    </Owner>\n    <Children>\n        <Vm goldMaster="false" status="8" name="Ubuntu_no_nic" id="urn:vcloud:vm:bd3fe155-3fb2-40a8-af48-89c276983166" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166" type="application/vnd.vmware.vcloud.vm+xml">\n            <Link rel="up" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435" type="application/vnd.vmware.vcloud.vAppTemplate+xml"/>\n            <Link rel="storageProfile" href="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" type="application/vnd.vmware.vcloud.vdcStorageProfile+xml"/>\n            
<Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n            <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\n            <Description/>\n            <NetworkConnectionSection href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/networkConnectionSection/" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">\n                <ovf:Info>Specifies the available VM network connections</ovf:Info>\n            </NetworkConnectionSection>\n            <GuestCustomizationSection href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/guestCustomizationSection/" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" ovf:required="false">\n                <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>\n                <Enabled>true</Enabled>\n                <ChangeSid>false</ChangeSid>\n                <VirtualMachineId>bd3fe155-3fb2-40a8-af48-89c276983166</VirtualMachineId>\n                <JoinDomainEnabled>false</JoinDomainEnabled>\n                <UseOrgSettings>false</UseOrgSettings>\n                <AdminPasswordEnabled>false</AdminPasswordEnabled>\n                <AdminPasswordAuto>true</AdminPasswordAuto>\n                <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>\n                <AdminAutoLogonCount>0</AdminAutoLogonCount>\n                <ResetPasswordRequired>false</ResetPasswordRequired>\n                <ComputerName>Ubuntunonic-001</ComputerName>\n            </GuestCustomizationSection>\n            <ovf:VirtualHardwareSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" ovf:transport="" vcloud:type="application/vnd.vmware.vcloud.virtualHardwareSection+xml" 
vcloud:href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/">\n                <ovf:Info>Virtual hardware requirements</ovf:Info>\n                <ovf:System>\n                    <vssd:ElementName>Virtual Hardware Family</vssd:ElementName>\n                    <vssd:InstanceID>0</vssd:InstanceID>\n                    <vssd:VirtualSystemIdentifier>Ubuntu_no_nic</vssd:VirtualSystemIdentifier>\n                    <vssd:VirtualSystemType>vmx-11</vssd:VirtualSystemType>\n                </ovf:System>\n                <ovf:Item>\n                    <rasd:Address>0</rasd:Address>\n                    <rasd:Description>SCSI Controller</rasd:Description>\n                    <rasd:ElementName>SCSI Controller 0</rasd:ElementName>\n                    <rasd:InstanceID>1</rasd:InstanceID>\n                    <rasd:ResourceSubType>lsilogic</rasd:ResourceSubType>\n                    <rasd:ResourceType>6</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AddressOnParent>0</rasd:AddressOnParent>\n                    <rasd:Description>Hard disk</rasd:Description>\n                    <rasd:ElementName>Hard disk 1</rasd:ElementName>\n                    <rasd:HostResource vcloud:storageProfileHref="https://localhost/api/vdcStorageProfile/950701fb-2b8a-4808-80f1-27d1170a2bfc" vcloud:busType="6" vcloud:busSubType="lsilogic" vcloud:capacity="5120" vcloud:storageProfileOverrideVmDefault="false"/>\n                    <rasd:InstanceID>2000</rasd:InstanceID>\n                    <rasd:Parent>1</rasd:Parent>\n                    <rasd:ResourceType>17</rasd:ResourceType>\n                    <rasd:VirtualQuantity>5368709120</rasd:VirtualQuantity>\n                    <rasd:VirtualQuantityUnits>byte</rasd:VirtualQuantityUnits>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:Address>1</rasd:Address>\n                    <rasd:Description>IDE 
Controller</rasd:Description>\n                    <rasd:ElementName>IDE Controller 1</rasd:ElementName>\n                    <rasd:InstanceID>2</rasd:InstanceID>\n                    <rasd:ResourceType>5</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AddressOnParent>0</rasd:AddressOnParent>\n                    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\n                    <rasd:Description>CD/DVD Drive</rasd:Description>\n                    <rasd:ElementName>CD/DVD Drive 1</rasd:ElementName>\n                    <rasd:HostResource/>\n                    <rasd:InstanceID>3002</rasd:InstanceID>\n                    <rasd:Parent>2</rasd:Parent>\n                    <rasd:ResourceType>15</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AddressOnParent>0</rasd:AddressOnParent>\n                    <rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>\n                    <rasd:Description>Floppy Drive</rasd:Description>\n                    <rasd:ElementName>Floppy Drive 1</rasd:ElementName>\n                    <rasd:HostResource/>\n                    <rasd:InstanceID>8000</rasd:InstanceID>\n                    <rasd:ResourceType>14</rasd:ResourceType>\n                </ovf:Item>\n                <ovf:Item>\n                    <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>\n                    <rasd:Description>Number of Virtual CPUs</rasd:Description>\n                    <rasd:ElementName>1 virtual CPU(s)</rasd:ElementName>\n                    <rasd:InstanceID>3</rasd:InstanceID>\n                    <rasd:Reservation>0</rasd:Reservation>\n                    <rasd:ResourceType>3</rasd:ResourceType>\n                    <rasd:VirtualQuantity>1</rasd:VirtualQuantity>\n                    <rasd:Weight>0</rasd:Weight>\n                    <vmw:CoresPerSocket ovf:required="false">1</vmw:CoresPerSocket>\n                
</ovf:Item>\n                <ovf:Item>\n                    <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>\n                    <rasd:Description>Memory Size</rasd:Description>\n                    <rasd:ElementName>1024 MB of memory</rasd:ElementName>\n                    <rasd:InstanceID>4</rasd:InstanceID>\n                    <rasd:Reservation>0</rasd:Reservation>\n                    <rasd:ResourceType>4</rasd:ResourceType>\n                    <rasd:VirtualQuantity>1024</rasd:VirtualQuantity>\n                    <rasd:Weight>0</rasd:Weight>\n                </ovf:Item>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/cpu" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/memory" type="application/vnd.vmware.vcloud.rasdItem+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/disks" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/media" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/networkCards" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n                <Link rel="down" href="https://localhost/api/vAppTemplate/vm-bd3fe155-3fb2-40a8-af48-89c276983166/virtualHardwareSection/serialPorts" type="application/vnd.vmware.vcloud.rasdItemsList+xml"/>\n            </ovf:VirtualHardwareSection>\n            <VAppScopedLocalId>Ubuntu_no_nic</VAppScopedLocalId>\n            <DateCreated>2017-10-14T23:52:58.790-07:00</DateCreated>\n        
</Vm>\n    </Children>\n    <ovf:NetworkSection xmlns:vcloud="http://www.vmware.com/vcloud/v1.5" vcloud:type="application/vnd.vmware.vcloud.networkSection+xml" vcloud:href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/networkSection/">\n        <ovf:Info>The list of logical networks</ovf:Info>\n    </ovf:NetworkSection>\n    <NetworkConfigSection href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/networkConfigSection/" type="application/vnd.vmware.vcloud.networkConfigSection+xml" ovf:required="false">\n        <ovf:Info>The configuration parameters for logical networks</ovf:Info>\n    </NetworkConfigSection>\n    <LeaseSettingsSection href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml" ovf:required="false">\n        <ovf:Info>Lease settings section</ovf:Info>\n        <Link rel="edit" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/leaseSettingsSection/" type="application/vnd.vmware.vcloud.leaseSettingsSection+xml"/>\n        <StorageLeaseInSeconds>7776000</StorageLeaseInSeconds>\n        <StorageLeaseExpiration>2018-08-22T02:41:54.567-07:00</StorageLeaseExpiration>\n    </LeaseSettingsSection>\n    <CustomizationSection goldMaster="false" href="https://localhost/api/vAppTemplate/vappTemplate-593e3130-ac0b-44f1-8289-14329dcc5435/customizationSection/" type="application/vnd.vmware.vcloud.customizationSection+xml" ovf:required="false">\n        <ovf:Info>VApp template customization section</ovf:Info>\n        <CustomizeOnInstantiate>true</CustomizeOnInstantiate>\n    </CustomizationSection>\n    <DateCreated>2017-10-14T23:52:58.790-07:00</DateCreated>\n</VAppTemplate>\n"""

+

+deployed_vapp_xml = """<?xml version="1.0" encoding="UTF-8"?>\n<VApp xmlns="http://www.vmware.com/vcloud/v1.5" ovfDescriptorUploaded="true" deployed="false" status="0" name="Test1_vm-978d608b-07e4-4733-9c15-b66bc8ee310a" id="urn:vcloud:vapp:8b3ab861-cc53-4bd8-bdd0-85a74af76c61" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61" type="application/vnd.vmware.vcloud.vApp+xml" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.vmware.com/vcloud/v1.5 http://localhost/api/v1.5/schema/master.xsd">\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/controlAccess/" type="application/vnd.vmware.vcloud.controlAccess+xml"/>\n    <Link rel="up" href="https://localhost/api/vdc/2584137f-6541-4c04-a2a2-e56bfca14c69" type="application/vnd.vmware.vcloud.vdc+xml"/>\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/owner" type="application/vnd.vmware.vcloud.owner+xml"/>\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/metadata" type="application/vnd.vmware.vcloud.metadata+xml"/>\n    <Link rel="ovf" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/ovf" type="text/xml"/>\n    <Link rel="down" href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61/productSections/" type="application/vnd.vmware.vcloud.productSections+xml"/>\n    <Description>Vapp instantiation</Description>\n    <Tasks>\n        <Task cancelRequested="false" expiryTime="2018-08-31T01:14:34.292-07:00" operation="Creating Virtual Application Test1_vm-978d608b-07e4-4733-9c15-b66bc8ee310a(8b3ab861-cc53-4bd8-bdd0-85a74af76c61)" operationName="vdcInstantiateVapp" serviceNamespace="com.vmware.vcloud" startTime="2018-06-02T01:14:34.292-07:00" status="queued" name="task" id="urn:vcloud:task:1d588451-6b7d-43f4-b8c7-c9155dcd715a" 
href="https://localhost/api/task/1d588451-6b7d-43f4-b8c7-c9155dcd715a" type="application/vnd.vmware.vcloud.task+xml">\n            <Owner href="https://localhost/api/vApp/vapp-8b3ab861-cc53-4bd8-bdd0-85a74af76c61" name="Test1_vm-978d608b-07e4-4733-9c15-b66bc8ee310a" type="application/vnd.vmware.vcloud.vApp+xml"/>\n            <User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\n            <Organization href="https://localhost/api/org/2cb3dffb-5c51-4355-8406-28553ead28ac" name="Org3" type="application/vnd.vmware.vcloud.org+xml"/>\n            <Progress>1</Progress>\n            <Details/>\n        </Task>\n    </Tasks>\n    <DateCreated>2018-06-02T01:14:32.870-07:00</DateCreated>\n    <Owner type="application/vnd.vmware.vcloud.owner+xml">\n        <User href="https://localhost/api/admin/user/f7b6beba-96db-4674-b187-675ed1873c8c" name="orgadmin" type="application/vnd.vmware.admin.user+xml"/>\n    </Owner>\n    <InMaintenanceMode>false</InMaintenanceMode>\n</VApp>"""

diff --git a/RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py b/RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
new file mode 100644
index 0000000..e37c419
--- /dev/null
+++ b/RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
@@ -0,0 +1,6616 @@
+# -*- coding: utf-8 -*-
+
+##
+# Copyright 2016-2017 VMware Inc.
+# This file is part of ETSI OSM
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+# For those usages not covered by the Apache License, Version 2.0 please
+# contact:  osslegalrouting@vmware.com
+##
+
+"""
+vimconn_vmware implements an abstract vimconn class in order to interact with VMware vCloud Director.
+mbayramov@vmware.com
+"""
+from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
+
+from osm_ro import vimconn
+import os
+import shutil
+import subprocess
+import tempfile
+import traceback
+import itertools
+import requests
+import ssl
+import atexit
+
+from pyVmomi import vim, vmodl
+from pyVim.connect import SmartConnect, Disconnect
+
+from xml.etree import ElementTree as XmlElementTree
+from lxml import etree as lxmlElementTree
+
+import yaml
+from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
+from pyvcloud.vcd.vdc import VDC
+from pyvcloud.vcd.org import Org
+import re
+from pyvcloud.vcd.vapp import VApp
+from xml.sax.saxutils import escape
+import logging
+import json
+import time
+import uuid
+# import httplib
+#For python3
+#import http.client
+import hashlib
+import socket
+import struct
+import netaddr
+import random
+
# Connector type used when the plugin runs standalone.
STANDALONE = 'standalone'

# Keys used in the in-memory flavor dictionaries handled by this connector.
FLAVOR_RAM_KEY = 'ram'
FLAVOR_VCPUS_KEY = 'vcpus'
FLAVOR_DISK_KEY = 'disk'
# Default IP profile applied to new networks when the caller does not supply
# one: IPv4 with DHCP enabled and a pool of 50 addresses.
DEFAULT_IP_PROFILE = {'dhcp_count':50,
                      'dhcp_enabled':True,
                      'ip_version':"IPv4"
                      }
# Polling interval and overall timeout (both in seconds) used while waiting
# for vCloud Director asynchronous tasks to complete.
INTERVAL_TIME = 5
MAX_WAIT_TIME = 1800

# vCloud Director REST API version requested in the 'Accept' header of every call.
API_VERSION = '27.0'

__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
__date__ = "$09-Mar-2018 11:09:29$"
__version__ = '0.2'

# vCloud Director vApp/VM numeric status codes, for reference:
#     -1: "Could not be created",
#     0: "Unresolved",
#     1: "Resolved",
#     2: "Deployed",
#     3: "Suspended",
#     4: "Powered on",
#     5: "Waiting for user input",
#     6: "Unknown state",
#     7: "Unrecognized state",
#     8: "Powered off",
#     9: "Inconsistent state",
#     10: "Children do not all have the same status",
#     11: "Upload initiated, OVF descriptor pending",
#     12: "Upload initiated, copying contents",
#     13: "Upload initiated , disk contents pending",
#     14: "Upload has been quarantined",
#     15: "Upload quarantine period has expired"

# Mapping from vCD numeric status codes (above) to MANO status strings.
vcdStatusCode2manoFormat = {4: 'ACTIVE',
                            7: 'PAUSED',
                            3: 'SUSPENDED',
                            8: 'INACTIVE',
                            12: 'BUILD',
                            -1: 'ERROR',
                            14: 'DELETED'}

# Mapping from vCD network status strings to MANO format (identity mapping).
netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
                        'ERROR': 'ERROR', 'DELETED': 'DELETED'
                        }
+
+class vimconnector(vimconn.vimconnector):
+    # dict used to store flavor in memory
+    flavorlist = {}
+
+    def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
+                 url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
+        """
+        Constructor create vmware connector to vCloud director.
+
+        By default construct doesn't validate connection state. So client can create object with None arguments.
+        If client specified username , password and host and VDC name.  Connector initialize other missing attributes.
+
+        a) It initialize organization UUID
+        b) Initialize tenant_id/vdc ID.   (This information derived from tenant name)
+
+        Args:
+            uuid - is organization uuid.
+            name - is organization name that must be presented in vCloud director.
+            tenant_id - is VDC uuid it must be presented in vCloud director
+            tenant_name - is VDC name.
+            url - is hostname or ip address of vCloud director
+            url_admin - same as above.
+            user - is user that administrator for organization. Caller must make sure that
+                    username has right privileges.
+
+            password - is password for a user.
+
+            VMware connector also requires PVDC administrative privileges and separate account.
+            This variables must be passed via config argument dict contains keys
+
+            dict['admin_username']
+            dict['admin_password']
+            config - Provide NSX and vCenter information
+
+            Returns:
+                Nothing.
+        """
+
+        vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
+                                      url_admin, user, passwd, log_level, config)
+
+        self.logger = logging.getLogger('openmano.vim.vmware')
+        self.logger.setLevel(10)
+        self.persistent_info = persistent_info
+
+        self.name = name
+        self.id = uuid
+        self.url = url
+        self.url_admin = url_admin
+        self.tenant_id = tenant_id
+        self.tenant_name = tenant_name
+        self.user = user
+        self.passwd = passwd
+        self.config = config
+        self.admin_password = None
+        self.admin_user = None
+        self.org_name = ""
+        self.nsx_manager = None
+        self.nsx_user = None
+        self.nsx_password = None
+        self.availability_zone = None
+
+        # Disable warnings from self-signed certificates.
+        requests.packages.urllib3.disable_warnings()
+
+        if tenant_name is not None:
+            orgnameandtenant = tenant_name.split(":")
+            if len(orgnameandtenant) == 2:
+                self.tenant_name = orgnameandtenant[1]
+                self.org_name = orgnameandtenant[0]
+            else:
+                self.tenant_name = tenant_name
+        if "orgname" in config:
+            self.org_name = config['orgname']
+
+        if log_level:
+            self.logger.setLevel(getattr(logging, log_level))
+
+        try:
+            self.admin_user = config['admin_username']
+            self.admin_password = config['admin_password']
+        except KeyError:
+            raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
+
+        try:
+            self.nsx_manager = config['nsx_manager']
+            self.nsx_user = config['nsx_user']
+            self.nsx_password = config['nsx_password']
+        except KeyError:
+            raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
+
+        self.vcenter_ip = config.get("vcenter_ip", None)
+        self.vcenter_port = config.get("vcenter_port", None)
+        self.vcenter_user = config.get("vcenter_user", None)
+        self.vcenter_password = config.get("vcenter_password", None)
+
+        #Set availability zone for Affinity rules
+        self.availability_zone = self.set_availability_zones()
+
+# ############# Stub code for SRIOV #################
+#         try:
+#             self.dvs_name = config['dv_switch_name']
+#         except KeyError:
+#             raise vimconn.vimconnException(message="Error: distributed virtaul switch name is empty in Config")
+#
+#         self.vlanID_range = config.get("vlanID_range", None)
+
+        self.org_uuid = None
+        self.client = None
+
+        if not url:
+            raise vimconn.vimconnException('url param can not be NoneType')
+
+        if not self.url_admin:  # try to use normal url
+            self.url_admin = self.url
+
+        logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
+                                                                              self.tenant_id, self.tenant_name))
+        logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
+        logging.debug("vcd admin username {} vcd admin passowrd {}".format(self.admin_user, self.admin_password))
+
+        # initialize organization
+        if self.user is not None and self.passwd is not None and self.url:
+            self.init_organization()
+
+    def __getitem__(self, index):
+        if index == 'name':
+            return self.name
+        if index == 'tenant_id':
+            return self.tenant_id
+        if index == 'tenant_name':
+            return self.tenant_name
+        elif index == 'id':
+            return self.id
+        elif index == 'org_name':
+            return self.org_name
+        elif index == 'org_uuid':
+            return self.org_uuid
+        elif index == 'user':
+            return self.user
+        elif index == 'passwd':
+            return self.passwd
+        elif index == 'url':
+            return self.url
+        elif index == 'url_admin':
+            return self.url_admin
+        elif index == "config":
+            return self.config
+        else:
+            raise KeyError("Invalid key '{}'".format(index))
+
+    def __setitem__(self, index, value):
+        if index == 'name':
+            self.name = value
+        if index == 'tenant_id':
+            self.tenant_id = value
+        if index == 'tenant_name':
+            self.tenant_name = value
+        elif index == 'id':
+            self.id = value
+        elif index == 'org_name':
+            self.org_name = value
+        elif index == 'org_uuid':
+            self.org_uuid = value
+        elif index == 'user':
+            self.user = value
+        elif index == 'passwd':
+            self.passwd = value
+        elif index == 'url':
+            self.url = value
+        elif index == 'url_admin':
+            self.url_admin = value
+        else:
+            raise KeyError("Invalid key '{}'".format(index))
+
+    def connect_as_admin(self):
+        """ Method connect as pvdc admin user to vCloud director.
+            There are certain action that can be done only by provider vdc admin user.
+            Organization creation / provider network creation etc.
+
+            Returns:
+                The return client object that latter can be used to connect to vcloud director as admin for provider vdc
+        """
+        self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
+
+        try:
+            host = self.url
+            org = 'System'
+            client_as_admin = Client(host, verify_ssl_certs=False)
+            client_as_admin.set_highest_supported_version()
+            client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
+        except Exception as e:
+            raise vimconn.vimconnException(
+                  "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
+
+        return client_as_admin
+
+    def connect(self):
+        """ Method connect as normal user to vCloud director.
+
+            Returns:
+                The return client object that latter can be used to connect to vCloud director as admin for VDC
+        """
+        try:
+            self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
+                                                                                      self.user,
+                                                                                      self.org_name))
+            host = self.url
+            client = Client(host, verify_ssl_certs=False)
+            client.set_highest_supported_version()
+            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
+        except:
+            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+                                                     "{} as user: {}".format(self.org_name, self.user))
+
+        return client
+
+    def init_organization(self):
+        """ Method initialize organization UUID and VDC parameters.
+
+            At bare minimum client must provide organization name that present in vCloud director and VDC.
+
+            The VDC - UUID ( tenant_id) will be initialized at the run time if client didn't call constructor.
+            The Org - UUID will be initialized at the run time if data center present in vCloud director.
+
+            Returns:
+                The return vca object that letter can be used to connect to vcloud direct as admin
+        """
+        client = self.connect()
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD.")
+
+        self.client = client
+        try:
+            if self.org_uuid is None:
+                org_list = client.get_org_list()
+                for org in org_list.Org:
+                    # we set org UUID at the init phase but we can do it only when we have valid credential.
+                    if org.get('name') == self.org_name:
+                        self.org_uuid = org.get('href').split('/')[-1]
+                        self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
+                        break
+                else:
+                    raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
+
+                # if well good we require for org details
+                org_details_dict = self.get_org(org_uuid=self.org_uuid)
+
+                # we have two case if we want to initialize VDC ID or VDC name at run time
+                # tenant_name provided but no tenant id
+                if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
+                    vdcs_dict = org_details_dict['vdcs']
+                    for vdc in vdcs_dict:
+                        if vdcs_dict[vdc] == self.tenant_name:
+                            self.tenant_id = vdc
+                            self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
+                                                                                                    self.org_name))
+                            break
+                    else:
+                        raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
+                    # case two we have tenant_id but we don't have tenant name so we find and set it.
+                    if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
+                        vdcs_dict = org_details_dict['vdcs']
+                        for vdc in vdcs_dict:
+                            if vdc == self.tenant_id:
+                                self.tenant_name = vdcs_dict[vdc]
+                                self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
+                                                                                                        self.org_name))
+                                break
+                        else:
+                            raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
+            self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
+        except:
+            self.logger.debug("Failed initialize organization UUID for org {}".format(self.org_name))
+            self.logger.debug(traceback.format_exc())
+            self.org_uuid = None
+
+    def new_tenant(self, tenant_name=None, tenant_description=None):
+        """ Method adds a new tenant to VIM with this name.
+            This action requires access to create VDC action in vCloud director.
+
+            Args:
+                tenant_name is tenant_name to be created.
+                tenant_description not used for this call
+
+            Return:
+                returns the tenant identifier in UUID format.
+                If action is failed method will throw vimconn.vimconnException method
+            """
+        vdc_task = self.create_vdc(vdc_name=tenant_name)
+        if vdc_task is not None:
+            vdc_uuid, value = vdc_task.popitem()
+            self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
+            return vdc_uuid
+        else:
+            raise vimconn.vimconnException("Failed create tenant {}".format(tenant_name))
+
+    def delete_tenant(self, tenant_id=None):
+        """ Delete a tenant from VIM
+             Args:
+                tenant_id is tenant_id to be deleted.
+
+            Return:
+                returns the tenant identifier in UUID format.
+                If action is failed method will throw exception
+        """
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+
+        if tenant_id is not None:
+            if vca._session:
+                #Get OrgVDC
+                url_list = [self.url, '/api/vdc/', tenant_id]
+                orgvdc_herf = ''.join(url_list)
+
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=orgvdc_herf,
+                                                headers=headers)
+
+                if response.status_code != requests.codes.ok:
+                    self.logger.debug("delete_tenant():GET REST API call {} failed. "\
+                                      "Return status code {}".format(orgvdc_herf,
+                                                                     response.status_code))
+                    raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
+
+                lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+                namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+                #For python3
+                #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+                namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+                vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
+                vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
+
+                response = self.perform_request(req_type='DELETE',
+                                                url=vdc_remove_href,
+                                                headers=headers)
+
+                if response.status_code == 202:
+                    time.sleep(5)
+                    return tenant_id
+                else:
+                    self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
+                                      "Return status code {}".format(vdc_remove_href,
+                                                                     response.status_code))
+                    raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
+        else:
+            self.logger.debug("delete_tenant():Incorrect tenant ID  {}".format(tenant_id))
+            raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
+
+
+    def get_tenant_list(self, filter_dict={}):
+        """Obtain tenants of VIM
+        filter_dict can contain the following keys:
+            name: filter by tenant name
+            id: filter by tenant uuid/id
+            <other VIM specific>
+        Returns the tenant list of dictionaries:
+            [{'name':'<name>, 'id':'<id>, ...}, ...]
+
+        """
+        org_dict = self.get_org(self.org_uuid)
+        vdcs_dict = org_dict['vdcs']
+
+        vdclist = []
+        try:
+            for k in vdcs_dict:
+                entry = {'name': vdcs_dict[k], 'id': k}
+                # if caller didn't specify dictionary we return all tenants.
+                if filter_dict is not None and filter_dict:
+                    filtered_entry = entry.copy()
+                    filtered_dict = set(entry.keys()) - set(filter_dict)
+                    for unwanted_key in filtered_dict: del entry[unwanted_key]
+                    if filter_dict == entry:
+                        vdclist.append(filtered_entry)
+                else:
+                    vdclist.append(entry)
+        except:
+            self.logger.debug("Error in get_tenant_list()")
+            self.logger.debug(traceback.format_exc())
+            raise vimconn.vimconnException("Incorrect state. {}")
+
+        return vdclist
+
+    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+        """Adds a tenant network to VIM
+        Params:
+            'net_name': name of the network
+            'net_type': one of:
+                'bridge': overlay isolated network
+                'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+            'ip_profile': is a dict containing the IP parameters of the network
+                'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                'dhcp_enabled': True or False
+                'dhcp_start_address': ip_schema, first IP to grant
+                'dhcp_count': number of IPs to grant.
+            'shared': if this network can be seen/use by other tenants/organization
+            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
+        Returns a tuple with the network identifier and created_items, or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+
+        self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
+                          .format(net_name, net_type, ip_profile, shared))
+
+        created_items = {}
+        isshared = 'false'
+        if shared:
+            isshared = 'true'
+
+# ############# Stub code for SRIOV #################
+#         if net_type == "data" or net_type == "ptp":
+#             if self.config.get('dv_switch_name') == None:
+#                  raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
+#             network_uuid = self.create_dvPort_group(net_name)
+
+        network_uuid = self.create_network(network_name=net_name, net_type=net_type,
+                                           ip_profile=ip_profile, isshared=isshared)
+        if network_uuid is not None:
+            return network_uuid, created_items
+        else:
+            raise vimconn.vimconnUnexpectedResponse("Failed create a new network {}".format(net_name))
+
+    def get_vcd_network_list(self):
+        """ Method available organization for a logged in tenant
+
+            Returns:
+                The return vca object that letter can be used to connect to vcloud direct as admin
+        """
+
+        self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
+
+        if not self.tenant_name:
+            raise vimconn.vimconnConnectionException("Tenant name is empty.")
+
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
+
+        vdc_uuid = vdc.get('id').split(":")[3]
+        if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                           url=vdc.get('href'),
+                                               headers=headers)
+        if response.status_code != 200:
+            self.logger.error("Failed to get vdc content")
+            raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+        else:
+            content = XmlElementTree.fromstring(response.content)
+
+        network_list = []
+        try:
+            for item in content:
+                if item.tag.split('}')[-1] == 'AvailableNetworks':
+                    for net in item:
+                        response = self.perform_request(req_type='GET',
+                                                   url=net.get('href'),
+                                                       headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("Failed to get network content")
+                            raise vimconn.vimconnNotFoundException("Failed to get network content")
+                        else:
+                            net_details = XmlElementTree.fromstring(response.content)
+
+                            filter_dict = {}
+                            net_uuid = net_details.get('id').split(":")
+                            if len(net_uuid) != 4:
+                                continue
+                            else:
+                                net_uuid = net_uuid[3]
+                                # create dict entry
+                                self.logger.debug("get_vcd_network_list(): Adding network {} "
+                                                  "to a list vcd id {} network {}".format(net_uuid,
+                                                                                          vdc_uuid,
+                                                                                          net_details.get('name')))
+                                filter_dict["name"] = net_details.get('name')
+                                filter_dict["id"] = net_uuid
+                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                    shared = True
+                                else:
+                                    shared = False
+                                filter_dict["shared"] = shared
+                                filter_dict["tenant_id"] = vdc_uuid
+                                if int(net_details.get('status')) == 1:
+                                    filter_dict["admin_state_up"] = True
+                                else:
+                                    filter_dict["admin_state_up"] = False
+                                filter_dict["status"] = "ACTIVE"
+                                filter_dict["type"] = "bridge"
+                                network_list.append(filter_dict)
+                                self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
+        except:
+            self.logger.debug("Error in get_vcd_network_list", exc_info=True)
+            pass
+
+        self.logger.debug("get_vcd_network_list returning {}".format(network_list))
+        return network_list
+
    def get_network_list(self, filter_dict={}):
        """Obtain tenant networks of VIM
        Filter_dict can be:
            name: network name  OR/AND
            id: network uuid    OR/AND
            shared: boolean     OR/AND
            tenant_id: tenant   OR/AND
            admin_state_up: boolean
            status: 'ACTIVE'

        [{key : value , key : value}]

        A network is included in the result only when every key present in
        filter_dict matches the corresponding network attribute exactly.

        Returns the network list of dictionaries:
            [{<the fields at Filter_dict plus some VIM specific>}, ...]
            List can be empty

        Raises:
            vimconnConnectionException: tenant name is empty or the VDC is not found
            vimconnNotFoundException: a REST request failed or any other error occurred
        """

        self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))

        if not self.tenant_name:
            raise vimconn.vimconnConnectionException("Tenant name is empty.")

        org, vdc = self.get_vdc_details()
        if vdc is None:
            raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))

        try:
            # VDC 'id' attribute has the format 'urn:vcloud:vdc:<uuid>'
            vdcid = vdc.get('id').split(":")[3]

            if self.client._session:
                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
                response = self.perform_request(req_type='GET',
                                           url=vdc.get('href'),
                                               headers=headers)
            # NOTE(review): if self.client._session is falsy, 'response' is
            # undefined here and the next line raises NameError — confirm a
            # session is always present at this point
            if response.status_code != 200:
                self.logger.error("Failed to get vdc content")
                raise vimconn.vimconnNotFoundException("Failed to get vdc content")
            else:
                content = XmlElementTree.fromstring(response.content)

            network_list = []
            for item in content:
                # element tags are namespace qualified: '{<ns>}AvailableNetworks'
                if item.tag.split('}')[-1] == 'AvailableNetworks':
                    for net in item:
                        # fetch the detail document of each individual network
                        response = self.perform_request(req_type='GET',
                                                   url=net.get('href'),
                                                       headers=headers)

                        if response.status_code != 200:
                            self.logger.error("Failed to get network content")
                            raise vimconn.vimconnNotFoundException("Failed to get network content")
                        else:
                            net_details = XmlElementTree.fromstring(response.content)

                            filter_entry = {}
                            # network 'id' has the format 'urn:vcloud:network:<uuid>'
                            net_uuid = net_details.get('id').split(":")
                            if len(net_uuid) != 4:
                                continue
                            else:
                                net_uuid = net_uuid[3]
                                # create dict entry
                                self.logger.debug("get_network_list(): Adding net {}"
                                                  " to a list vcd id {} network {}".format(net_uuid,
                                                                                           vdcid,
                                                                                           net_details.get('name')))
                                filter_entry["name"] = net_details.get('name')
                                filter_entry["id"] = net_uuid
                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
                                    shared = True
                                else:
                                    shared = False
                                filter_entry["shared"] = shared
                                filter_entry["tenant_id"] = vdcid
                                # vCD status '1' means the network is up
                                if int(net_details.get('status')) == 1:
                                    filter_entry["admin_state_up"] = True
                                else:
                                    filter_entry["admin_state_up"] = False
                                filter_entry["status"] = "ACTIVE"
                                filter_entry["type"] = "bridge"
                                # keep an untouched copy: filter_entry itself is
                                # pruned below for the equality comparison
                                filtered_entry = filter_entry.copy()

                                if filter_dict is not None and filter_dict:
                                    # we remove all the key : value we don't care and match only
                                    # respected field
                                    filtered_dict = set(filter_entry.keys()) - set(filter_dict)
                                    for unwanted_key in filtered_dict: del filter_entry[unwanted_key]
                                    if filter_dict == filter_entry:
                                        network_list.append(filtered_entry)
                                else:
                                    network_list.append(filtered_entry)
        except Exception as e:
            self.logger.debug("Error in get_network_list",exc_info=True)
            if isinstance(e, vimconn.vimconnException):
                raise
            else:
                raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))

        self.logger.debug("Returning {}".format(network_list))
        return network_list
+
    def get_network(self, net_id):
        """Method obtains network details of net_id VIM network
           Return a dict with  the fields at filter_dict (see get_network_list) plus some VIM specific>}, ...]

           :param net_id: VIM network identifier (uuid part of the vCD urn)
           :raises vimconnNotFoundException: when the network is not found or a REST request fails
        """

        try:
            org, vdc = self.get_vdc_details()
            # VDC 'id' attribute has the format 'urn:vcloud:vdc:<uuid>'
            vdc_id = vdc.get('id').split(":")[3]
            if self.client._session:
                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
                response = self.perform_request(req_type='GET',
                                           url=vdc.get('href'),
                                               headers=headers)
            # NOTE(review): if self.client._session is falsy, 'response' is
            # undefined and the next line raises NameError (caught below)
            if response.status_code != 200:
                self.logger.error("Failed to get vdc content")
                raise vimconn.vimconnNotFoundException("Failed to get vdc content")
            else:
                content = XmlElementTree.fromstring(response.content)

            filter_dict = {}

            for item in content:
                # element tags are namespace qualified: '{<ns>}AvailableNetworks'
                if item.tag.split('}')[-1] == 'AvailableNetworks':
                    for net in item:
                        response = self.perform_request(req_type='GET',
                                                   url=net.get('href'),
                                                       headers=headers)

                        if response.status_code != 200:
                            self.logger.error("Failed to get network content")
                            raise vimconn.vimconnNotFoundException("Failed to get network content")
                        else:
                            net_details = XmlElementTree.fromstring(response.content)

                            # network 'id' has the format 'urn:vcloud:network:<uuid>'
                            vdc_network_id = net_details.get('id').split(":")
                            if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
                                filter_dict["name"] = net_details.get('name')
                                filter_dict["id"] = vdc_network_id[3]
                                if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
                                    shared = True
                                else:
                                    shared = False
                                filter_dict["shared"] = shared
                                filter_dict["tenant_id"] = vdc_id
                                # vCD status '1' means the network is up
                                if int(net_details.get('status')) == 1:
                                    filter_dict["admin_state_up"] = True
                                else:
                                    filter_dict["admin_state_up"] = False
                                filter_dict["status"] = "ACTIVE"
                                filter_dict["type"] = "bridge"
                                self.logger.debug("Returning {}".format(filter_dict))
                                return filter_dict
                    # for/else: runs when the loop above finishes without
                    # returning, i.e. net_id was not among this section's networks
                    else:
                        raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
        except Exception as e:
            self.logger.debug("Error in get_network")
            self.logger.debug(traceback.format_exc())
            if isinstance(e, vimconn.vimconnException):
                raise
            else:
                raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))

        # reached only when no 'AvailableNetworks' section exists; returns the
        # (empty) dict in that case
        return filter_dict
+
+    def delete_network(self, net_id, created_items=None):
+        """
+        Removes a tenant network from VIM and its associated elements
+        :param net_id: VIM identifier of the network, provided by method new_network
+        :param created_items: dictionary with extra items to be deleted. provided by method new_network
+        Returns the network identifier or raises an exception upon error or when network is not found
+        """
+
+        # ############# Stub code for SRIOV #################
+#         dvport_group = self.get_dvport_group(net_id)
+#         if dvport_group:
+#             #delete portgroup
+#             status = self.destroy_dvport_group(net_id)
+#             if status:
+#                 # Remove vlanID from persistent info
+#                 if net_id in self.persistent_info["used_vlanIDs"]:
+#                     del self.persistent_info["used_vlanIDs"][net_id]
+#
+#                 return net_id
+
+        vcd_network = self.get_vcd_network(network_uuid=net_id)
+        if vcd_network is not None and vcd_network:
+            if self.delete_network_action(network_uuid=net_id):
+                return net_id
+        else:
+            raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
+
+    def refresh_nets_status(self, net_list):
+        """Get the status of the networks
+           Params: the list of network identifiers
+           Returns a dictionary with:
+                net_id:         #VIM id of this network
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, INACTIVE, DOWN (admin down),
+                                #  BUILD (on building process)
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+
+        """
+
+        dict_entry = {}
+        try:
+            for net in net_list:
+                errormsg = ''
+                vcd_network = self.get_vcd_network(network_uuid=net)
+                if vcd_network is not None and vcd_network:
+                    if vcd_network['status'] == '1':
+                        status = 'ACTIVE'
+                    else:
+                        status = 'DOWN'
+                else:
+                    status = 'DELETED'
+                    errormsg = 'Network not found.'
+
+                dict_entry[net] = {'status': status, 'error_msg': errormsg,
+                                   'vim_info': yaml.safe_dump(vcd_network)}
+        except:
+            self.logger.debug("Error in refresh_nets_status")
+            self.logger.debug(traceback.format_exc())
+
+        return dict_entry
+
+    def get_flavor(self, flavor_id):
+        """Obtain flavor details from the  VIM
+            Returns the flavor dict details {'id':<>, 'name':<>, other vim specific } #TODO to concrete
+        """
+        if flavor_id not in vimconnector.flavorlist:
+            raise vimconn.vimconnNotFoundException("Flavor not found.")
+        return vimconnector.flavorlist[flavor_id]
+
+    def new_flavor(self, flavor_data):
+        """Adds a tenant flavor to VIM
+            flavor_data contains a dictionary with information, keys:
+                name: flavor name
+                ram: memory (cloud type) in MBytes
+                vpcus: cpus (cloud type)
+                extended: EPA parameters
+                  - numas: #items requested in same NUMA
+                        memory: number of 1G huge pages memory
+                        paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                        interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                          - name: interface name
+                            dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                            bandwidth: X Gbps; requested guarantee bandwidth
+                            vpci: requested virtual PCI address
+                disk: disk size
+                is_public:
+                 #TODO to concrete
+        Returns the flavor identifier"""
+
+        # generate a new uuid put to internal dict and return it.
+        self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
+        new_flavor=flavor_data
+        ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
+        cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
+        disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
+
+        if not isinstance(ram, int):
+            raise vimconn.vimconnException("Non-integer value for ram")
+        elif not isinstance(cpu, int):
+            raise vimconn.vimconnException("Non-integer value for cpu")
+        elif not isinstance(disk, int):
+            raise vimconn.vimconnException("Non-integer value for disk")
+
+        extended_flv = flavor_data.get("extended")
+        if extended_flv:
+            numas=extended_flv.get("numas")
+            if numas:
+                for numa in numas:
+                    #overwrite ram and vcpus
+                    if 'memory' in numa:
+                        ram = numa['memory']*1024
+                    if 'paired-threads' in numa:
+                        cpu = numa['paired-threads']*2
+                    elif 'cores' in numa:
+                        cpu = numa['cores']
+                    elif 'threads' in numa:
+                        cpu = numa['threads']
+
+        new_flavor[FLAVOR_RAM_KEY] = ram
+        new_flavor[FLAVOR_VCPUS_KEY] = cpu
+        new_flavor[FLAVOR_DISK_KEY] = disk
+        # generate a new uuid put to internal dict and return it.
+        flavor_id = uuid.uuid4()
+        vimconnector.flavorlist[str(flavor_id)] = new_flavor
+        self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
+
+        return str(flavor_id)
+
+    def delete_flavor(self, flavor_id):
+        """Deletes a tenant flavor from VIM identify by its id
+
+           Returns the used id or raise an exception
+        """
+        if flavor_id not in vimconnector.flavorlist:
+            raise vimconn.vimconnNotFoundException("Flavor not found.")
+
+        vimconnector.flavorlist.pop(flavor_id, None)
+        return flavor_id
+
+    def new_image(self, image_dict):
+        """
+        Adds a tenant image to VIM
+        Returns:
+            200, image-id        if the image is created
+            <0, message          if there is an error
+        """
+
+        return self.get_image_id_from_path(image_dict['location'])
+
    def delete_image(self, image_id):
        """
            Deletes a tenant image from VIM
            Args:
                image_id is ID of Image to be deleted
            Return:
                returns the image identifier in UUID format or raises an exception on error
        """
        # an admin connection is required to remove the catalog
        conn = self.connect_as_admin()
        if not conn:
            raise vimconn.vimconnConnectionException("Failed to connect vCD")
        # Get Catalog details
        url_list = [self.url, '/api/catalog/', image_id]
        catalog_herf = ''.join(url_list)

        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                  'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}

        response = self.perform_request(req_type='GET',
                                        url=catalog_herf,
                                        headers=headers)

        if response.status_code != requests.codes.ok:
            self.logger.debug("delete_image():GET REST API call {} failed. "\
                              "Return status code {}".format(catalog_herf,
                                                             response.status_code))
            raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))

        lxmlroot_respond = lxmlElementTree.fromstring(response.content)
        # prefix->uri map without the default (unprefixed) namespace ...
        namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
        # ... then re-add the default namespace under an explicit 'xmlns' prefix
        # so it can be referenced in the find()/iterfind() expressions below
        namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"

        catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
        catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
        for catalogItem in catalogItems:
            catalogItem_href = catalogItem.attrib['href']

            response = self.perform_request(req_type='GET',
                                        url=catalogItem_href,
                                        headers=headers)

            if response.status_code != requests.codes.ok:
                self.logger.debug("delete_image():GET REST API call {} failed. "\
                                  "Return status code {}".format(catalog_herf,
                                                                 response.status_code))
                raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
                                                                                    catalogItem,
                                                                                    image_id))

            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
            # the item's Link with rel='remove' points to its DELETE endpoint
            catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']

            #Remove catalogItem
            response = self.perform_request(req_type='DELETE',
                                        url=catalogitem_remove_href,
                                        headers=headers)
            if response.status_code == requests.codes.no_content:
                self.logger.debug("Deleted Catalog item {}".format(catalogItem))
            else:
                raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))

        #Remove catalog (admin endpoint) once all its items are gone
        url_list = [self.url, '/api/admin/catalog/', image_id]
        catalog_remove_herf = ''.join(url_list)
        response = self.perform_request(req_type='DELETE',
                                        url=catalog_remove_herf,
                                        headers=headers)

        if response.status_code == requests.codes.no_content:
            self.logger.debug("Deleted Catalog {}".format(image_id))
            return image_id
        else:
            raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
+
+
+    def catalog_exists(self, catalog_name, catalogs):
+        """
+
+        :param catalog_name:
+        :param catalogs:
+        :return:
+        """
+        for catalog in catalogs:
+            if catalog['name'] == catalog_name:
+                return catalog['id']
+
+    def create_vimcatalog(self, vca=None, catalog_name=None):
+        """ Create new catalog entry in vCloud director.
+
+            Args
+                vca:  vCloud director.
+                catalog_name catalog that client wish to create.   Note no validation done for a name.
+                Client must make sure that provide valid string representation.
+
+             Returns catalog id if catalog created else None.
+
+        """
+        try:
+            lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
+            if lxml_catalog_element:
+                id_attr_value = lxml_catalog_element.get('id')  # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
+                return id_attr_value.split(':')[-1]
+            catalogs = vca.list_catalogs()
+        except Exception as ex:
+            self.logger.error(
+                'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
+            raise
+        return self.catalog_exists(catalog_name, catalogs)
+
    # noinspection PyIncorrectDocstring
    def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
                   description='', progress=False, chunk_bytes=128 * 1024):
        """
        Uploads a OVF file to a vCloud catalog

        :param chunk_bytes: upload chunk size in bytes (default 128 KiB)
        :param progress: when True, show a progress bar during the VMDK upload
        :param description: description embedded in the created vApp template
        :param image_name: image name (not referenced in the body below)
        :param vca: vCloud director connection object
        :param catalog_name: (str): The name of the catalog to upload the media.
        :param media_file_name: (str): The name of the local media file to upload.
        :return: (bool) True if the media file was successfully uploaded, false otherwise.
        """
        os.path.isfile(media_file_name)
        statinfo = os.stat(media_file_name)

        #  find a catalog entry where we upload OVF.
        #  create vApp Template and check the status if vCD able to read OVF it will respond with appropirate
        #  status change.
        #  if VCD can parse OVF we upload VMDK file
        try:
            for catalog in vca.list_catalogs():
                if catalog_name != catalog['name']:
                    continue
                catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
                data = """
                <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
                """.format(catalog_name, description)

                if self.client:
                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'

                # ask vCD to create the (empty) vApp template entity
                response = self.perform_request(req_type='POST',
                                                url=catalog_href,
                                                headers=headers,
                                                data=data)

                if response.status_code == requests.codes.created:
                    catalogItem = XmlElementTree.fromstring(response.content)
                    entity = [child for child in catalogItem if
                              child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
                    href = entity.get('href')
                    template = href

                    response = self.perform_request(req_type='GET',
                                                    url=href,
                                                    headers=headers)

                    if response.status_code == requests.codes.ok:
                        # NOTE(review): header value 'Content-Type text/xml'
                        # looks like it duplicates the header name — confirm
                        headers['Content-Type'] = 'Content-Type text/xml'
                        # NOTE(review): under python3 requests' response.content
                        # is bytes while this pattern is str; re.search would
                        # raise TypeError — confirm perform_request returns a
                        # text-compatible response
                        result = re.search('rel="upload:default"\shref="(.*?\/descriptor.ovf)"',response.content)
                        if result:
                            transfer_href = result.group(1)

                        # upload the OVF descriptor to the transfer endpoint
                        response = self.perform_request(req_type='PUT',
                                                    url=transfer_href,
                                                    headers=headers,
                                                    data=open(media_file_name, 'rb'))
                        if response.status_code != requests.codes.ok:
                            self.logger.debug(
                                "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
                                                                                                      media_file_name))
                            return False

                    # TODO fix this with aync block
                    time.sleep(5)

                    self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))

                    # uploading VMDK file
                    # check status of OVF upload and upload remaining files.
                    response = self.perform_request(req_type='GET',
                                                    url=template,
                                                    headers=headers)

                    if response.status_code == requests.codes.ok:
                        result = re.search('rel="upload:default"\s*href="(.*?vmdk)"',response.content)
                        if result:
                            link_href = result.group(1)
                        # NOTE(review): if the regex did not match, link_href is
                        # undefined and the next line raises NameError — confirm
                        # we skip ovf since it already uploaded.
                        if 'ovf' in link_href:
                            continue
                        # The OVF file and VMDK must be in a same directory
                        head, tail = os.path.split(media_file_name)
                        file_vmdk = head + '/' + link_href.split("/")[-1]
                        if not os.path.isfile(file_vmdk):
                            return False
                        statinfo = os.stat(file_vmdk)
                        if statinfo.st_size == 0:
                            return False
                        hrefvmdk = link_href

                        if progress:
                            widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
                                           FileTransferSpeed()]
                            progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()

                        # upload the VMDK in chunk_bytes chunks, tracking the
                        # byte range of each PUT in the Content-Range header
                        bytes_transferred = 0
                        f = open(file_vmdk, 'rb')
                        while bytes_transferred < statinfo.st_size:
                            my_bytes = f.read(chunk_bytes)
                            if len(my_bytes) <= chunk_bytes:
                                headers['Content-Range'] = 'bytes {}-{}/{}'.format(
                                    bytes_transferred, len(my_bytes) - 1, statinfo.st_size)
                                headers['Content-Length'] = str(len(my_bytes))
                                response = requests.put(url=hrefvmdk,
                                                         headers=headers,
                                                         data=my_bytes,
                                                         verify=False)
                                if response.status_code == requests.codes.ok:
                                    bytes_transferred += len(my_bytes)
                                    if progress:
                                        progress_bar.update(bytes_transferred)
                                else:
                                    self.logger.debug(
                                        'file upload failed with error: [{}] {}'.format(response.status_code,
                                                                                        response.content))

                                    f.close()
                                    return False
                        f.close()
                        if progress:
                            progress_bar.finish()
                            time.sleep(10)
                    return True
                else:
                    self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
                                      format(catalog_name, media_file_name))
                    return False
        except Exception as exp:
            self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
                .format(catalog_name,media_file_name, exp))
            raise vimconn.vimconnException(
                "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
                .format(catalog_name,media_file_name, exp))

        self.logger.debug("Failed retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
        return False
+
+    def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
+        """Upload media file"""
+        # TODO add named parameters for readability
+
+        return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
+                               media_file_name=medial_file_name, description='medial_file_name', progress=progress)
+
+    def validate_uuid4(self, uuid_string=None):
+        """  Method validate correct format of UUID.
+
+        Return: true if string represent valid uuid
+        """
+        try:
+            val = uuid.UUID(uuid_string, version=4)
+        except ValueError:
+            return False
+        return True
+
+    def get_catalogid(self, catalog_name=None, catalogs=None):
+        """  Method check catalog and return catalog ID in UUID format.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs uuid
+        """
+
+        for catalog in catalogs:
+            if catalog['name'] == catalog_name:
+                catalog_id = catalog['id']
+                return catalog_id
+        return None
+
+    def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
+        """  Method check catalog and return catalog name lookup done by catalog UUID.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs name or None
+        """
+
+        if not self.validate_uuid4(uuid_string=catalog_uuid):
+            return None
+
+        for catalog in catalogs:
+            catalog_id = catalog.get('id')
+            if catalog_id == catalog_uuid:
+                return catalog.get('name')
+        return None
+
+    def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
+        """  Method check catalog and return catalog name lookup done by catalog UUID.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs name or None
+        """
+
+        if not self.validate_uuid4(uuid_string=catalog_uuid):
+            return None
+
+        for catalog in catalogs:
+            catalog_id = catalog.get('id')
+            if catalog_id == catalog_uuid:
+                return catalog
+        return None
+
+    def get_image_id_from_path(self, path=None, progress=False):
+        """  Method upload OVF image to vCloud director.
+
+        Each OVF image represented as single catalog entry in vcloud director.
+        The method check for existing catalog entry.  The check done by file name without file extension.
+
+        if given catalog name already present method will respond with existing catalog uuid otherwise
+        it will create new catalog entry and upload OVF file to newly created catalog.
+
+        If method can't create catalog entry or upload a file it will throw exception.
+
+        Method accept boolean flag progress that will output progress bar. It useful method
+        for standalone upload use case. In case to test large file upload.
+
+        Args
+            path: - valid path to OVF file.
+            progress - boolean progress bar show progress bar.
+
+        Return: if image uploaded correct method will provide image catalog UUID.
+        """
+
+        if not path:
+            raise vimconn.vimconnException("Image path can't be None.")
+
+        if not os.path.isfile(path):
+            raise vimconn.vimconnException("Can't read file. File not found.")
+
+        if not os.access(path, os.R_OK):
+            raise vimconn.vimconnException("Can't read file. Check file permission to read.")
+
+        self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
+
+        dirpath, filename = os.path.split(path)
+        flname, file_extension = os.path.splitext(path)
+        if file_extension != '.ovf':
+            self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
+            raise vimconn.vimconnException("Wrong container.  vCloud director supports only OVF.")
+
+        catalog_name = os.path.splitext(filename)[0]
+        catalog_md5_name = hashlib.md5(path).hexdigest()
+        self.logger.debug("File name {} Catalog Name {} file path {} "
+                          "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
+
+        try:
+            org,vdc = self.get_vdc_details()
+            catalogs = org.list_catalogs()
+        except Exception as exp:
+            self.logger.debug("Failed get catalogs() with Exception {} ".format(exp))
+            raise vimconn.vimconnException("Failed get catalogs() with Exception {} ".format(exp))
+
+        if len(catalogs) == 0:
+            self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
+            if self.create_vimcatalog(org, catalog_md5_name) is None:
+                raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
+
+            result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
+                                          media_name=filename, medial_file_name=path, progress=progress)
+            if not result:
+                raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_name))
+            return self.get_catalogid(catalog_name, catalogs)
+        else:
+            for catalog in catalogs:
+                # search for existing catalog if we find same name we return ID
+                # TODO optimize this
+                if catalog['name'] == catalog_md5_name:
+                    self.logger.debug("Found existing catalog entry for {} "
+                                      "catalog id {}".format(catalog_name,
+                                                             self.get_catalogid(catalog_md5_name, catalogs)))
+                    return self.get_catalogid(catalog_md5_name, catalogs)
+
+        # if we didn't find existing catalog we create a new one and upload image.
+        self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
+        if self.create_vimcatalog(org, catalog_md5_name) is None:
+            raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
+
+        result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
+                                      media_name=filename, medial_file_name=path, progress=progress)
+        if not result:
+            raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
+
+        return self.get_catalogid(catalog_md5_name, org.list_catalogs())
+
+    def get_image_list(self, filter_dict={}):
+        '''Obtain tenant images from VIM
+        Filter_dict can be:
+            name: image name
+            id: image uuid
+            checksum: image checksum
+            location: image path
+        Returns the image list of dictionaries:
+            [{<the fields at Filter_dict plus some VIM specific>}, ...]
+            List can be empty
+        '''
+
+        try:
+            org, vdc = self.get_vdc_details()
+            image_list = []
+            catalogs = org.list_catalogs()
+            if len(catalogs) == 0:
+                return image_list
+            else:
+                for catalog in catalogs:
+                    catalog_uuid = catalog.get('id')
+                    name = catalog.get('name')
+                    filtered_dict = {}
+                    if filter_dict.get("name") and filter_dict["name"] != name:
+                        continue
+                    if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
+                        continue
+                    filtered_dict ["name"] = name
+                    filtered_dict ["id"] = catalog_uuid
+                    image_list.append(filtered_dict)
+
+                self.logger.debug("List of already created catalog items: {}".format(image_list))
+                return image_list
+        except Exception as exp:
+            raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
+
+    def get_vappid(self, vdc=None, vapp_name=None):
+        """ Method takes vdc object and vApp name and returns vapp uuid or None
+
+        Args:
+            vdc: The VDC object.
+            vapp_name: is application vappp name identifier
+
+        Returns:
+                The return vApp name otherwise None
+        """
+        if vdc is None or vapp_name is None:
+            return None
+        # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
+        try:
+            refs = filter(lambda ref: ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
+                          vdc.ResourceEntities.ResourceEntity)
+            #For python3
+            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
+            #         if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
+            if len(refs) == 1:
+                return refs[0].href.split("vapp")[1][1:]
+        except Exception as e:
+            self.logger.exception(e)
+            return False
+        return None
+
+    def check_vapp(self, vdc=None, vapp_uuid=None):
+        """ Method Method returns True or False if vapp deployed in vCloud director
+
+            Args:
+                vca: Connector to VCA
+                vdc: The VDC object.
+                vappid: vappid is application identifier
+
+            Returns:
+                The return True if vApp deployed
+                :param vdc:
+                :param vapp_uuid:
+        """
+        try:
+            refs = filter(lambda ref:
+                          ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml',
+                          vdc.ResourceEntities.ResourceEntity)
+            #For python3
+            #refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
+            #         if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
+            for ref in refs:
+                vappid = ref.href.split("vapp")[1][1:]
+                # find vapp with respected vapp uuid
+                if vappid == vapp_uuid:
+                    return True
+        except Exception as e:
+            self.logger.exception(e)
+            return False
+        return False
+
+    def get_namebyvappid(self, vapp_uuid=None):
+        """Method returns vApp name from vCD and lookup done by vapp_id.
+
+        Args:
+            vapp_uuid: vappid is application identifier
+
+        Returns:
+            The return vApp name otherwise None
+        """
+        try:
+            if self.client and vapp_uuid:
+                vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+                response = self.perform_request(req_type='GET',
+                                                url=vapp_call,
+                                                headers=headers)
+                #Retry login if session expired & retry sending request
+                if response.status_code == 403:
+                    response = self.retry_rest('GET', vapp_call)
+
+                tree = XmlElementTree.fromstring(response.content)
+                return tree.attrib['name']
+        except Exception as e:
+            self.logger.exception(e)
+            return None
+        return None
+
+    def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
+                       cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
+        """Adds a VM instance to VIM
+        Params:
+            'start': (boolean) indicates if VM must start or created in pause mode.
+            'image_id','flavor_id': image and flavor VIM id to use for the VM
+            'net_list': list of interfaces, each one is a dictionary with:
+                'name': (optional) name for the interface.
+                'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
+                'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
+                'mac_address': (optional) mac address to assign to this interface
+                #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
+                    the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+                'type': (mandatory) can be one of:
+                    'virtual', in this case always connected to a network of type 'net_type=bridge'
+                     'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network ot it
+                           can created unconnected
+                     'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                     'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                            are allocated on the same physical NIC
+                'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                                or True, it must apply the default VIM behaviour
+                After execution the method will add the key:
+                'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                        interface. 'net_list' is modified
+            'cloud_config': (optional) dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+            'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                'size': (mandatory) string with the size of the disk in GB
+            availability_zone_index: Index of availability_zone_list to use for this this VM. None if not AV required
+            availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                availability_zone_index is None
+        Returns a tuple with the instance identifier and created_items or raises an exception on error
+            created_items can be None or a dictionary where this method can include key-values that will be passed to
+            the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+            Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+            as not present.
+        """
+        self.logger.info("Creating new instance for entry {}".format(name))
+        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
+                          "availability_zone_index {} availability_zone_list {}"\
+                          .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
+                                  availability_zone_index, availability_zone_list))
+
+        #new vm name = vmname + tenant_id + uuid
+        new_vm_name = [name, '-', str(uuid.uuid4())]
+        vmname_andid = ''.join(new_vm_name)
+
+        for net in net_list:
+            if net['type'] == "PCI-PASSTHROUGH":
+                raise vimconn.vimconnNotSupportedException(
+                      "Current vCD version does not support type : {}".format(net['type']))
+
+        if len(net_list) > 10:
+            raise vimconn.vimconnNotSupportedException(
+                      "The VM hardware versions 7 and above support upto 10 NICs only")
+
+        # if vm already deployed we return existing uuid
+        # we check for presence of VDC, Catalog entry and Flavor.
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnNotFoundException(
+                "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
+        catalogs = org.list_catalogs()
+        if catalogs is None:
+            #Retry once, if failed by refreshing token
+            self.get_token()
+            org = Org(self.client, resource=self.client.get_org())
+            catalogs = org.list_catalogs()
+        if catalogs is None:
+            raise vimconn.vimconnNotFoundException(
+                "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
+
+        catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
+        if catalog_hash_name:
+            self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
+        else:
+            raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
+                                                   "(Failed retrieve catalog information {})".format(name, image_id))
+
+        # Set vCPU and Memory based on flavor.
+        vm_cpus = None
+        vm_memory = None
+        vm_disk = None
+        numas = None
+
+        if flavor_id is not None:
+            if flavor_id not in vimconnector.flavorlist:
+                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
+                                                       "Failed retrieve flavor information "
+                                                       "flavor id {}".format(name, flavor_id))
+            else:
+                try:
+                    flavor = vimconnector.flavorlist[flavor_id]
+                    vm_cpus = flavor[FLAVOR_VCPUS_KEY]
+                    vm_memory = flavor[FLAVOR_RAM_KEY]
+                    vm_disk = flavor[FLAVOR_DISK_KEY]
+                    extended = flavor.get("extended", None)
+                    if extended:
+                        numas=extended.get("numas", None)
+
+                except Exception as exp:
+                    raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
+
+        # image upload creates template name as catalog name space Template.
+        templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
+        power_on = 'false'
+        if start:
+            power_on = 'true'
+
+        # client must provide at least one entry in net_list if not we report error
+        #If net type is mgmt, then configure it as primary net & use its NIC index as primary NIC
+        #If no mgmt, then the 1st NN in netlist is considered as primary net. 
+        primary_net = None
+        primary_netname = None
+        primary_net_href = None
+        network_mode = 'bridged'
+        if net_list is not None and len(net_list) > 0:
+            for net in net_list:
+                if 'use' in net and net['use'] == 'mgmt' and not primary_net:
+                    primary_net = net
+            if primary_net is None:
+                primary_net = net_list[0]
+
+            try:
+                primary_net_id = primary_net['net_id']
+                url_list = [self.url, '/api/network/', primary_net_id]
+                primary_net_href = ''.join(url_list) 
+                network_dict = self.get_vcd_network(network_uuid=primary_net_id)
+                if 'name' in network_dict:
+                    primary_netname = network_dict['name']
+
+            except KeyError:
+                raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
+        else:
+            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
+
+        # use: 'data', 'bridge', 'mgmt'
+        # create vApp.  Set vcpu and ram based on flavor id.
+        try:
+            vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
+            if not vdc_obj:
+                raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
+
+            for retry in (1,2):
+                items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
+                catalog_items = [items.attrib]
+
+                if len(catalog_items) == 1:
+                    if self.client:
+                        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+                    response = self.perform_request(req_type='GET',
+                                                url=catalog_items[0].get('href'),
+                                                headers=headers)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_tempalte_href = entity.get("href")
+
+                response = self.perform_request(req_type='GET',
+                                                    url=vapp_tempalte_href,
+                                                    headers=headers)
+                if response.status_code != requests.codes.ok:
+                    self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
+                                                                                           response.status_code))
+                else:
+                    result = (response.content).replace("\n"," ")
+
+                vapp_template_tree = XmlElementTree.fromstring(response.content)
+                children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
+                vm_element = [child for child in children_element if 'Vm' in child.tag][0]
+                vm_name = vm_element.get('name')
+                vm_id = vm_element.get('id')
+                vm_href = vm_element.get('href')
+
+                cpus = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                cores = re.search('<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>',result).group(1)
+
+                headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
+                vdc_id = vdc.get('id').split(':')[-1]
+                instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
+                                                                                                vdc_id)
+                data = """<?xml version="1.0" encoding="UTF-8"?>
+                <InstantiateVAppTemplateParams
+                xmlns="http://www.vmware.com/vcloud/v1.5"
+                name="{}"
+                deploy="false"
+                powerOn="false"
+                xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+                <Description>Vapp instantiation</Description>
+                <InstantiationParams>
+                     <NetworkConfigSection>
+                         <ovf:Info>Configuration parameters for logical networks</ovf:Info>
+                         <NetworkConfig networkName="{}">
+                             <Configuration>
+                                 <ParentNetwork href="{}" />
+                                 <FenceMode>bridged</FenceMode>
+                             </Configuration>
+                         </NetworkConfig>
+                     </NetworkConfigSection>
+                <LeaseSettingsSection
+                type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
+                <ovf:Info>Lease Settings</ovf:Info>
+                <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
+                <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
+                </LeaseSettingsSection>
+                </InstantiationParams>
+                <Source href="{}"/>
+                <SourcedItem>
+                <Source href="{}" id="{}" name="{}"
+                type="application/vnd.vmware.vcloud.vm+xml"/>
+                <VmGeneralParams>
+                    <NeedsCustomization>false</NeedsCustomization>
+                </VmGeneralParams>
+                <InstantiationParams>
+                      <NetworkConnectionSection>
+                      <ovf:Info>Specifies the available VM network connections</ovf:Info>
+                      <NetworkConnection network="{}">
+                      <NetworkConnectionIndex>0</NetworkConnectionIndex>
+                      <IsConnected>true</IsConnected>
+                      <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
+                      </NetworkConnection>
+                      </NetworkConnectionSection><ovf:VirtualHardwareSection>
+                      <ovf:Info>Virtual hardware requirements</ovf:Info>
+                      <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+                      xmlns:vmw="http://www.vmware.com/schema/ovf">
+                      <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+                      <rasd:Description>Number of Virtual CPUs</rasd:Description>
+                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
+                      <rasd:InstanceID>4</rasd:InstanceID>
+                      <rasd:Reservation>0</rasd:Reservation>
+                      <rasd:ResourceType>3</rasd:ResourceType>
+                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
+                      <rasd:Weight>0</rasd:Weight>
+                      <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
+                      </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
+                      <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+                      <rasd:Description>Memory Size</rasd:Description>
+                      <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
+                      <rasd:InstanceID>5</rasd:InstanceID>
+                      <rasd:Reservation>0</rasd:Reservation>
+                      <rasd:ResourceType>4</rasd:ResourceType>
+                      <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
+                      <rasd:Weight>0</rasd:Weight>
+                      </ovf:Item>
+                </ovf:VirtualHardwareSection>
+                </InstantiationParams>
+                </SourcedItem>
+                <AllEULAsAccepted>false</AllEULAsAccepted>
+                </InstantiateVAppTemplateParams>""".format(vmname_andid,
+                                                        primary_netname,
+                                                        primary_net_href,
+                                                     vapp_tempalte_href,
+                                                                vm_href,
+                                                                  vm_id,
+                                                                vm_name,
+                                                        primary_netname,
+                                                               cpu=cpus,
+                                                             core=cores,
+                                                       memory=memory_mb)
+
+                response = self.perform_request(req_type='POST',
+                                                url=instantiate_vapp_href,
+                                                headers=headers,
+                                                data=data)
+
+                if response.status_code != 201:
+                    self.logger.error("REST call {} failed reason : {}"\
+                         "status code : {}".format(instantiate_vapp_href,
+                                                        response.content,
+                                                   response.status_code))
+                    raise vimconn.vimconnException("new_vminstance(): Failed to create"\
+                                                        "vAapp {}".format(vmname_andid))
+                else:
+                    vapptask = self.get_task_from_response(response.content)
+
+                if vapptask is None and retry==1:
+                    self.get_token() # Retry getting token
+                    continue
+                else:
+                    break
+
+            if vapptask is None or vapptask is False:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+
+            # wait for task to complete
+            result = self.client.get_task_monitor().wait_for_success(task=vapptask)
+
+            if result.get('status') == 'success':
+                self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
+            else:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
+
+        # we should have now vapp in undeployed state.
+        try:
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vmname_andid)
+            vapp_uuid = vapp_resource.get('id').split(':')[-1]
+            vapp = VApp(self.client, resource=vapp_resource)
+
+        except Exception as exp:
+            raise vimconn.vimconnUnexpectedResponse(
+                    "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                    .format(vmname_andid, exp))
+
+        if vapp_uuid is None:
+            raise vimconn.vimconnUnexpectedResponse(
+                "new_vminstance(): Failed to retrieve vApp {} after creation".format(
+                                                                            vmname_andid))
+
+        #Add PCI passthrough/SRIOV configrations
+        vm_obj = None
+        pci_devices_info = []
+        reserve_memory = False
+
+        for net in net_list:
+            if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
+                pci_devices_info.append(net)
+            elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
+                reserve_memory = True
+
+        #Add PCI
+        if len(pci_devices_info) > 0:
+            self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
+                                                                        vmname_andid ))
+            PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
+                                                                            pci_devices_info,
+                                                                            vmname_andid)
+            if PCI_devices_status:
+                self.logger.info("Added PCI devives {} to VM {}".format(
+                                                            pci_devices_info,
+                                                            vmname_andid)
+                                 )
+                reserve_memory = True
+            else:
+                self.logger.info("Fail to add PCI devives {} to VM {}".format(
+                                                            pci_devices_info,
+                                                            vmname_andid)
+                                 )
+
+        # Modify vm disk
+        if vm_disk:
+            #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
+            result = self.modify_vm_disk(vapp_uuid, vm_disk)
+            if result :
+                self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
+
+        #Add new or existing disks to vApp
+        if disk_list:
+            added_existing_disk = False
+            for disk in disk_list:
+                if 'device_type' in disk and disk['device_type'] == 'cdrom':
+                    image_id = disk['image_id']
+                    # Adding CD-ROM to VM
+                    # will revisit code once specification ready to support this feature
+                    self.insert_media_to_vm(vapp, image_id)
+                elif "image_id" in disk and disk["image_id"] is not None:
+                    self.logger.debug("Adding existing disk from image {} to vm {} ".format(
+                                                                    disk["image_id"] , vapp_uuid))
+                    self.add_existing_disk(catalogs=catalogs,
+                                           image_id=disk["image_id"],
+                                           size = disk["size"],
+                                           template_name=templateName,
+                                           vapp_uuid=vapp_uuid
+                                           )
+                    added_existing_disk = True
+                else:
+                    #Wait till added existing disk gets reflected into vCD database/API
+                    if added_existing_disk:
+                        time.sleep(5)
+                        added_existing_disk = False
+                    self.add_new_disk(vapp_uuid, disk['size'])
+
+        if numas:
+            # Assigning numa affinity setting
+            for numa in numas:
+                if 'paired-threads-id' in numa:
+                    paired_threads_id = numa['paired-threads-id']
+                    self.set_numa_affinity(vapp_uuid, paired_threads_id)
+
+        # add NICs & connect to networks in netlist
+        try:
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vmname_andid)
+            vapp = VApp(self.client, resource=vapp_resource)
+            vapp_id = vapp_resource.get('id').split(':')[-1]
+
+            self.logger.info("Removing primary NIC: ")
+            # First remove all NICs so that NIC properties can be adjusted as needed
+            self.remove_primary_network_adapter_from_all_vms(vapp)
+
+            self.logger.info("Request to connect VM to a network: {}".format(net_list))
+            primary_nic_index = 0
+            nicIndex = 0
+            for net in net_list:
+                # openmano uses network id in UUID format.
+                # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
+                # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
+                #   'vpci': '0000:00:11.0', 'name': 'eth0'}]
+
+                if 'net_id' not in net:
+                    continue
+
+                #Using net_id as a vim_id i.e. vim interface id, as do not have saperate vim interface id
+                #Same will be returned in refresh_vms_status() as vim_interface_id
+                net['vim_id'] = net['net_id']  # Provide the same VIM identifier as the VIM network
+
+                interface_net_id = net['net_id']
+                interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
+                interface_network_mode = net['use']
+
+                if interface_network_mode == 'mgmt':
+                    primary_nic_index = nicIndex
+
+                """- POOL (A static IP address is allocated automatically from a pool of addresses.)
+                                  - DHCP (The IP address is obtained from a DHCP service.)
+                                  - MANUAL (The IP address is assigned manually in the IpAddress element.)
+                                  - NONE (No IP addressing mode specified.)"""
+
+                if primary_netname is not None:
+                    self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
+                    nets = filter(lambda n: n.get('name') == interface_net_name, self.get_network_list())
+                    #For python3
+                    #nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
+                    if len(nets) == 1:
+                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
+
+                        if interface_net_name != primary_netname:
+                            # connect network to VM - with all DHCP by default
+                            self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
+                            self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
+
+                        type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
+                        nic_type = 'VMXNET3'
+                        if 'type' in net and net['type'] not in type_list:
+                            # fetching nic type from vnf
+                            if 'model' in net:
+                                if net['model'] is not None:
+                                    if net['model'].lower() == 'paravirt' or net['model'].lower() == 'virtio':
+                                        nic_type = 'VMXNET3'
+                                else:
+                                    nic_type = net['model']
+
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                          "to a network {}".format(nets[0].get('name')))
+                                self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net,
+                                                                nic_type=nic_type)
+                            else:
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                         "to a network {}".format(nets[0].get('name')))
+                                if net['type'] in ['SR-IOV', 'VF']:
+                                    nic_type = net['type']
+                                self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net,
+                                                                nic_type=nic_type)
+                nicIndex += 1
+
+            # cloud-init for ssh-key injection
+            if cloud_config:
+                # Create a catalog which will be carrying the config drive ISO
+                # This catalog is deleted during vApp deletion. The catalog name carries
+                # vApp UUID and thats how it gets identified during its deletion.
+                config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
+                self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
+                    config_drive_catalog_name))
+                config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
+                if config_drive_catalog_id is None:
+                    error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
+                                "ISO".format(config_drive_catalog_name)
+                    raise Exception(error_msg)
+
+                # Create config-drive ISO
+                _, userdata = self._create_user_data(cloud_config)
+                # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
+                iso_path = self.create_config_drive_iso(userdata)
+                self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
+
+                self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
+                self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
+                # Attach the config-drive ISO to the VM
+                self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
+                # The ISO remains in INVALID_STATE right after the PUT request (its a blocking call though)
+                time.sleep(5)
+                self.insert_media_to_vm(vapp, config_drive_catalog_id)
+                shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
+
+            # If VM has PCI devices or SRIOV reserve memory for VM
+            if reserve_memory:
+                self.reserve_memory_for_all_vms(vapp, memory_mb)
+
+            self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
+
+            poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
+            result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+            if result.get('status') == 'success':
+                self.logger.info("new_vminstance(): Successfully power on "\
+                                             "vApp {}".format(vmname_andid))
+            else:
+                self.logger.error("new_vminstance(): failed to power on vApp "\
+                                                     "{}".format(vmname_andid))
+
+        except Exception as exp:
+            try:
+                self.delete_vminstance(vapp_uuid)
+            except Exception as exp2:
+                self.logger.error("new_vminstance rollback fail {}".format(exp2))
+            # it might be a case if specific mandatory entry in dict is empty or some other pyVcloud exception
+            self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
+                              .format(name, exp))
+            raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
+                                           .format(name, exp))
+
+        # check if vApp deployed and if that the case return vApp UUID otherwise -1
+        wait_time = 0
+        vapp_uuid = None
+        while wait_time <= MAX_WAIT_TIME:
+            try:
+                vapp_resource = vdc_obj.get_vapp(vmname_andid)
+                vapp = VApp(self.client, resource=vapp_resource)
+            except Exception as exp:
+                raise vimconn.vimconnUnexpectedResponse(
+                        "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                        .format(vmname_andid, exp))
+
+            #if vapp and vapp.me.deployed:
+            if vapp and vapp_resource.get('deployed') == 'true':
+                vapp_uuid = vapp_resource.get('id').split(':')[-1]
+                break
+            else:
+                self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
+                time.sleep(INTERVAL_TIME)
+
+            wait_time +=INTERVAL_TIME
+
+        #SET Affinity Rule for VM
+        #Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
+        #While creating VIM account user has to pass the Host Group names in availability_zone list
+        #"availability_zone" is a  part of VIM "config" parameters
+        #For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
+        #Host groups are referred as availability zones
+        #With following procedure, deployed VM will be added into a VM group.
+        #Then A VM to Host Affinity rule will be created using the VM group & Host group.
+        if(availability_zone_list):
+            self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
+            #Admin access required for creating Affinity rules
+            client = self.connect_as_admin()
+            if not client:
+                raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
+            else:
+                self.client = client
+            if self.client:
+                headers = {'Accept':'application/*+xml;version=27.0',
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            #Step1: Get provider vdc details from organization
+            pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
+            if pvdc_href is not None:
+            #Step2: Found required pvdc, now get resource pool information
+                respool_href = self.get_resource_pool_details(pvdc_href, headers)
+                if respool_href is None:
+                    #Raise error if respool_href not found
+                    msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
+                           .format(pvdc_href)
+                    self.log_message(msg)
+
+            #Step3: Verify requested availability zone(hostGroup) is present in vCD
+            # get availability Zone
+            vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
+            # check if provided av zone(hostGroup) is present in vCD VIM
+            status = self.check_availibility_zone(vm_az, respool_href, headers)
+            if status is False:
+                msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
+                       "resource pool {} status: {}".format(vm_az,respool_href,status)
+                self.log_message(msg)
+            else:
+                self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
+
+            #Step4: Find VM group references to create vm group
+            vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
+            if vmgrp_href == None:
+                msg = "new_vminstance(): No reference to VmGroup found in resource pool"
+                self.log_message(msg)
+
+            #Step5: Create a VmGroup with name az_VmGroup
+            vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
+            status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
+            if status is not True:
+                msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
+                self.log_message(msg)
+
+            #VM Group url to add vms to vm group
+            vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
+
+            #Step6: Add VM to VM Group
+            #Find VM uuid from vapp_uuid
+            vm_details = self.get_vapp_details_rest(vapp_uuid)
+            vm_uuid = vm_details['vmuuid']
+
+            status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
+            if status is not True:
+                msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
+                self.log_message(msg)
+
+            #Step7: Create VM to Host affinity rule
+            addrule_href = self.get_add_rule_reference (respool_href, headers)
+            if addrule_href is None:
+                msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
+                      .format(respool_href)
+                self.log_message(msg)
+
+            status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity",  headers)
+            if status is False:
+                msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
+                      .format(name, vm_az)
+                self.log_message(msg)
+            else:
+                self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
+                                    .format(name, vm_az))
+            #Reset token to a normal user to perform other operations
+            self.get_token()
+
+        if vapp_uuid is not None:
+            return vapp_uuid, None
+        else:
+            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
+
+    def create_config_drive_iso(self, user_data):
+        tmpdir = tempfile.mkdtemp()
+        iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
+        latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
+        os.makedirs(latest_dir)
+        with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
+                open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
+            userdata_file_obj.write(user_data)
+            meta_file_obj.write(json.dumps({"availability_zone": "nova",
+                                            "launch_index": 0,
+                                            "name": "ConfigDrive",
+                                            "uuid": str(uuid.uuid4())}
+                                           )
+                                )
+        genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
+            iso_path=iso_path, source_dir_path=tmpdir)
+        self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
+        try:
+            FNULL = open(os.devnull, 'w')
+            subprocess.check_call(genisoimage_cmd, shell=True, stdout=FNULL)
+        except subprocess.CalledProcessError as e:
+            shutil.rmtree(tmpdir, ignore_errors=True)
+            error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+        return iso_path
+
+    def upload_iso_to_catalog(self, catalog_id, iso_file_path):
+        if not os.path.isfile(iso_file_path):
+            error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+        iso_file_stat = os.stat(iso_file_path)
+        xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
+                            <Media
+                                xmlns="http://www.vmware.com/vcloud/v1.5"
+                                name="{iso_name}"
+                                size="{iso_size}"
+                                imageType="iso">
+                                <Description>ISO image for config-drive</Description>
+                            </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+        headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
+        catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
+        response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
+
+        if response.status_code != 201:
+            error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
+            self.logger.error(error_msg)
+            raise Exception(error_msg)
+
+        catalogItem = XmlElementTree.fromstring(response.content)
+        entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
+        entity_href = entity.get('href')
+
+        response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
+        if response.status_code != 200:
+            raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
+
+        match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
+        if match:
+            media_upload_href = match.group(1)
+        else:
+            raise Exception('Could not parse the upload URL for the media file from the last response')
+        upload_iso_task = self.get_task_from_response(response.content)
+        headers['Content-Type'] = 'application/octet-stream'
+        response = self.perform_request(req_type='PUT',
+                                        url=media_upload_href,
+                                        headers=headers,
+                                        data=open(iso_file_path, 'rb'))
+
+        if response.status_code != 200:
+            raise Exception('PUT request to "{}" failed'.format(media_upload_href))
+        result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
+        if result.get('status') != 'success':
+            raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
+
+    def get_vcd_availibility_zones(self,respool_href, headers):
+        """ Method to find presence of av zone is VIM resource pool
+
+            Args:
+                respool_href - resource pool href
+                headers - header information
+
+            Returns:
+               vcd_az - list of azone present in vCD
+        """
+        vcd_az = []
+        url=respool_href
+        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
+        else:
+        #Get the href to hostGroups and find provided hostGroup is present in it
+            resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
+                                hostGroup = schild.attrib.get('href')
+                                hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
+                                if hg_resp.status_code != requests.codes.ok:
+                                    self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
+                                else:
+                                    hg_resp_xml =  XmlElementTree.fromstring(hg_resp.content)
+                                    for hostGroup in hg_resp_xml:
+                                        if 'HostGroup' in hostGroup.tag:
+                                            #append host group name to the list
+                                            vcd_az.append(hostGroup.attrib.get("name"))
+        return vcd_az
+
+
+    def set_availability_zones(self):
+        """
+        Set vim availability zone
+        """
+
+        vim_availability_zones = None
+        availability_zone = None
+        if 'availability_zone' in self.config:
+            vim_availability_zones = self.config.get('availability_zone')
+        if isinstance(vim_availability_zones, str):
+            availability_zone = [vim_availability_zones]
+        elif isinstance(vim_availability_zones, list):
+            availability_zone = vim_availability_zones
+        else:
+            return availability_zone
+
+        return availability_zone
+
+
+    def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+        """
+        Return the availability zone to be used by the created VM.
+        returns: The VIM availability zone to be used or None
+        """
+        if availability_zone_index is None:
+            if not self.config.get('availability_zone'):
+                return None
+            elif isinstance(self.config.get('availability_zone'), str):
+                return self.config['availability_zone']
+            else:
+                return self.config['availability_zone'][0]
+
+        vim_availability_zones = self.availability_zone
+
+        # check if VIM offer enough availability zones describe in the VNFD
+        if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+            # check if all the names of NFV AV match VIM AV names
+            match_by_index = False
+            for av in availability_zone_list:
+                if av not in vim_availability_zones:
+                    match_by_index = True
+                    break
+            if match_by_index:
+                self.logger.debug("Required Availability zone or Host Group not found in VIM config")
+                self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
+                self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
+                self.logger.debug("VIM Availability zones will be used by index")
+                return vim_availability_zones[availability_zone_index]
+            else:
+                return availability_zone_list[availability_zone_index]
+        else:
+            raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
+
+
+    def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
+        """ Method to create VM to Host Affinity rule in vCD
+
+        Args:
+            addrule_href - href to make a POST request
+            vmgrpname - name of the VM group created
+            hostgrpnmae - name of the host group created earlier
+            polarity - Affinity or Anti-affinity (default: Affinity)
+            headers - headers to make REST call
+
+        Returns:
+            True- if rule is created
+            False- Failed to create rule due to some error
+
+        """
+        task_status = False
+        rule_name = polarity + "_" + vmgrpname
+        payload = """<?xml version="1.0" encoding="UTF-8"?>
+                     <vmext:VMWVmHostAffinityRule
+                       xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
+                       xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                       type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
+                       <vcloud:Name>{}</vcloud:Name>
+                       <vcloud:IsEnabled>true</vcloud:IsEnabled>
+                       <vcloud:IsMandatory>true</vcloud:IsMandatory>
+                       <vcloud:Polarity>{}</vcloud:Polarity>
+                       <vmext:HostGroupName>{}</vmext:HostGroupName>
+                       <vmext:VmGroupName>{}</vmext:VmGroupName>
+                     </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
+
+        resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
+
+        if resp.status_code != requests.codes.accepted:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
+            task_status = False
+            return task_status
+        else:
+            affinity_task = self.get_task_from_response(resp.content)
+            self.logger.debug ("affinity_task: {}".format(affinity_task))
+            if affinity_task is None or affinity_task is False:
+                raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
+            # wait for task to complete
+            result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
+            if result.get('status') == 'success':
+                self.logger.debug("Successfully created affinity rule {}".format(rule_name))
+                return True
+            else:
+                raise vimconn.vimconnUnexpectedResponse(
+                      "failed to create affinity rule {}".format(rule_name))
+
+
+    def get_add_rule_reference (self, respool_href, headers):
+        """ This method finds href to add vm to host affinity rule to vCD
+
+        Args:
+            respool_href- href to resource pool
+            headers- header information to make REST call
+
+        Returns:
+            None - if no valid href to add rule found or
+            addrule_href - href to add vm to host affinity rule of resource pool
+        """
+        addrule_href = None
+        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+        else:
+
+            resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
+                                schild.attrib.get('rel') == "add":
+                                addrule_href = schild.attrib.get('href')
+                                break
+
+        return addrule_href
+
+
+    def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
+        """ Method to add deployed VM to newly created VM Group.
+            This is required to create VM to Host affinity in vCD
+
+        Args:
+            vm_uuid- newly created vm uuid
+            vmGroupNameURL- URL to VM Group name
+            vmGroup_name- Name of VM group created
+            headers- Headers for REST request
+
+        Returns:
+            True- if VM added to VM group successfully
+            False- if any error encounter
+        """
+
+        addvm_resp = self.perform_request(req_type='GET',url=vmGroupNameURL, headers=headers)#, data=payload)
+
+        if addvm_resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call to get VM Group Name url {} failed. Return status code {}"\
+                               .format(vmGroupNameURL, addvm_resp.status_code))
+            return False
+        else:
+            resp_xml = XmlElementTree.fromstring(addvm_resp.content)
+            for child in resp_xml:
+                if child.tag.split('}')[1] == 'Link':
+                    if child.attrib.get("rel") == "addVms":
+                        addvmtogrpURL =  child.attrib.get("href")
+
+        #Get vm details
+        url_list = [self.url, '/api/vApp/vm-',vm_uuid]
+        vmdetailsURL = ''.join(url_list)
+
+        resp = self.perform_request(req_type='GET',url=vmdetailsURL, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
+            return False
+
+        #Parse VM details
+        resp_xml = XmlElementTree.fromstring(resp.content)
+        if resp_xml.tag.split('}')[1] == "Vm":
+            vm_id = resp_xml.attrib.get("id")
+            vm_name = resp_xml.attrib.get("name")
+            vm_href = resp_xml.attrib.get("href")
+            #print vm_id, vm_name, vm_href
+        #Add VM into VMgroup
+        payload = """<?xml version="1.0" encoding="UTF-8"?>\
+                   <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
+                    xmlns="http://www.vmware.com/vcloud/versions" \
+                    xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
+                    xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
+                    xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
+                    xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
+                    xmlns:ns7="http://www.vmware.com/schema/ovf" \
+                    xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
+                    xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
+                    <ns2:VmReference href="{}" id="{}" name="{}" \
+                    type="application/vnd.vmware.vcloud.vm+xml" />\
+                   </ns2:Vms>""".format(vm_href, vm_id, vm_name)
+
+        addvmtogrp_resp = self.perform_request(req_type='POST',url=addvmtogrpURL, headers=headers, data=payload)
+
+        if addvmtogrp_resp.status_code != requests.codes.accepted:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
+            return False
+        else:
+            self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
+            return True
+
+
+    def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
+        """Method to create a VM group in vCD
+
+           Args:
+              vmgroup_name : Name of VM group to be created
+              vmgroup_href : href for vmgroup
+              headers- Headers for REST request
+
+           Returns:
+              True when the creation task completes with status 'success';
+              False when the initial POST is rejected (non-202 response)
+
+           Raises:
+              vimconn.vimconnUnexpectedResponse: when the POST returns no task
+              or the creation task does not finish successfully
+        """
+        #POST to add URL with required data
+        vmgroup_status = False
+        payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
+                       xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
+                   <vmCount>1</vmCount>\
+                   </VMWVmGroup>""".format(vmgroup_name)
+        resp = self.perform_request(req_type='POST',url=vmgroup_href, headers=headers, data=payload)
+
+        # vCD answers 202 Accepted with an asynchronous task on success
+        if resp.status_code != requests.codes.accepted:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
+            return vmgroup_status
+        else:
+            vmgroup_task = self.get_task_from_response(resp.content)
+            if vmgroup_task is None or vmgroup_task is False:
+                raise vimconn.vimconnUnexpectedResponse(
+                    "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+
+            # wait for task to complete
+            result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
+
+            if result.get('status') == 'success':
+                self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
+                #time.sleep(10)
+                vmgroup_status = True
+                return vmgroup_status
+            else:
+                raise vimconn.vimconnUnexpectedResponse(\
+                        "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+
+
+    def find_vmgroup_reference(self, url, headers):
+        """ Method to find the 'add' link on a resource pool, which is the
+            href needed to create a new VMGroup (required to add a created VM)
+            Args:
+               url- resource pool href
+               headers- header information
+
+            Returns:
+               href to use for VM group creation, or None (implicitly, by
+               falling off the end) when the GET fails or no link matches
+        """
+        #Perform GET on resource pool to find 'add' link to create VMGroup
+        #https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
+        vmgrp_href = None
+        resp = self.perform_request(req_type='GET',url=url, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
+        else:
+            #Get the href to add vmGroup to vCD
+            resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            #Find href with type VMGroup and rel with add
+                            # returns on the first match across all pools
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
+                                and schild.attrib.get('rel') == "add":
+                                vmgrp_href = schild.attrib.get('href')
+                                return vmgrp_href
+
+
+    def check_availibility_zone(self, az, respool_href, headers):
+        """ Method to verify requested av zone is present or not in provided
+            resource pool
+
+            Args:
+                az - name of hostgroup (availibility_zone)
+                respool_href - Resource Pool href
+                headers - Headers to make REST call
+            Returns:
+                az_found - True if availibility_zone is found else False
+
+            NOTE(review): mutates the caller's ``headers`` dict in place
+            (pins Accept to API version 27.0).
+        """
+        az_found = False
+        headers['Accept']='application/*+xml;version=27.0'
+        resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+        else:
+        #Get the href to hostGroups and find provided hostGroup is present in it
+            resp_xml = XmlElementTree.fromstring(resp.content)
+
+            for child in resp_xml:
+                if 'VMWProviderVdcResourcePool' in child.tag:
+                    for schild in child:
+                        if 'Link' in schild.tag:
+                            # follow the hostGroups link of each resource pool
+                            if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
+                                hostGroup_href = schild.attrib.get('href')
+                                hg_resp = self.perform_request(req_type='GET',url=hostGroup_href, headers=headers)
+                                if hg_resp.status_code != requests.codes.ok:
+                                    self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
+                                else:
+                                    hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
+                                    for hostGroup in hg_resp_xml:
+                                        if 'HostGroup' in hostGroup.tag:
+                                            if hostGroup.attrib.get("name") == az:
+                                                az_found = True
+                                                # break exits only this innermost
+                                                # loop; outer scanning continues
+                                                break
+        return az_found
+
+
+    def get_pvdc_for_org(self, org_vdc, headers):
+        """ This method gets provider vdc references from organisation
+
+            Args:
+               org_vdc - name of the organisation VDC to find pvdc
+               headers - headers to make REST call
+
+            Returns:
+               None - if no pvdc href found (implicitly, by falling off
+               the end) else
+               pvdc_href - href to pvdc
+
+            Raises:
+               vimconn.vimconnException: when an intermediate GET on a pvdc
+               or vdcReferences URL fails
+        """
+
+        #Get provider VDC references from vCD
+        pvdc_href = None
+        #url = '<vcd url>/api/admin/extension/providerVdcReferences'
+        url_list = [self.url, '/api/admin/extension/providerVdcReferences']
+        url = ''.join(url_list)
+
+        response = self.perform_request(req_type='GET',url=url, headers=headers)
+        if response.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}"\
+                               .format(url, response.status_code))
+        else:
+            xmlroot_response = XmlElementTree.fromstring(response.content)
+            # NOTE(review): the loop variable name `child` is reused by the two
+            # nested loops below; each level shadows the outer one
+            for child in xmlroot_response:
+                if 'ProviderVdcReference' in child.tag:
+                    pvdc_href = child.attrib.get('href')
+                    #Get vdcReferences to find org
+                    pvdc_resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
+                    if pvdc_resp.status_code != requests.codes.ok:
+                        raise vimconn.vimconnException("REST API call {} failed. "\
+                                                       "Return status code {}"\
+                                                       .format(url, pvdc_resp.status_code))
+
+                    pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
+                    for child in pvdc_resp_xml:
+                        if 'Link' in child.tag:
+                            if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
+                                vdc_href = child.attrib.get('href')
+
+                                #Check if provided org is present in vdc
+                                vdc_resp = self.perform_request(req_type='GET',
+                                                                url=vdc_href,
+                                                                headers=headers)
+                                if vdc_resp.status_code != requests.codes.ok:
+                                    raise vimconn.vimconnException("REST API call {} failed. "\
+                                                                   "Return status code {}"\
+                                                                   .format(url, vdc_resp.status_code))
+                                vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
+                                for child in vdc_resp_xml:
+                                    if 'VdcReference' in child.tag:
+                                        if child.attrib.get('name') == org_vdc:
+                                            # first pvdc containing the org wins
+                                            return pvdc_href
+
+
+    def get_resource_pool_details(self, pvdc_href, headers):
+        """ Method to get resource pool information.
+            Host groups are property of resource group.
+            To get host groups, we need to GET details of resource pool.
+
+            Args:
+                pvdc_href: href to pvdc details
+                headers: headers
+
+            Returns:
+                respool_href - Returns href link reference to resource pool
+        """
+        respool_href = None
+        resp = self.perform_request(req_type='GET',url=pvdc_href, headers=headers)
+
+        if resp.status_code != requests.codes.ok:
+            self.logger.debug ("REST API call {} failed. Return status code {}"\
+                               .format(pvdc_href, resp.status_code))
+        else:
+            respool_resp_xml = XmlElementTree.fromstring(resp.content)
+            for child in respool_resp_xml:
+                if 'Link' in child.tag:
+                    if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
+                        respool_href = child.attrib.get("href")
+                        break
+        return respool_href
+
+
+    def log_message(self, msg):
+        """
+            Method to log error messages related to Affinity rule creation
+            in new_vminstance & raise Exception
+                Args :
+                    msg - Error message to be logged
+
+        """
+        #get token to connect vCD as a normal user
+        self.get_token()
+        self.logger.debug(msg)
+        raise vimconn.vimconnException(msg)
+
+
+    ##
+    ##
+    ##  based on current discussion
+    ##
+    ##
+    ##  server:
+    #   created: '2016-09-08T11:51:58'
+    #   description: simple-instance.linux1.1
+    #   flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
+    #   hostId: e836c036-74e7-11e6-b249-0800273e724c
+    #   image: dde30fe6-75a9-11e6-ad5f-0800273e724c
+    #   status: ACTIVE
+    #   error_msg:
+    #   interfaces: ...
+    #
+    def get_vminstance(self, vim_vm_uuid=None):
+        """Returns the VM instance information from VIM
+
+        Args:
+            vim_vm_uuid: UUID of the vApp/VM to query
+
+        Returns:
+            dict with keys created, description, status, hostId, error_msg,
+            vim_info and interfaces (MANO format)
+
+        Raises:
+            vimconn.vimconnConnectionException: when the tenant VDC cannot be
+                resolved
+            vimconn.vimconnNotFoundException: when no vApp matches the UUID
+        """
+
+        self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
+
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnConnectionException(
+                "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
+        if not vm_info_dict:
+            self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+            raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+
+        status_key = vm_info_dict['status']
+        error = ''
+        try:
+            # map the vCD numeric status to the MANO status string
+            vm_dict = {'created': vm_info_dict['created'],
+                       'description': vm_info_dict['name'],
+                       'status': vcdStatusCode2manoFormat[int(status_key)],
+                       'hostId': vm_info_dict['vmuuid'],
+                       'error_msg': error,
+                       'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
+
+            if 'interfaces' in vm_info_dict:
+                vm_dict['interfaces'] = vm_info_dict['interfaces']
+            else:
+                vm_dict['interfaces'] = []
+        except KeyError:
+            # missing keys in the vCD answer: report an inconsistent state
+            # instead of propagating the error
+            vm_dict = {'created': '',
+                       'description': '',
+                       'status': vcdStatusCode2manoFormat[int(-1)],
+                       'hostId': vm_info_dict['vmuuid'],
+                       'error_msg': "Inconsistency state",
+                       'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
+
+        return vm_dict
+
+    def delete_vminstance(self, vm__vim_uuid, created_items=None):
+        """Method poweroff and remove VM instance from vcloud director network.
+
+        Args:
+            vm__vim_uuid: VM UUID
+
+        Returns:
+            Returns the instance identifier
+        """
+
+        self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
+
+        org, vdc = self.get_vdc_details()
+        vdc_obj = VDC(self.client, href=vdc.get('href'))
+        if vdc_obj is None:
+            self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
+                self.tenant_name))
+            raise vimconn.vimconnException(
+                "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        try:
+            vapp_name = self.get_namebyvappid(vm__vim_uuid)
+            if vapp_name is None:
+                self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+            self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+            vapp_resource = vdc_obj.get_vapp(vapp_name)
+            vapp = VApp(self.client, resource=vapp_resource)
+
+            # Delete vApp and wait for status change if task executed and vApp is None.
+
+            if vapp:
+                if vapp_resource.get('deployed') == 'true':
+                    self.logger.info("Powering off vApp {}".format(vapp_name))
+                    #Power off vApp
+                    powered_off = False
+                    wait_time = 0
+                    while wait_time <= MAX_WAIT_TIME:
+                        power_off_task = vapp.power_off()
+                        result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
+
+                        if result.get('status') == 'success':
+                            powered_off = True
+                            break
+                        else:
+                            self.logger.info("Wait for vApp {} to power off".format(vapp_name))
+                            time.sleep(INTERVAL_TIME)
+
+                        wait_time +=INTERVAL_TIME
+                    if not powered_off:
+                        self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
+                    else:
+                        self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
+
+                    #Undeploy vApp
+                    self.logger.info("Undeploy vApp {}".format(vapp_name))
+                    wait_time = 0
+                    undeployed = False
+                    while wait_time <= MAX_WAIT_TIME:
+                        vapp = VApp(self.client, resource=vapp_resource)
+                        if not vapp:
+                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+                        undeploy_task = vapp.undeploy()
+
+                        result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
+                        if result.get('status') == 'success':
+                            undeployed = True
+                            break
+                        else:
+                            self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
+                            time.sleep(INTERVAL_TIME)
+
+                        wait_time +=INTERVAL_TIME
+
+                    if not undeployed:
+                        self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
+
+                # delete vapp
+                self.logger.info("Start deletion of vApp {} ".format(vapp_name))
+
+                if vapp is not None:
+                    wait_time = 0
+                    result = False
+
+                    while wait_time <= MAX_WAIT_TIME:
+                        vapp = VApp(self.client, resource=vapp_resource)
+                        if not vapp:
+                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+
+                        delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
+
+                        result = self.client.get_task_monitor().wait_for_success(task=delete_task)
+                        if result.get('status') == 'success':
+                            break
+                        else:
+                            self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
+                            time.sleep(INTERVAL_TIME)
+
+                        wait_time +=INTERVAL_TIME
+
+                    if result is None:
+                        self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
+                    else:
+                        self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
+                        config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
+                        catalog_list = self.get_image_list()
+                        try:
+                            config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
+                                                       if catalog_['name'] == config_drive_catalog_name][0]
+                        except IndexError:
+                            pass
+                        if config_drive_catalog_id:
+                            self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
+                                              'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
+                            self.delete_image(config_drive_catalog_id)
+                        return vm__vim_uuid
+        except:
+            self.logger.debug(traceback.format_exc())
+            raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
+
+
+    def refresh_vms_status(self, vm_list):
+        """Get the status of the virtual machines and their interfaces/ports
+           Params: the list of VM identifiers
+           Returns a dictionary with:
+                vm_id:          #VIM id of this Virtual Machine
+                    status:     #Mandatory. Text with one of:
+                                #  DELETED (not found at vim)
+                                #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                #  OTHER (Vim reported other status not understood)
+                                #  ERROR (VIM indicates an ERROR status)
+                                #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                                #  CREATING (on building process), ERROR
+                                #  ACTIVE:NoMgmtIP (Active but any of its interface has an IP address
+                                #
+                    error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                    vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                    interfaces:
+                     -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                        mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                        vim_net_id:       #network id where this interface is connected
+                        vim_interface_id: #interface/port VIM id
+                        ip_address:       #null, or text with IPv4, IPv6 address
+        """
+
+        self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
+
+        org,vdc = self.get_vdc_details()
+        if vdc is None:
+            raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        vms_dict = {}
+        nsx_edge_list = []
+        for vmuuid in vm_list:
+            vapp_name = self.get_namebyvappid(vmuuid)
+            if vapp_name is not None:
+
+                try:
+                    vm_pci_details = self.get_vm_pci_details(vmuuid)
+                    vdc_obj = VDC(self.client, href=vdc.get('href'))
+                    vapp_resource = vdc_obj.get_vapp(vapp_name)
+                    the_vapp = VApp(self.client, resource=vapp_resource)
+
+                    vm_details = {}
+                    for vm in the_vapp.get_all_vms():
+                        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                        response = self.perform_request(req_type='GET',
+                                                        url=vm.get('href'),
+                                                        headers=headers)
+
+                        if response.status_code != 200:
+                            self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
+                                                            "status code : {}".format(vm.get('href'),
+                                                                                    response.content,
+                                                                               response.status_code))
+                            raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
+                                                                         "VM details")
+                        xmlroot = XmlElementTree.fromstring(response.content)
+
+                        
+                        result = response.content.replace("\n"," ")
+                        hdd_match = re.search('vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=',result)
+                        if hdd_match:
+                            hdd_mb = hdd_match.group(1)
+                            vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
+                        cpus_match = re.search('<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>',result)
+                        if cpus_match:
+                            cpus = cpus_match.group(1)
+                            vm_details['cpus'] = int(cpus) if cpus else None
+                        memory_mb = re.search('<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>',result).group(1)
+                        vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
+                        vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
+                        vm_details['id'] = xmlroot.get('id')
+                        vm_details['name'] = xmlroot.get('name')
+                        vm_info = [vm_details]
+                        if vm_pci_details:
+                            vm_info[0].update(vm_pci_details)
+
+                        vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                   'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                   'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+
+                        # get networks
+                        vm_ip = None
+                        vm_mac = None
+                        networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
+                        for network in networks:
+                            mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
+                            vm_mac = mac_s.group(1) if mac_s else None
+                            ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
+                            vm_ip = ip_s.group(1) if ip_s else None
+
+                            if vm_ip is None:
+                                if not nsx_edge_list:
+                                    nsx_edge_list = self.get_edge_details()
+                                    if nsx_edge_list is None:
+                                        raise vimconn.vimconnException("refresh_vms_status:"\
+                                                                       "Failed to get edge details from NSX Manager")
+                                if vm_mac is not None:
+                                    vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
+
+                            net_s = re.search('network="(.*?)"',network)
+                            network_name = net_s.group(1) if net_s else None
+
+                            vm_net_id = self.get_network_id_by_name(network_name)
+                            interface = {"mac_address": vm_mac,
+                                         "vim_net_id": vm_net_id,
+                                         "vim_interface_id": vm_net_id,
+                                         "ip_address": vm_ip}
+
+                            vm_dict["interfaces"].append(interface)
+
+                    # add a vm to vm dict
+                    vms_dict.setdefault(vmuuid, vm_dict)
+                    self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
+                except Exception as exp:
+                    self.logger.debug("Error in response {}".format(exp))
+                    self.logger.debug(traceback.format_exc())
+
+        return vms_dict
+
+
    def get_edge_details(self):
        """Query the NSX Manager REST API for the list of deployed NSX edges.

        Returns:
            list: NSX edge identifiers (content of each <edgeSummary>/<id>),
                  when at least one edge is found.
            None: when the NSX Manager answered with a non-OK HTTP status.

        Raises:
            vimconn.vimconnException: when no edges exist, the response has an
                unexpected shape, or the request itself fails.
        """
        edge_list = []
        rheaders = {'Content-Type': 'application/xml'}
        nsx_api_url = '/api/4.0/edges'

        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))

        try:
            # NOTE(review): TLS verification is disabled (verify=False); presumably the
            # NSX Manager uses a self-signed certificate -- confirm before hardening.
            resp = requests.get(self.nsx_manager + nsx_api_url,
                                auth = (self.nsx_user, self.nsx_password),
                                verify = False, headers = rheaders)
            if resp.status_code == requests.codes.ok:
                paged_Edge_List = XmlElementTree.fromstring(resp.text)
                for edge_pages in paged_Edge_List:
                    if edge_pages.tag == 'edgePage':
                        for edge_summary in edge_pages:
                            if edge_summary.tag == 'pagingInfo':
                                # A totalCount of 0 means the deployment has no edges at all
                                for element in edge_summary:
                                    if element.tag == 'totalCount' and element.text == '0':
                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
                                                                       .format(self.nsx_manager))

                            if edge_summary.tag == 'edgeSummary':
                                for element in edge_summary:
                                    if element.tag == 'id':
                                        edge_list.append(element.text)
                    else:
                        # NOTE(review): any non-<edgePage> child aborts the scan; this assumes
                        # pagedEdgeList only contains edgePage children -- confirm against NSX API.
                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
                                                       .format(self.nsx_manager))

                if not edge_list:
                    raise vimconn.vimconnException("get_edge_details: "\
                                                   "No NSX edge details found: {}"
                                                   .format(self.nsx_manager))
                else:
                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
                    return edge_list
            else:
                # Non-OK HTTP status: log and return None (callers must handle it)
                self.logger.debug("get_edge_details: "
                                  "Failed to get NSX edge details from NSX Manager: {}"
                                  .format(resp.content))
                return None

        except Exception as exp:
            # Note: this also wraps the vimconnExceptions raised above into a new one
            self.logger.debug("get_edge_details: "\
                              "Failed to get NSX edge details from NSX Manager: {}"
                              .format(exp))
            raise vimconn.vimconnException("get_edge_details: "\
                                           "Failed to get NSX edge details from NSX Manager: {}"
                                           .format(exp))
+
+
+    def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
+        """Get IP address details from NSX edges, using the MAC address
+           PARAMS: nsx_edges : List of NSX edges
+                   mac_address : Find IP address corresponding to this MAC address
+           Returns: IP address corrresponding to the provided MAC address
+        """
+
+        ip_addr = None
+        rheaders = {'Content-Type': 'application/xml'}
+
+        self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
+
+        try:
+            for edge in nsx_edges:
+                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
+
+                resp = requests.get(self.nsx_manager + nsx_api_url,
+                                    auth = (self.nsx_user, self.nsx_password),
+                                    verify = False, headers = rheaders)
+
+                if resp.status_code == requests.codes.ok:
+                    dhcp_leases = XmlElementTree.fromstring(resp.text)
+                    for child in dhcp_leases:
+                        if child.tag == 'dhcpLeaseInfo':
+                            dhcpLeaseInfo = child
+                            for leaseInfo in dhcpLeaseInfo:
+                                for elem in leaseInfo:
+                                    if (elem.tag)=='macAddress':
+                                        edge_mac_addr = elem.text
+                                    if (elem.tag)=='ipAddress':
+                                        ip_addr = elem.text
+                                if edge_mac_addr is not None:
+                                    if edge_mac_addr == mac_address:
+                                        self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
+                                                          .format(ip_addr, mac_address,edge))
+                                        return ip_addr
+                else:
+                    self.logger.debug("get_ipaddr_from_NSXedge: "\
+                                      "Error occurred while getting DHCP lease info from NSX Manager: {}"
+                                      .format(resp.content))
+
+            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+            return None
+
+        except XmlElementTree.ParseError as Err:
+            self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
+
+
+    def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
+        """Send and action over a VM instance from VIM
+        Returns the vm_id if the action was successfully sent to the VIM"""
+
+        self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
+        if vm__vim_uuid is None or action_dict is None:
+            raise vimconn.vimconnException("Invalid request. VM id or action is None.")
+
+        org, vdc = self.get_vdc_details()
+        if vdc is None:
+            raise  vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+
+        vapp_name = self.get_namebyvappid(vm__vim_uuid)
+        if vapp_name is None:
+            self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+            raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+        else:
+            self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+
+        try:
+            vdc_obj = VDC(self.client, href=vdc.get('href'))
+            vapp_resource = vdc_obj.get_vapp(vapp_name)
+            vapp = VApp(self.client, resource=vapp_resource)
+            if "start" in action_dict:
+                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
+                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
+                result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+                self.instance_actions_result("start", result, vapp_name)
+            elif "rebuild" in action_dict:
+                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
+                rebuild_task = vapp.deploy(power_on=True)
+                result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
+                self.instance_actions_result("rebuild", result, vapp_name)
+            elif "pause" in action_dict:
+                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
+                pause_task = vapp.undeploy(action='suspend')
+                result = self.client.get_task_monitor().wait_for_success(task=pause_task)
+                self.instance_actions_result("pause", result, vapp_name)
+            elif "resume" in action_dict:
+                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
+                poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
+                result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+                self.instance_actions_result("resume", result, vapp_name)
+            elif "shutoff" in action_dict or "shutdown" in action_dict:
+                action_name , value = action_dict.items()[0]
+                #For python3
+                #action_name , value = list(action_dict.items())[0]
+                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
+                shutdown_task = vapp.shutdown()
+                result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
+                if action_name == "shutdown":
+                    self.instance_actions_result("shutdown", result, vapp_name)
+                else:
+                    self.instance_actions_result("shutoff", result, vapp_name)
+            elif "forceOff" in action_dict:
+                result = vapp.undeploy(action='powerOff')
+                self.instance_actions_result("forceOff", result, vapp_name)
+            elif "reboot" in action_dict:
+                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
+                reboot_task = vapp.reboot()
+                self.client.get_task_monitor().wait_for_success(task=reboot_task)
+            else:
+                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
+            return vm__vim_uuid
+        except Exception as exp :
+            self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
+
+    def instance_actions_result(self, action, result, vapp_name):
+        if result.get('status') == 'success':
+            self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
+        else:
+            self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
+
+    def get_vminstance_console(self, vm_id, console_type="novnc"):
+        """
+        Get a console for the virtual machine
+        Params:
+            vm_id: uuid of the VM
+            console_type, can be:
+                "novnc" (by default), "xvpvnc" for VNC types,
+                "rdp-html5" for RDP types, "spice-html5" for SPICE types
+        Returns dict with the console parameters:
+                protocol: ssh, ftp, http, https, ...
+                server:   usually ip address
+                port:     the http, ssh, ... port
+                suffix:   extra text, e.g. the http path and query string
+        """
+        console_dict = {}
+
+        if console_type==None or console_type=='novnc':
+
+            url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='POST',
+                                         url=url_rest_call,
+                                           headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {}".format(url_rest_call,
+                                                         response.content,
+                                                    response.status_code))
+                raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+                                                                     "VM Mks ticket details")
+            s = re.search("<Host>(.*?)</Host>",response.content)
+            console_dict['server'] = s.group(1) if s else None
+            s1 = re.search("<Port>(\d+)</Port>",response.content)
+            console_dict['port'] = s1.group(1) if s1 else None
+
+
+            url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='POST',
+                                         url=url_rest_call,
+                                           headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {}".format(url_rest_call,
+                                                         response.content,
+                                                    response.status_code))
+                raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+                                                                     "VM console details")
+            s = re.search(">.*?/(vm-\d+.*)</",response.content)
+            console_dict['suffix'] = s.group(1) if s else None
+            console_dict['protocol'] = "https"
+
+        return console_dict
+
+    # NOT USED METHODS in current version
+
+    def host_vim2gui(self, host, server_dict):
+        """Transform host dictionary from VIM format to GUI format,
+        and append to the server_dict
+        """
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def get_hosts_info(self):
+        """Get the information of deployed hosts
+        Returns the hosts content"""
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def get_hosts(self, vim_tenant):
+        """Get the hosts and deployed instances
+        Returns the hosts content"""
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def get_processor_rankings(self):
+        """Get the processor rankings in the VIM database"""
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def new_host(self, host_data):
+        """Adds a new host to VIM"""
+        '''Returns status code of the VIM response'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def new_external_port(self, port_data):
+        """Adds a external port to VIM"""
+        '''Returns the port identifier'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def new_external_network(self, net_name, net_type):
+        """Adds a external network to VIM (shared)"""
+        '''Returns the network identifier'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def connect_port_network(self, port_id, network_id, admin=False):
+        """Connects a external port to a network"""
+        '''Returns status code of the VIM response'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def new_vminstancefromJSON(self, vm_data):
+        """Adds a VM instance to VIM"""
+        '''Returns the instance identifier'''
+        raise vimconn.vimconnNotImplemented("Should have implemented this")
+
+    def get_network_name_by_id(self, network_uuid=None):
+        """Method gets vcloud director network named based on supplied uuid.
+
+        Args:
+            network_uuid: network_id
+
+        Returns:
+            The return network name.
+        """
+
+        if not network_uuid:
+            return None
+
+        try:
+            org_dict = self.get_org(self.org_uuid)
+            if 'networks' in org_dict:
+                org_network_dict = org_dict['networks']
+                for net_uuid in org_network_dict:
+                    if net_uuid == network_uuid:
+                        return org_network_dict[net_uuid]
+        except:
+            self.logger.debug("Exception in get_network_name_by_id")
+            self.logger.debug(traceback.format_exc())
+
+        return None
+
+    def get_network_id_by_name(self, network_name=None):
+        """Method gets vcloud director network uuid based on supplied name.
+
+        Args:
+            network_name: network_name
+        Returns:
+            The return network uuid.
+            network_uuid: network_id
+        """
+
+        if not network_name:
+            self.logger.debug("get_network_id_by_name() : Network name is empty")
+            return None
+
+        try:
+            org_dict = self.get_org(self.org_uuid)
+            if org_dict and 'networks' in org_dict:
+                org_network_dict = org_dict['networks']
+                for net_uuid,net_name in org_network_dict.items():
+                #For python3
+                #for net_uuid,net_name in org_network_dict.items():
+                    if net_name == network_name:
+                        return net_uuid
+
+        except KeyError as exp:
+            self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
+
+        return None
+
+    def list_org_action(self):
+        """
+        Method leverages vCloud director and query for available organization for particular user
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return XML respond
+        """
+        url_list = [self.url, '/api/org']
+        vm_list_rest_call = ''.join(url_list)
+
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            response = self.perform_request(req_type='GET',
+                                     url=vm_list_rest_call,
+                                           headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
+            if response.status_code == requests.codes.ok:
+                return response.content
+
+        return None
+
+    def get_org_action(self, org_uuid=None):
+        """
+        Method leverages vCloud director and retrieve available object for organization.
+
+        Args:
+            org_uuid - vCD organization uuid
+            self.client - is active connection.
+
+            Returns:
+                The return XML respond
+        """
+
+        if org_uuid is None:
+            return None
+
+        url_list = [self.url, '/api/org/', org_uuid]
+        vm_list_rest_call = ''.join(url_list)
+
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
+            if response.status_code == requests.codes.ok:
+                return response.content
+        return None
+
+    def get_org(self, org_uuid=None):
+        """
+        Method retrieves available organization in vCloud Director
+
+        Args:
+            org_uuid - is a organization uuid.
+
+            Returns:
+                The return dictionary with following key
+                    "network" - for network list under the org
+                    "catalogs" - for network list under the org
+                    "vdcs" - for vdc list under org
+        """
+
+        org_dict = {}
+
+        if org_uuid is None:
+            return org_dict
+
+        content = self.get_org_action(org_uuid=org_uuid)
+        try:
+            vdc_list = {}
+            network_list = {}
+            catalog_list = {}
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for child in vm_list_xmlroot:
+                if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
+                    vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                    org_dict['vdcs'] = vdc_list
+                if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
+                    network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                    org_dict['networks'] = network_list
+                if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
+                    catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                    org_dict['catalogs'] = catalog_list
+        except:
+            pass
+
+        return org_dict
+
+    def get_org_list(self):
+        """
+        Method retrieves available organization in vCloud Director
+
+        Args:
+            vca - is active VCA connection.
+
+            Returns:
+                The return dictionary and key for each entry VDC UUID
+        """
+
+        org_dict = {}
+
+        content = self.list_org_action()
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for vm_xml in vm_list_xmlroot:
+                if vm_xml.tag.split("}")[1] == 'Org':
+                    org_uuid = vm_xml.attrib['href'].split('/')[-1:]
+                    org_dict[org_uuid[0]] = vm_xml.attrib['name']
+        except:
+            pass
+
+        return org_dict
+
+    def vms_view_action(self, vdc_name=None):
+        """ Method leverages vCloud director vms query call
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return XML respond
+        """
+        vca = self.connect()
+        if vdc_name is None:
+            return None
+
+        url_list = [vca.host, '/api/vms/query']
+        vm_list_rest_call = ''.join(url_list)
+
+        if not (not vca.vcloud_session or not vca.vcloud_session.organization):
+            refs = filter(lambda ref: ref.name == vdc_name and ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml',
+                          vca.vcloud_session.organization.Link)
+            #For python3
+            #refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and\
+            #        ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
+            if len(refs) == 1:
+                response = Http.get(url=vm_list_rest_call,
+                                    headers=vca.vcloud_session.get_vcloud_headers(),
+                                    verify=vca.verify,
+                                    logger=vca.logger)
+                if response.status_code == requests.codes.ok:
+                    return response.content
+
+        return None
+
+    def get_vapp_list(self, vdc_name=None):
+        """
+        Method retrieves vApp list deployed vCloud director and returns a dictionary
+        contains a list of all vapp deployed for queried VDC.
+        The key for a dictionary is vApp UUID
+
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return dictionary and key for each entry vapp UUID
+        """
+
+        vapp_dict = {}
+        if vdc_name is None:
+            return vapp_dict
+
+        content = self.vms_view_action(vdc_name=vdc_name)
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for vm_xml in vm_list_xmlroot:
+                if vm_xml.tag.split("}")[1] == 'VMRecord':
+                    if vm_xml.attrib['isVAppTemplate'] == 'true':
+                        rawuuid = vm_xml.attrib['container'].split('/')[-1:]
+                        if 'vappTemplate-' in rawuuid[0]:
+                            # vm in format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
+                            # vm and use raw UUID as key
+                            vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
+        except:
+            pass
+
+        return vapp_dict
+
+    def get_vm_list(self, vdc_name=None):
+        """
+        Method retrieves VM's list deployed vCloud director. It returns a dictionary
+        contains a list of all VM's deployed for queried VDC.
+        The key for a dictionary is VM UUID
+
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return dictionary and key for each entry vapp UUID
+        """
+        vm_dict = {}
+
+        if vdc_name is None:
+            return vm_dict
+
+        content = self.vms_view_action(vdc_name=vdc_name)
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for vm_xml in vm_list_xmlroot:
+                if vm_xml.tag.split("}")[1] == 'VMRecord':
+                    if vm_xml.attrib['isVAppTemplate'] == 'false':
+                        rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+                        if 'vm-' in rawuuid[0]:
+                            # vm in format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5 we remove
+                            #  vm and use raw UUID as key
+                            vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+        except:
+            pass
+
+        return vm_dict
+
+    def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
+        """
+        Method retrieves VM deployed vCloud director. It returns VM attribute as dictionary
+        contains a list of all VM's deployed for queried VDC.
+        The key for a dictionary is VM UUID
+
+
+        Args:
+            vca - is active VCA connection.
+            vdc_name - is a vdc name that will be used to query vms action
+
+            Returns:
+                The return dictionary and key for each entry vapp UUID
+        """
+        vm_dict = {}
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed")
+
+        if vdc_name is None:
+            return vm_dict
+
+        content = self.vms_view_action(vdc_name=vdc_name)
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            for vm_xml in vm_list_xmlroot:
+                if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
+                    # lookup done by UUID
+                    if isuuid:
+                        if vapp_name in vm_xml.attrib['container']:
+                            rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+                            if 'vm-' in rawuuid[0]:
+                                vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+                                break
+                    # lookup done by Name
+                    else:
+                        if vapp_name in vm_xml.attrib['name']:
+                            rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+                            if 'vm-' in rawuuid[0]:
+                                vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+                                break
+        except:
+            pass
+
+        return vm_dict
+
+    def get_network_action(self, network_uuid=None):
+        """
+        Method leverages vCloud director and query network based on network uuid
+
+        Args:
+            vca - is active VCA connection.
+            network_uuid - is a network uuid
+
+            Returns:
+                The return XML respond
+        """
+
+        if network_uuid is None:
+            return None
+
+        url_list = [self.url, '/api/network/', network_uuid]
+        vm_list_rest_call = ''.join(url_list)
+
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            response = self.perform_request(req_type='GET',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+            #Retry login if session expired & retry sending request
+            if response.status_code == 403:
+                response = self.retry_rest('GET', vm_list_rest_call)
+
+            if response.status_code == requests.codes.ok:
+                return response.content
+
+        return None
+
+    def get_vcd_network(self, network_uuid=None):
+        """
+        Method retrieves available network from vCloud Director
+
+        Args:
+            network_uuid - is VCD network UUID
+
+        Each element serialized as key : value pair
+
+        Following keys available for access.    network_configuration['Gateway'}
+        <Configuration>
+          <IpScopes>
+            <IpScope>
+                <IsInherited>true</IsInherited>
+                <Gateway>172.16.252.100</Gateway>
+                <Netmask>255.255.255.0</Netmask>
+                <Dns1>172.16.254.201</Dns1>
+                <Dns2>172.16.254.202</Dns2>
+                <DnsSuffix>vmwarelab.edu</DnsSuffix>
+                <IsEnabled>true</IsEnabled>
+                <IpRanges>
+                    <IpRange>
+                        <StartAddress>172.16.252.1</StartAddress>
+                        <EndAddress>172.16.252.99</EndAddress>
+                    </IpRange>
+                </IpRanges>
+            </IpScope>
+        </IpScopes>
+        <FenceMode>bridged</FenceMode>
+
+        Returns:
+                The return dictionary and key for each entry vapp UUID
+        """
+
+        network_configuration = {}
+        if network_uuid is None:
+            return network_uuid
+
+        try:
+            content = self.get_network_action(network_uuid=network_uuid)
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+
+            network_configuration['status'] = vm_list_xmlroot.get("status")
+            network_configuration['name'] = vm_list_xmlroot.get("name")
+            network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
+
+            for child in vm_list_xmlroot:
+                if child.tag.split("}")[1] == 'IsShared':
+                    network_configuration['isShared'] = child.text.strip()
+                if child.tag.split("}")[1] == 'Configuration':
+                    for configuration in child.iter():
+                        tagKey = configuration.tag.split("}")[1].strip()
+                        if tagKey != "":
+                            network_configuration[tagKey] = configuration.text.strip()
+            return network_configuration
+        except Exception as exp :
+            self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
+            raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
+
+        return network_configuration
+
+    def delete_network_action(self, network_uuid=None):
+        """
+        Method delete given network from vCloud director
+
+        Args:
+            network_uuid - is a network uuid that client wish to delete
+
+            Returns:
+                The return None or XML respond or false
+        """
+        client = self.connect_as_admin()
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
+        if network_uuid is None:
+            return False
+
+        url_list = [self.url, '/api/admin/network/', network_uuid]
+        vm_list_rest_call = ''.join(url_list)
+
+        if client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='DELETE',
+                                            url=vm_list_rest_call,
+                                            headers=headers)
+            if response.status_code == 202:
+                return True
+
+        return False
+
+    def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+                       ip_profile=None, isshared='true'):
+        """
+        Method create network in vCloud director
+
+        Args:
+            network_name - is network name to be created.
+            net_type - can be 'bridge','data','ptp','mgmt'.
+            ip_profile is a dict containing the IP parameters of the network
+            isshared - is a boolean
+            parent_network_uuid - is parent provider vdc network that will be used for mapping.
+            It optional attribute. by default if no parent network indicate the first available will be used.
+
+            Returns:
+                The return network uuid or return None
+        """
+
+        new_network_name = [network_name, '-', str(uuid.uuid4())]
+        content = self.create_network_rest(network_name=''.join(new_network_name),
+                                           ip_profile=ip_profile,
+                                           net_type=net_type,
+                                           parent_network_uuid=parent_network_uuid,
+                                           isshared=isshared)
+        if content is None:
+            self.logger.debug("Failed create network {}.".format(network_name))
+            return None
+
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(content)
+            vcd_uuid = vm_list_xmlroot.get('id').split(":")
+            if len(vcd_uuid) == 4:
+                self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+                return vcd_uuid[3]
+        except:
+            self.logger.debug("Failed create network {}".format(network_name))
+            return None
+
    def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
                            ip_profile=None, isshared='true'):
        """
        Create an org VDC network in vCloud director through the admin REST API.

        Args:
            network_name - name of the network to be created.
            net_type - can be 'bridge','data','ptp','mgmt'.
            ip_profile - dict containing the IP parameters of the network;
                         missing entries are filled from DEFAULT_IP_PROFILE
                         or derived from the subnet address.
            isshared - 'true'/'false' string placed into <IsShared>.
            parent_network_uuid - parent provider vdc network used for mapping;
                         optional, by default the first available is used.

            Returns:
                The XML content of the create response, or None on any failure.

            Raises:
                vimconnConnectionException if the admin connection fails.
                vimconnException on a malformed ip_profile.
        """
        client_as_admin = self.connect_as_admin()
        if not client_as_admin:
            raise vimconn.vimconnConnectionException("Failed to connect vCD.")
        if network_name is None:
            return None

        # GET the admin view of our tenant vdc to discover the provider vdc
        # reference and the 'add network' link
        url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
        vm_list_rest_call = ''.join(url_list)

        if client_as_admin._session:
            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                     'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}

            response = self.perform_request(req_type='GET',
                                            url=vm_list_rest_call,
                                            headers=headers)

            provider_network = None
            available_networks = None
            add_vdc_rest_url = None

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        if child.tag.split("}")[1] == 'ProviderVdcReference':
                            provider_network = child.attrib.get('href')
                            # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

            # find  pvdc provided available network
            response = self.perform_request(req_type='GET',
                                            url=provider_network,
                                            headers=headers)
            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None

            # no explicit parent given: pick the first available network href
            if parent_network_uuid is None:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot.iter():
                        if child.tag.split("}")[1] == 'AvailableNetworks':
                            for networks in child.iter():
                                # application/vnd.vmware.admin.network+xml
                                if networks.attrib.get('href') is not None:
                                    available_networks = networks.attrib.get('href')
                                    break
                except:
                    return None

            try:
                #Configure IP profile of the network
                ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE

                # no subnet given: invent a random 192.168.x.0/24 subnet
                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
                    subnet_rand = random.randint(0, 255)
                    ip_base = "192.168.{}.".format(subnet_rand)
                    ip_profile['subnet_address'] = ip_base + "0/24"
                else:
                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'

                # fill any missing ip_profile fields with defaults derived
                # from the subnet base (.1 gateway, .2 dns, dhcp from .3)
                if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
                    ip_profile['gateway_address']=ip_base + "1"
                if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
                    ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
                if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
                    ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
                if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
                    ip_profile['dhcp_start_address']=ip_base + "3"
                if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
                    ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
                if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
                    ip_profile['dns_address']=ip_base + "2"

                gateway_address=ip_profile['gateway_address']
                dhcp_count=int(ip_profile['dhcp_count'])
                subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])

                if ip_profile['dhcp_enabled']==True:
                    dhcp_enabled='true'
                else:
                    dhcp_enabled='false'
                dhcp_start_address=ip_profile['dhcp_start_address']

                #derive dhcp_end_address from dhcp_start_address & dhcp_count
                end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
                end_ip_int += dhcp_count - 1
                dhcp_end_address = str(netaddr.IPAddress(end_ip_int))

                ip_version=ip_profile['ip_version']
                dns_address=ip_profile['dns_address']
            except KeyError as exp:
                self.logger.debug("Create Network REST: Key error {}".format(exp))
                raise vimconn.vimconnException("Create Network REST: Key error{}".format(exp))

            # either use client provided UUID or search for a first available
            #  if both are not defined we return none
            if parent_network_uuid is not None:
                provider_network = None
                available_networks = None
                add_vdc_rest_url = None

                url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
                add_vdc_rest_url = ''.join(url_list)

                url_list = [self.url, '/api/admin/network/', parent_network_uuid]
                available_networks = ''.join(url_list)

            #Creating all networks as Direct Org VDC type networks.
            #Unused in case of Underlay (data/ptp) network interface.
            fence_mode="isolated"
            is_inherited='false'
            # vCD accepts up to two DNS servers; a ';'-separated list is split
            # and the optional second entry becomes a <Dns2> element
            dns_list = dns_address.split(";")
            dns1 = dns_list[0]
            dns2_text = ""
            if len(dns_list) >= 2:
                dns2_text = "\n                                                <Dns2>{}</Dns2>\n".format(dns_list[1])
            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
                            <Description>Openmano created</Description>
                                    <Configuration>
                                        <IpScopes>
                                            <IpScope>
                                                <IsInherited>{1:s}</IsInherited>
                                                <Gateway>{2:s}</Gateway>
                                                <Netmask>{3:s}</Netmask>
                                                <Dns1>{4:s}</Dns1>{5:s}
                                                <IsEnabled>{6:s}</IsEnabled>
                                                <IpRanges>
                                                    <IpRange>
                                                        <StartAddress>{7:s}</StartAddress>
                                                        <EndAddress>{8:s}</EndAddress>
                                                    </IpRange>
                                                </IpRanges>
                                            </IpScope>
                                        </IpScopes>
                                        <FenceMode>{9:s}</FenceMode>
                                    </Configuration>
                                    <IsShared>{10:s}</IsShared>
                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
                                                    subnet_address, dns1, dns2_text, dhcp_enabled,
                                                    dhcp_start_address, dhcp_end_address,
                                                    fence_mode, isshared)

            headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
            try:
                response = self.perform_request(req_type='POST',
                                           url=add_vdc_rest_url,
                                           headers=headers,
                                           data=data)

                if response.status_code != 201:
                    self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
                                      .format(response.status_code,response.content))
                else:
                    # 201 Created: wait for the async creation task to finish
                    network_task = self.get_task_from_response(response.content)
                    self.logger.debug("Create Network REST : Waiting for Network creation complete")
                    time.sleep(5)
                    result = self.client.get_task_monitor().wait_for_success(task=network_task)
                    if result.get('status') == 'success':
                        return response.content
                    else:
                        self.logger.debug("create_network_rest task failed. Network Create response : {}"
                                          .format(response.content))
            except Exception as exp:
                self.logger.debug("create_network_rest : Exception : {} ".format(exp))

        return None
+
+    def convert_cidr_to_netmask(self, cidr_ip=None):
+        """
+        Method sets convert CIDR netmask address to normal IP format
+        Args:
+            cidr_ip : CIDR IP address
+            Returns:
+                netmask : Converted netmask
+        """
+        if cidr_ip is not None:
+            if '/' in cidr_ip:
+                network, net_bits = cidr_ip.split('/')
+                netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
+            else:
+                netmask = cidr_ip
+            return netmask
+        return None
+
+    def get_provider_rest(self, vca=None):
+        """
+        Method gets provider vdc view from vcloud director
+
+        Args:
+            network_name - is network name to be created.
+            parent_network_uuid - is parent provider vdc network that will be used for mapping.
+            It optional attribute. by default if no parent network indicate the first available will be used.
+
+            Returns:
+                The return xml content of respond or None
+        """
+
+        url_list = [self.url, '/api/admin']
+        if vca:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=''.join(url_list),
+                                            headers=headers)
+
+        if response.status_code == requests.codes.ok:
+            return response.content
+        return None
+
+    def create_vdc(self, vdc_name=None):
+
+        vdc_dict = {}
+
+        xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
+        if xml_content is not None:
+            try:
+                task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
+                for child in task_resp_xmlroot:
+                    if child.tag.split("}")[1] == 'Owner':
+                        vdc_id = child.attrib.get('href').split("/")[-1]
+                        vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
+                        return vdc_dict
+            except:
+                self.logger.debug("Respond body {}".format(xml_content))
+
+        return None
+
+    def create_vdc_from_tmpl_rest(self, vdc_name=None):
+        """
+        Method create vdc in vCloud director based on VDC template.
+        it uses pre-defined template.
+
+        Args:
+            vdc_name -  name of a new vdc.
+
+            Returns:
+                The return xml content of respond or None
+        """
+        # pre-requesite atleast one vdc template should be available in vCD
+        self.logger.info("Creating new vdc {}".format(vdc_name))
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+        if vdc_name is None:
+            return None
+
+        url_list = [self.url, '/api/vdcTemplates']
+        vm_list_rest_call = ''.join(url_list)
+
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                    'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+        response = self.perform_request(req_type='GET',
+                                        url=vm_list_rest_call,
+                                        headers=headers)
+
+        # container url to a template
+        vdc_template_ref = None
+        try:
+            vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+            for child in vm_list_xmlroot:
+                # application/vnd.vmware.admin.providervdc+xml
+                # we need find a template from witch we instantiate VDC
+                if child.tag.split("}")[1] == 'VdcTemplate':
+                    if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
+                        vdc_template_ref = child.attrib.get('href')
+        except:
+            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+            self.logger.debug("Respond body {}".format(response.content))
+            return None
+
+        # if we didn't found required pre defined template we return None
+        if vdc_template_ref is None:
+            return None
+
+        try:
+            # instantiate vdc
+            url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
+            vm_list_rest_call = ''.join(url_list)
+            data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                                        <Source href="{1:s}"></Source>
+                                        <Description>opnemano</Description>
+                                        </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
+
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
+
+            response = self.perform_request(req_type='POST',
+                                            url=vm_list_rest_call,
+                                            headers=headers,
+                                            data=data)
+
+            vdc_task = self.get_task_from_response(response.content)
+            self.client.get_task_monitor().wait_for_success(task=vdc_task)
+
+            # if we all ok we respond with content otherwise by default None
+            if response.status_code >= 200 and response.status_code < 300:
+                return response.content
+            return None
+        except:
+            self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
+            self.logger.debug("Respond body {}".format(response.content))
+
+        return None
+
    def create_vdc_rest(self, vdc_name=None):
        """
        Create a vdc in vCloud director through the admin REST API
        (hard-coded ReservationPool allocation model).

        Args:
            vdc_name - vdc name to be created
            Returns:
                The XML content of the create response, or None on failure.

            Raises:
                vimconnConnectionException if the admin connection fails.
        """

        self.logger.info("Creating new vdc {}".format(vdc_name))

        vca = self.connect_as_admin()
        if not vca:
            raise vimconn.vimconnConnectionException("Failed to connect vCD")
        if vdc_name is None:
            return None

        # GET the admin org view to discover the 'createVdcParams' add link
        url_list = [self.url, '/api/admin/org/', self.org_uuid]
        vm_list_rest_call = ''.join(url_list)

        if vca._session:
            # NOTE(review): token is taken from self.client._session although the
            # connection above was made as admin (vca) — confirm this is intended.
            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
                      'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
            response = self.perform_request(req_type='GET',
                                            url=vm_list_rest_call,
                                            headers=headers)

            provider_vdc_ref = None
            add_vdc_rest_url = None
            available_networks = None

            if response.status_code != requests.codes.ok:
                self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
                                                                                          response.status_code))
                return None
            else:
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
                    for child in vm_list_xmlroot:
                        # application/vnd.vmware.admin.providervdc+xml
                        if child.tag.split("}")[1] == 'Link':
                            if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
                                    and child.attrib.get('rel') == 'add':
                                add_vdc_rest_url = child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response.content))
                    return None

                # look up the provider vdc reference from the admin view
                response = self.get_provider_rest(vca=vca)
                try:
                    vm_list_xmlroot = XmlElementTree.fromstring(response)
                    for child in vm_list_xmlroot:
                        if child.tag.split("}")[1] == 'ProviderVdcReferences':
                            for sub_child in child:
                                provider_vdc_ref = sub_child.attrib.get('href')
                except:
                    self.logger.debug("Failed parse respond for rest api call {}".format(vm_list_rest_call))
                    self.logger.debug("Respond body {}".format(response))
                    return None

                if add_vdc_rest_url is not None and provider_vdc_ref is not None:
                    # fixed-size ReservationPool vdc: 2048 MHz / 2048 MB / 20 GB storage
                    data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
                            <AllocationModel>ReservationPool</AllocationModel>
                            <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
                            <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
                            </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
                            <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
                            <ProviderVdcReference
                            name="Main Provider"
                            href="{2:s}" />
                    <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
                                                                                                  escape(vdc_name),
                                                                                                  provider_vdc_ref)

                    headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'

                    response = self.perform_request(req_type='POST',
                                                    url=add_vdc_rest_url,
                                                    headers=headers,
                                                    data=data)

                    # if we all ok we respond with content otherwise by default None
                    if response.status_code == 201:
                        return response.content
        return None
+
+    def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
+        """
+        Method retrieve vapp detail from vCloud director
+
+        Args:
+            vapp_uuid - is vapp identifier.
+
+            Returns:
+                The return network uuid or return None
+        """
+
+        parsed_respond = {}
+        vca = None
+
+        if need_admin_access:
+            vca = self.connect_as_admin()
+        else:
+            vca = self.client
+
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+        if vapp_uuid is None:
+            return None
+
+        url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
+        get_vapp_restcall = ''.join(url_list)
+
+        if vca._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=get_vapp_restcall,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                if need_admin_access == False:
+                    response = self.retry_rest('GET', get_vapp_restcall)
+
+            if response.status_code != requests.codes.ok:
+                self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
+                                                                                          response.status_code))
+                return parsed_respond
+
+            try:
+                xmlroot_respond = XmlElementTree.fromstring(response.content)
+                parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
+
+                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                              'vmw': 'http://www.vmware.com/schema/ovf',
+                              'vm': 'http://www.vmware.com/vcloud/v1.5',
+                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
+                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
+                             }
+
+                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
+                if created_section is not None:
+                    parsed_respond['created'] = created_section.text
+
+                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
+                if network_section is not None and 'networkName' in network_section.attrib:
+                    parsed_respond['networkname'] = network_section.attrib['networkName']
+
+                ipscopes_section = \
+                    xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
+                                         namespaces)
+                if ipscopes_section is not None:
+                    for ipscope in ipscopes_section:
+                        for scope in ipscope:
+                            tag_key = scope.tag.split("}")[1]
+                            if tag_key == 'IpRanges':
+                                ip_ranges = scope.getchildren()
+                                for ipblock in ip_ranges:
+                                    for block in ipblock:
+                                        parsed_respond[block.tag.split("}")[1]] = block.text
+                            else:
+                                parsed_respond[tag_key] = scope.text
+
+                # parse children section for other attrib
+                children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                if children_section is not None:
+                    parsed_respond['name'] = children_section.attrib['name']
+                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
+                     if  "nestedHypervisorEnabled" in children_section.attrib else None
+                    parsed_respond['deployed'] = children_section.attrib['deployed']
+                    parsed_respond['status'] = children_section.attrib['status']
+                    parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
+                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
+                    nic_list = []
+                    for adapters in network_adapter:
+                        adapter_key = adapters.tag.split("}")[1]
+                        if adapter_key == 'PrimaryNetworkConnectionIndex':
+                            parsed_respond['primarynetwork'] = adapters.text
+                        if adapter_key == 'NetworkConnection':
+                            vnic = {}
+                            if 'network' in adapters.attrib:
+                                vnic['network'] = adapters.attrib['network']
+                            for adapter in adapters:
+                                setting_key = adapter.tag.split("}")[1]
+                                vnic[setting_key] = adapter.text
+                            nic_list.append(vnic)
+
+                    for link in children_section:
+                        if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
+                            if link.attrib['rel'] == 'screen:acquireTicket':
+                                parsed_respond['acquireTicket'] = link.attrib
+                            if link.attrib['rel'] == 'screen:acquireMksTicket':
+                                parsed_respond['acquireMksTicket'] = link.attrib
+
+                    parsed_respond['interfaces'] = nic_list
+                    vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                    if vCloud_extension_section is not None:
+                        vm_vcenter_info = {}
+                        vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                        vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                        if vmext is not None:
+                            vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                        parsed_respond["vm_vcenter_info"]= vm_vcenter_info
+
+                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
+                    vm_virtual_hardware_info = {}
+                    if virtual_hardware_section is not None:
+                        for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
+                            if item.find("rasd:Description",namespaces).text == "Hard disk":
+                                disk_size = item.find("rasd:HostResource" ,namespaces
+                                                ).attrib["{"+namespaces['vm']+"}capacity"]
+
+                                vm_virtual_hardware_info["disk_size"]= disk_size
+                                break
+
+                        for link in virtual_hardware_section:
+                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
+                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
+                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
+                                    break
+
+                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
+            except Exception as exp :
+                self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+        return parsed_respond
+
+    def acquire_console(self, vm_uuid=None):
+        """Acquire a console ticket for the given VM via the vCD REST API.
+
+            Args:
+                vm_uuid - uuid of the VM whose console ticket is requested
+
+            Returns:
+                The raw XML response content containing the console ticket,
+                or None when no uuid/session is available or the call fails
+        """
+
+        if vm_uuid is None:
+            return None
+        if self.client._session:
+            # Reuse the authenticated vCD session token for the REST call
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            # The 'acquireTicket' link comes from the vApp details document
+            vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
+            console_dict = vm_dict['acquireTicket']
+            console_rest_call = console_dict['href']
+
+            response = self.perform_request(req_type='POST',
+                                            url=console_rest_call,
+                                            headers=headers)
+
+            # 403: session token expired - re-authenticate and retry once
+            if response.status_code == 403:
+                response = self.retry_rest('POST', console_rest_call)
+
+            if response.status_code == requests.codes.ok:
+                return response.content
+
+        return None
+
+    def modify_vm_disk(self, vapp_uuid, flavor_disk):
+        """
+        Method to grow a VM disk to the size requested by the flavor.
+
+        Args:
+            vapp_uuid - is vapp identifier.
+            flavor_disk - disk size in GB as specified in VNFD (flavor)
+
+            Returns:
+                True if the disk was resized or already large enough,
+                None on error or when no disk information is available
+        """
+        status = None
+        try:
+            #Flavor disk is in GB convert it into MB
+            flavor_disk = int(flavor_disk) * 1024
+            vm_details = self.get_vapp_details_rest(vapp_uuid)
+            if vm_details:
+                vm_name = vm_details["name"]
+                self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
+
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                # Current disk size reported by vCD, in MB
+                vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
+                disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+
+                self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
+
+                # Disks are only grown, never shrunk
+                if flavor_disk > vm_disk:
+                    status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
+                    self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
+                                                         vm_disk,  flavor_disk ))
+                else:
+                    status = True
+                    self.logger.info("No need to modify disk of VM {}".format(vm_name))
+
+            return status
+        except Exception as exp:
+            self.logger.info("Error occurred while modifing disk size {}".format(exp))
+
+    def modify_vm_disk_rest(self, disk_href , disk_size):
+        """
+        Method retrieve modify vm disk size
+
+        Args:
+            disk_href - vCD API URL to GET and PUT disk data
+            disk_size - disk size as specified in VNFD (flavor)
+
+            Returns:
+                The return network uuid or return None
+        """
+        if disk_href is None or disk_size is None:
+            return None
+
+        if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=disk_href,
+                                                headers=headers)
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', disk_href)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
+                                                                            response.status_code))
+            return None
+        try:
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+
+            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
+                if item.find("rasd:Description",namespaces).text == "Hard disk":
+                    disk_item = item.find("rasd:HostResource" ,namespaces )
+                    if disk_item is not None:
+                        disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
+                        break
+
+            data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
+                                             xml_declaration=True)
+
+            #Send PUT request to modify disk size
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = self.perform_request(req_type='PUT',
+                                                url=disk_href,
+                                                headers=headers,
+                                                data=data)
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', disk_href, add_headers, data)
+
+            if response.status_code != 202:
+                self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
+                                                                            response.status_code))
+            else:
+                modify_disk_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
+                if result.get('status') == 'success':
+                    return True
+                else:
+                    return False
+            return None
+
+        except Exception as exp :
+                self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
+                return None
+
+    def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
+        """
+            Method to attach pci devices to VM
+
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                pci_devices - pci devices infromation as specified in VNFD (flavor)
+
+            Returns:
+                The status of add pci device task , vm object and
+                vcenter_conect object
+        """
+        vm_obj = None
+        self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+
+        if vm_moref_id:
+            try:
+                no_of_pci_devices = len(pci_devices)
+                if no_of_pci_devices > 0:
+                    #Get VM and its host
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                    if host_obj and vm_obj:
+                        #get PCI devies from host on which vapp is currently installed
+                        avilable_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
+
+                        if avilable_pci_devices is None:
+                            #find other hosts with active pci devices
+                            new_host_obj , avilable_pci_devices = self.get_host_and_PCIdevices(
+                                                                content,
+                                                                no_of_pci_devices
+                                                                )
+
+                            if new_host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
+                                #Migrate vm to the host where PCI devices are availble
+                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
+                                task = self.relocate_vm(new_host_obj, vm_obj)
+                                if task is not None:
+                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    self.logger.info("Migrate VM status: {}".format(result))
+                                    host_obj = new_host_obj
+                                else:
+                                    self.logger.info("Fail to migrate VM : {}".format(result))
+                                    raise vimconn.vimconnNotFoundException(
+                                    "Fail to migrate VM : {} to host {}".format(
+                                                    vmname_andid,
+                                                    new_host_obj)
+                                        )
+
+                        if host_obj is not None and avilable_pci_devices is not None and len(avilable_pci_devices)> 0:
+                            #Add PCI devices one by one
+                            for pci_device in avilable_pci_devices:
+                                task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
+                                if task:
+                                    status= self.wait_for_vcenter_task(task, vcenter_conect)
+                                    if status:
+                                        self.logger.info("Added PCI device {} to VM {}".format(pci_device,str(vm_obj)))
+                                else:
+                                    self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device,str(vm_obj)))
+                            return True, vm_obj, vcenter_conect
+                        else:
+                            self.logger.error("Currently there is no host with"\
+                                              " {} number of avaialble PCI devices required for VM {}".format(
+                                                                            no_of_pci_devices,
+                                                                            vmname_andid)
+                                              )
+                            raise vimconn.vimconnNotFoundException(
+                                    "Currently there is no host with {} "\
+                                    "number of avaialble PCI devices required for VM {}".format(
+                                                                            no_of_pci_devices,
+                                                                            vmname_andid))
+                else:
+                    self.logger.debug("No infromation about PCI devices {} ",pci_devices)
+
+            except vmodl.MethodFault as error:
+                self.logger.error("Error occurred while adding PCI devices {} ",error)
+        return None, vm_obj, vcenter_conect
+
+    def get_vm_obj(self, content, mob_id):
+        """
+            Method to get the vsphere VM object associated with a given morf ID
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                content - vCenter content object
+                mob_id - mob_id of VM
+
+            Returns:
+                    VM and host object
+        """
+        vm_obj = None
+        host_obj = None
+        try :
+            container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                        [vim.VirtualMachine], True
+                                                        )
+            for vm in container.view:
+                mobID = vm._GetMoId()
+                if mobID == mob_id:
+                    vm_obj = vm
+                    host_obj = vm_obj.runtime.host
+                    break
+        except Exception as exp:
+            self.logger.error("Error occurred while finding VM object : {}".format(exp))
+        return host_obj, vm_obj
+
+    def get_pci_devices(self, host, need_devices):
+        """
+            Method to get the details of pci devices on given host
+             Args:
+                host - vSphere host object
+                need_devices - number of pci devices needed on host
+
+             Returns:
+                array of pci devices
+        """
+        all_devices = []
+        all_device_ids = []
+        used_devices_ids = []
+
+        try:
+            if host:
+                pciPassthruInfo = host.config.pciPassthruInfo
+                pciDevies = host.hardware.pciDevice
+
+            for pci_status in pciPassthruInfo:
+                if pci_status.passthruActive:
+                    for device in pciDevies:
+                        if device.id == pci_status.id:
+                            all_device_ids.append(device.id)
+                            all_devices.append(device)
+
+            #check if devices are in use
+            avalible_devices = all_devices
+            for vm in host.vm:
+                if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+                    vm_devices = vm.config.hardware.device
+                    for device in vm_devices:
+                        if type(device) is vim.vm.device.VirtualPCIPassthrough:
+                            if device.backing.id in all_device_ids:
+                                for use_device in avalible_devices:
+                                    if use_device.id == device.backing.id:
+                                        avalible_devices.remove(use_device)
+                                used_devices_ids.append(device.backing.id)
+                                self.logger.debug("Device {} from devices {}"\
+                                        "is in use".format(device.backing.id,
+                                                           device)
+                                            )
+            if len(avalible_devices) < need_devices:
+                self.logger.debug("Host {} don't have {} number of active devices".format(host,
+                                                                            need_devices))
+                self.logger.debug("found only {} devives {}".format(len(avalible_devices),
+                                                                    avalible_devices))
+                return None
+            else:
+                required_devices = avalible_devices[:need_devices]
+                self.logger.info("Found {} PCI devivces on host {} but required only {}".format(
+                                                            len(avalible_devices),
+                                                            host,
+                                                            need_devices))
+                self.logger.info("Retruning {} devices as {}".format(need_devices,
+                                                                required_devices ))
+                return required_devices
+
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
+
+        return None
+
+    def get_host_and_PCIdevices(self, content, need_devices):
+        """
+         Method to get the details of pci devices infromation on all hosts
+
+            Args:
+                content - vSphere host object
+                need_devices - number of pci devices needed on host
+
+            Returns:
+                 array of pci devices and host object
+        """
+        host_obj = None
+        pci_device_objs = None
+        try:
+            if content:
+                container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                            [vim.HostSystem], True)
+                for host in container.view:
+                    devices = self.get_pci_devices(host, need_devices)
+                    if devices:
+                        host_obj = host
+                        pci_device_objs = devices
+                        break
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
+
+        return host_obj,pci_device_objs
+
+    def relocate_vm(self, dest_host, vm) :
+        """
+         Method to get the relocate VM to new host
+
+            Args:
+                dest_host - vSphere host object
+                vm - vSphere VM object
+
+            Returns:
+                task object
+        """
+        task = None
+        try:
+            relocate_spec = vim.vm.RelocateSpec(host=dest_host)
+            task = vm.Relocate(relocate_spec)
+            self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
+        except Exception as exp:
+            self.logger.error("Error occurred while relocate VM {} to new host {}: {}".format(
+                                                                            dest_host, vm, exp))
+        return task
+
+    def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
+        """
+        Waits and provides updates on a vSphere task
+        """
+        while task.info.state == vim.TaskInfo.State.running:
+            time.sleep(2)
+
+        if task.info.state == vim.TaskInfo.State.success:
+            if task.info.result is not None and not hideResult:
+                self.logger.info('{} completed successfully, result: {}'.format(
+                                                            actionName,
+                                                            task.info.result))
+            else:
+                self.logger.info('Task {} completed successfully.'.format(actionName))
+        else:
+            self.logger.error('{} did not complete successfully: {} '.format(
+                                                            actionName,
+                                                            task.info.error)
+                              )
+
+        return task.info.result
+
+    def add_pci_to_vm(self,host_object, vm_object, host_pci_dev):
+        """
+         Method to add pci device in given VM
+
+            Args:
+                host_object - vSphere host object
+                vm_object - vSphere VM object
+                host_pci_dev -  host_pci_dev must be one of the devices from the
+                                host_object.hardware.pciDevice list
+                                which is configured as a PCI passthrough device
+
+            Returns:
+                task object (reconfigure task), or None on failure
+        """
+        task = None
+        if vm_object and host_object and host_pci_dev:
+            try :
+                #Add PCI device to VM
+                # Map each passthrough-capable pci id to its host systemId,
+                # needed to build the backing info below
+                pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
+                systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
+
+                if host_pci_dev.id not in systemid_by_pciid:
+                    self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
+                    return None
+
+                # deviceId as a 16-bit hex string.
+                # NOTE(review): lstrip('0x') strips *characters* '0' and 'x',
+                # not the '0x' prefix; it yields '' for deviceId 0 — confirm
+                # that value can never occur here
+                deviceId = hex(host_pci_dev.deviceId % 2**16).lstrip('0x')
+                backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
+                                            id=host_pci_dev.id,
+                                            systemId=systemid_by_pciid[host_pci_dev.id],
+                                            vendorId=host_pci_dev.vendorId,
+                                            deviceName=host_pci_dev.deviceName)
+
+                # key=-100: temporary negative key, vCenter assigns the real one
+                hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
+
+                new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
+                new_device_config.operation = "add"
+                vmConfigSpec = vim.vm.ConfigSpec()
+                vmConfigSpec.deviceChange = [new_device_config]
+
+                # Asynchronous reconfigure; caller waits on the returned task
+                task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
+                self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
+                                                            host_pci_dev, vm_object, host_object)
+                                )
+            except Exception as exp:
+                self.logger.error("Error occurred while adding pci devive {} to VM {}: {}".format(
+                                                                            host_pci_dev,
+                                                                            vm_object,
+                                                                             exp))
+        return task
+
+    def get_vm_vcenter_info(self):
+        """
+        Method to get details of vCenter and vm
+
+            Args:
+                vapp_uuid - uuid of vApp or VM
+
+            Returns:
+                Moref Id of VM and deails of vCenter
+        """
+        vm_vcenter_info = {}
+
+        if self.vcenter_ip is not None:
+            vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
+        else:
+            raise vimconn.vimconnException(message="vCenter IP is not provided."\
+                                           " Please provide vCenter IP while attaching datacenter to tenant in --config")
+        if self.vcenter_port is not None:
+            vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
+        else:
+            raise vimconn.vimconnException(message="vCenter port is not provided."\
+                                           " Please provide vCenter port while attaching datacenter to tenant in --config")
+        if self.vcenter_user is not None:
+            vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
+        else:
+            raise vimconn.vimconnException(message="vCenter user is not provided."\
+                                           " Please provide vCenter user while attaching datacenter to tenant in --config")
+
+        if self.vcenter_password is not None:
+            vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
+        else:
+            raise vimconn.vimconnException(message="vCenter user password is not provided."\
+                                           " Please provide vCenter user password while attaching datacenter to tenant in --config")
+
+        return vm_vcenter_info
+
+
+    def get_vm_pci_details(self, vmuuid):
+        """
+            Method to get VM PCI device details from vCenter
+
+            Args:
+                vmuuid - uuid of the vApp/VM
+
+            Returns:
+                dict of PCI devices attached to the VM (plus host_name and
+                host_ip); implicitly None when no moref id is found
+
+            Raises:
+                vimconn.vimconnException on any lookup error
+        """
+        vm_pci_devices_info = {}
+        try:
+            vcenter_conect, content = self.get_vcenter_content()
+            vm_moref_id = self.get_vm_moref_id(vmuuid)
+            if vm_moref_id:
+                #Get VM and its host
+                if content:
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                    if host_obj and vm_obj:
+                        vm_pci_devices_info["host_name"]= host_obj.name
+                        # IP of the host's first vnic
+                        vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
+                        # One entry per attached passthrough device, keyed by label
+                        # (note: 'devide_id' key name is part of the external contract)
+                        for device in vm_obj.config.hardware.device:
+                            if type(device) == vim.vm.device.VirtualPCIPassthrough:
+                                device_details={'devide_id':device.backing.id,
+                                                'pciSlotNumber':device.slotInfo.pciSlotNumber,
+                                            }
+                                vm_pci_devices_info[device.deviceInfo.label] = device_details
+                else:
+                    self.logger.error("Can not connect to vCenter while getting "\
+                                          "PCI devices infromationn")
+                # NOTE(review): only this branch returns; a missing moref id
+                # falls through and returns None implicitly
+                return vm_pci_devices_info
+        except Exception as exp:
+            self.logger.error("Error occurred while getting VM infromationn"\
+                             " for VM : {}".format(exp))
+            # NOTE(review): 'message' receives the exception object, not a str
+            raise vimconn.vimconnException(message=exp)
+
+
+    def reserve_memory_for_all_vms(self, vapp, memory_mb):
+        """
+            Method to reserve memory for all VMs
+            Args :
+                vapp - VApp
+                memory_mb - Memory in MB
+            Returns:
+                None
+        """
+
+        self.logger.info("Reserve memory for all VMs")
+        for vms in vapp.get_all_vms():
+            vm_id = vms.get('id').split(':')[-1]
+
+            url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
+            response = self.perform_request(req_type='GET',
+                                            url=url_rest_call,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {}".format(url_rest_call,
+                                                            response.content,
+                                                            response.status_code))
+                raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
+                                               "memory")
+
+            bytexml = bytes(bytearray(response.content, encoding='utf-8'))
+            contentelem = lxmlElementTree.XML(bytexml)
+            namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.items() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+
+            # Find the reservation element in the response
+            memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
+            for memelem in memelem_list:
+                memelem.text = str(memory_mb)
+
+            newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
+
+            response = self.perform_request(req_type='PUT',
+                                            url=url_rest_call,
+                                            headers=headers,
+                                            data=newdata)
+
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+
+            if response.status_code != 202:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {} ".format(url_rest_call,
+                                  response.content,
+                                  response.status_code))
+                raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
+                                               "virtual hardware memory section")
+            else:
+                mem_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=mem_task)
+                if result.get('status') == 'success':
+                    self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
+                                      .format(vm_id))
+                else:
+                    self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
+                                      .format(vm_id))
+
+    def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
+        """
+            Configure VApp network config with org vdc network
+            Args :
+                vapp - VApp
+            Returns:
+                None
+        """
+
+        self.logger.info("Connecting vapp {} to org vdc network {}".
+                         format(vapp_id, net_name))
+
+        url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
+
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+        response = self.perform_request(req_type='GET',
+                                        url=url_rest_call,
+                                        headers=headers)
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', url_rest_call)
+
+        if response.status_code != 200:
+            self.logger.error("REST call {} failed reason : {}"\
+                              "status code : {}".format(url_rest_call,
+                                                        response.content,
+                                                        response.status_code))
+            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
+                                           "network config section")
+
+        data = response.content
+        headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
+        net_id = self.get_network_id_by_name(net_name)
+        if not net_id:
+            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
+                                           "existing network")
+
+        bytexml = bytes(bytearray(data, encoding='utf-8'))
+        newelem = lxmlElementTree.XML(bytexml)
+        namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
+        namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
+        nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
+
+        # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
+        parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
+        if parentnetworklist:
+            for pn in parentnetworklist:
+                if "href" not in pn.keys():
+                    id_val = pn.get("id")
+                    href_val = "{}/api/network/{}".format(self.url, id_val)
+                    pn.set("href", href_val)
+
+        newstr = """<NetworkConfig networkName="{}">
+                  <Configuration>
+                       <ParentNetwork href="{}/api/network/{}"/>
+                       <FenceMode>bridged</FenceMode>
+                  </Configuration>
+              </NetworkConfig>
+           """.format(net_name, self.url, net_id)
+        newcfgelem = lxmlElementTree.fromstring(newstr)
+        if nwcfglist:
+            nwcfglist[0].addnext(newcfgelem)
+
+        newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
+
+        response = self.perform_request(req_type='PUT',
+                                        url=url_rest_call,
+                                        headers=headers,
+                                        data=newdata)
+
+        if response.status_code == 403:
+            add_headers = {'Content-Type': headers['Content-Type']}
+            response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+
+        if response.status_code != 202:
+            self.logger.error("REST call {} failed reason : {}"\
+                              "status code : {} ".format(url_rest_call,
+                              response.content,
+                              response.status_code))
+            raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
+                                           "network config section")
+        else:
+            vapp_task = self.get_task_from_response(response.content)
+            result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
+            if result.get('status') == 'success':
+                self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
+                                 "network {}".format(vapp_id, net_name))
+            else:
+                self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
+                                  "connect to network {}".format(vapp_id, net_name))
+
+    def remove_primary_network_adapter_from_all_vms(self, vapp):
+        """
+            Method to remove network adapter type to vm
+            Args :
+                vapp - VApp
+            Returns:
+                None
+        """
+
+        self.logger.info("Removing network adapter from all VMs")
+        for vms in vapp.get_all_vms():
+            vm_id = vms.get('id').split(':')[-1]
+
+            url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=url_rest_call,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                response = self.retry_rest('GET', url_rest_call)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {}".format(url_rest_call,
+                                                            response.content,
+                                                            response.status_code))
+                raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
+                                               "network connection section")
+
+            data = response.content
+            data = data.split('<Link rel="edit"')[0]
+
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+
+            newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                      <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
+                              xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+                              xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
+                              xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
+                              xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+                              xmlns:vmw="http://www.vmware.com/schema/ovf"
+                              xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
+                              xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
+                              xmlns:ns9="http://www.vmware.com/vcloud/versions"
+                              href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
+                              <ovf:Info>Specifies the available VM network connections</ovf:Info>
+                             <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
+                             <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
+                      </NetworkConnectionSection>""".format(url=url_rest_call)
+            response = self.perform_request(req_type='PUT',
+                                            url=url_rest_call,
+                                            headers=headers,
+                                            data=newdata)
+
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+
+            if response.status_code != 202:
+                self.logger.error("REST call {} failed reason : {}"\
+                                  "status code : {} ".format(url_rest_call,
+                                  response.content,
+                                  response.status_code))
+                raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
+                                               "network connection section")
+            else:
+                nic_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                if result.get('status') == 'success':
+                    self.logger.info("remove_primary_network_adapter(): VM {} conneced to "\
+                                      "default NIC type".format(vm_id))
+                else:
+                    self.logger.error("remove_primary_network_adapter(): VM {} failed to "\
+                                      "connect NIC type".format(vm_id))
+
+    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
+        """
+            Method to add network adapter type to vm
+            Args :
+                network_name - name of network
+                primary_nic_index - int value for primary nic index
+                nicIndex - int value for nic index
+                nic_type - specify model name to which add to vm
+            Returns:
+                None
+        """
+
+        self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
+                         format(network_name, nicIndex, nic_type))
+        try:
+            ip_address = None
+            floating_ip = False
+            mac_address = None
+            if 'floating_ip' in net: floating_ip = net['floating_ip']
+
+            # Stub for ip_address feature
+            if 'ip_address' in net: ip_address = net['ip_address']
+
+            if 'mac_address' in net: mac_address = net['mac_address']
+
+            if floating_ip:
+                allocation_mode = "POOL"
+            elif ip_address:
+                allocation_mode = "MANUAL"
+            else:
+                allocation_mode = "DHCP"
+
+            if not nic_type:
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                    response = self.perform_request(req_type='GET',
+                                                    url=url_rest_call,
+                                                    headers=headers)
+
+                    if response.status_code == 403:
+                        response = self.retry_rest('GET', url_rest_call)
+
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                             "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                         "network connection section")
+
+                    data = response.content
+                    data = data.split('<Link rel="edit"')[0]
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        self.logger.debug("add_network_adapter PrimaryNIC not in data")
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                                         allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            item =  item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
+                    else:
+                        self.logger.debug("add_network_adapter PrimaryNIC in data")
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                          allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            new_item =  new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data + new_item + '</NetworkConnectionSection>'
+
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+
+                    response = self.perform_request(req_type='PUT',
+                                                    url=url_rest_call,
+                                                    headers=headers,
+                                                    data=data)
+
+                    if response.status_code == 403:
+                        add_headers = {'Content-Type': headers['Content-Type']}
+                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)
+
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {} ".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                            "network connection section")
+                    else:
+                        nic_task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                        if result.get('status') == 'success':
+                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
+                                                               "default NIC type".format(vm_id))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
+                                                              "connect NIC type".format(vm_id))
+            else:
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                    response = self.perform_request(req_type='GET',
+                                                    url=url_rest_call,
+                                                    headers=headers)
+
+                    if response.status_code == 403:
+                        response = self.retry_rest('GET', url_rest_call)
+
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {}".format(url_rest_call,
+                                                                   response.content,
+                                                              response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                        "network connection section")
+                    data = response.content
+                    data = data.split('<Link rel="edit"')[0]
+                    vcd_netadapter_type = nic_type
+                    if nic_type in ['SR-IOV', 'VF']:
+                        vcd_netadapter_type = "SRIOVETHERNETCARD"
+
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                <NetworkAdapterType>{}</NetworkAdapterType>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                               allocation_mode, vcd_netadapter_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            item = item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
+                    else:
+                        self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    <NetworkAdapterType>{}</NetworkAdapterType>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                allocation_mode, vcd_netadapter_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        if mac_address:
+                            mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                            new_item =  new_item.replace('</IsConnected>\n','</IsConnected>\n{}\n'.format(mac_tag))
+
+                        data = data + new_item + '</NetworkConnectionSection>'
+
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+
+                    response = self.perform_request(req_type='PUT',
+                                                    url=url_rest_call,
+                                                    headers=headers,
+                                                    data=data)
+
+                    if response.status_code == 403:
+                        add_headers = {'Content-Type': headers['Content-Type']}
+                        response = self.retry_rest('PUT', url_rest_call, add_headers, data)
+
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {}".format(url_rest_call,
+                                                                   response.content,
+                                                              response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                           "network connection section")
+                    else:
+                        nic_task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                        if result.get('status') == 'success':
+                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
+                                               "conneced to NIC type {}".format(vm_id, nic_type))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
+                                               "failed to connect NIC type {}".format(vm_id, nic_type))
+        except Exception as exp:
+            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
+                                               "while adding Network adapter")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def set_numa_affinity(self, vmuuid, paired_threads_id):
+        """
+            Method to assign numa affinity in vm configuration parammeters
+            Args :
+                vmuuid - vm uuid
+                paired_threads_id - one or more virtual processor
+                                    numbers
+            Returns:
+                return if True
+        """
+        try:
+            vcenter_conect, content = self.get_vcenter_content()
+            vm_moref_id = self.get_vm_moref_id(vmuuid)
+
+            host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
+            if vm_obj:
+                config_spec = vim.vm.ConfigSpec()
+                config_spec.extraConfig = []
+                opt = vim.option.OptionValue()
+                opt.key = 'numa.nodeAffinity'
+                opt.value = str(paired_threads_id)
+                config_spec.extraConfig.append(opt)
+                task = vm_obj.ReconfigVM_Task(config_spec)
+                if task:
+                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                    extra_config = vm_obj.config.extraConfig
+                    flag = False
+                    for opts in extra_config:
+                        if 'numa.nodeAffinity' in opts.key:
+                            flag = True
+                            self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
+                                                     "value {} for vm {}".format(opt.value, vm_obj))
+                        if flag:
+                            return
+            else:
+                self.logger.error("set_numa_affinity: Failed to assign numa affinity")
+        except Exception as exp:
+            self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
+                                                       "for VM {} : {}".format(vm_obj, vm_moref_id))
+            raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
+                                                                           "affinity".format(exp))
+
+
+    def cloud_init(self, vapp, cloud_config):
+        """
+        Method to inject ssh-key
+        vapp - vapp object
+        cloud_config a dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                    or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk
+        """
+        try:
+            if not isinstance(cloud_config, dict):
+                raise Exception("cloud_init : parameter cloud_config is not a dictionary")
+            else:
+                key_pairs = []
+                userdata = []
+                if "key-pairs" in cloud_config:
+                    key_pairs = cloud_config["key-pairs"]
+
+                if "users" in cloud_config:
+                    userdata = cloud_config["users"]
+
+                self.logger.debug("cloud_init : Guest os customization started..")
+                customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
+                customize_script = customize_script.replace("&","&amp;")
+                self.guest_customization(vapp, customize_script)
+
+        except Exception as exp:
+            self.logger.error("cloud_init : exception occurred while injecting "\
+                                                                       "ssh-key")
+            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
+                                                               "ssh-key".format(exp))
+
+    def format_script(self, key_pairs=[], users_list=[]):
+        bash_script = """#!/bin/sh
+        echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+        if [ "$1" = "precustomization" ];then
+            echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+        """
+
+        keys = "\n".join(key_pairs)
+        if keys:
+            keys_data = """
+            if [ ! -d /root/.ssh ];then
+                mkdir /root/.ssh
+                chown root:root /root/.ssh
+                chmod 700 /root/.ssh
+                touch /root/.ssh/authorized_keys
+                chown root:root /root/.ssh/authorized_keys
+                chmod 600 /root/.ssh/authorized_keys
+                # make centos with selinux happy
+                which restorecon && restorecon -Rv /root/.ssh
+            else
+                touch /root/.ssh/authorized_keys
+                chown root:root /root/.ssh/authorized_keys
+                chmod 600 /root/.ssh/authorized_keys
+            fi
+            echo '{key}' >> /root/.ssh/authorized_keys
+            """.format(key=keys)
+
+            bash_script+= keys_data
+
+        for user in users_list:
+            if 'name' in user: user_name = user['name']
+            if 'key-pairs' in user:
+                user_keys = "\n".join(user['key-pairs'])
+            else:
+                user_keys = None
+
+            add_user_name = """
+                useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
+                """.format(user_name=user_name)
+
+            bash_script+= add_user_name
+
+            if user_keys:
+                user_keys_data = """
+                mkdir /home/{user_name}/.ssh
+                chown {user_name}:{user_name} /home/{user_name}/.ssh
+                chmod 700 /home/{user_name}/.ssh
+                touch /home/{user_name}/.ssh/authorized_keys
+                chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                chmod 600 /home/{user_name}/.ssh/authorized_keys
+                # make centos with selinux happy
+                which restorecon && restorecon -Rv /home/{user_name}/.ssh
+                echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                """.format(user_name=user_name,user_key=user_keys)
+
+                bash_script+= user_keys_data
+
+        return bash_script+"\n\tfi"
+
+    def guest_customization(self, vapp, customize_script):
+        """
+        Method to customize guest os
+        vapp - Vapp object
+        customize_script - Customize script to be run at first boot of VM.
+        """
+        for vm in vapp.get_all_vms():
+            vm_id = vm.get('id').split(':')[-1]
+            vm_name = vm.get('name')
+            vm_name = vm_name.replace('_','-')
+
+            vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+            headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
+
+            data = """<GuestCustomizationSection
+                           xmlns="http://www.vmware.com/vcloud/v1.5"
+                           xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+                           ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
+                           <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
+                           <Enabled>true</Enabled>
+                           <ChangeSid>false</ChangeSid>
+                           <VirtualMachineId>{}</VirtualMachineId>
+                           <JoinDomainEnabled>false</JoinDomainEnabled>
+                           <UseOrgSettings>false</UseOrgSettings>
+                           <AdminPasswordEnabled>false</AdminPasswordEnabled>
+                           <AdminPasswordAuto>true</AdminPasswordAuto>
+                           <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
+                           <AdminAutoLogonCount>0</AdminAutoLogonCount>
+                           <ResetPasswordRequired>false</ResetPasswordRequired>
+                           <CustomizationScript>{}</CustomizationScript>
+                           <ComputerName>{}</ComputerName>
+                           <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
+                       </GuestCustomizationSection>
+                   """.format(vm_customization_url,
+                                             vm_id,
+                                  customize_script,
+                                           vm_name,
+                              vm_customization_url)
+
+            response = self.perform_request(req_type='PUT',
+                                             url=vm_customization_url,
+                                             headers=headers,
+                                             data=data)
+            if response.status_code == 202:
+                guest_task = self.get_task_from_response(response.content)
+                self.client.get_task_monitor().wait_for_success(task=guest_task)
+                self.logger.info("guest_customization : customized guest os task "\
+                                             "completed for VM {}".format(vm_name))
+            else:
+                self.logger.error("guest_customization : task for customized guest os"\
+                                                    "failed for VM {}".format(vm_name))
+                raise vimconn.vimconnException("guest_customization : failed to perform"\
+                                       "guest os customization on VM {}".format(vm_name))
+
+    def add_new_disk(self, vapp_uuid, disk_size):
+        """
+            Method to create an empty vm disk
+
+            Args:
+                vapp_uuid - is vapp identifier.
+                disk_size - size of disk to be created in GB
+
+            Returns:
+                None
+        """
+        status = False
+        vm_details = None
+        try:
+            #Disk size in GB, convert it into MB
+            if disk_size is not None:
+                disk_size_mb = int(disk_size) * 1024
+                vm_details = self.get_vapp_details_rest(vapp_uuid)
+
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+                disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+                status = self.add_new_disk_rest(disk_href, disk_size_mb)
+
+        except Exception as exp:
+            msg = "Error occurred while creating new disk {}.".format(exp)
+            self.rollback_newvm(vapp_uuid, msg)
+
+        if status:
+            self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+        else:
+            #If failed to add disk, delete VM
+            msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def add_new_disk_rest(self, disk_href, disk_size_mb):
+        """
+        Retrives vApp Disks section & add new empty disk
+
+        Args:
+            disk_href: Disk section href to addd disk
+            disk_size_mb: Disk size in MB
+
+            Returns: Status of add new disk task
+        """
+        status = False
+        if self.client._session:
+            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+            response = self.perform_request(req_type='GET',
+                                            url=disk_href,
+                                            headers=headers)
+
+        if response.status_code == 403:
+            response = self.retry_rest('GET', disk_href)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
+                              .format(disk_href, response.status_code))
+            return status
+        try:
+            #Find but type & max of instance IDs assigned to disks
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            #For python3
+            #namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.items() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+            instance_id = 0
+            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
+                if item.find("rasd:Description",namespaces).text == "Hard disk":
+                    inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
+                    if inst_id > instance_id:
+                        instance_id = inst_id
+                        disk_item = item.find("rasd:HostResource" ,namespaces)
+                        bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
+                        bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
+
+            instance_id = instance_id + 1
+            new_item =   """<Item>
+                                <rasd:Description>Hard disk</rasd:Description>
+                                <rasd:ElementName>New disk</rasd:ElementName>
+                                <rasd:HostResource
+                                    xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                                    vcloud:capacity="{}"
+                                    vcloud:busSubType="{}"
+                                    vcloud:busType="{}"></rasd:HostResource>
+                                <rasd:InstanceID>{}</rasd:InstanceID>
+                                <rasd:ResourceType>17</rasd:ResourceType>
+                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
+
+            new_data = response.content
+            #Add new item at the bottom
+            new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
+
+            # Send PUT request to modify virtual hardware section with new disk
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = self.perform_request(req_type='PUT',
+                                            url=disk_href,
+                                            data=new_data,
+                                            headers=headers)
+
+            if response.status_code == 403:
+                add_headers = {'Content-Type': headers['Content-Type']}
+                response = self.retry_rest('PUT', disk_href, add_headers, new_data)
+
+            if response.status_code != 202:
+                self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
+                                  .format(disk_href, response.status_code, response.content))
+            else:
+                add_disk_task = self.get_task_from_response(response.content)
+                result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
+                if result.get('status') == 'success':
+                    status = True
+                else:
+                    self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
+
+        except Exception as exp:
+            self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
+
+        return status
+
+
+    def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
+        """
+            Method to add existing disk to vm
+            Args :
+                catalogs - List of VDC catalogs
+                image_id - Catalog ID
+                template_name - Name of template in catalog
+                vapp_uuid - UUID of vApp
+            Returns:
+                None
+        """
+        disk_info = None
+        vcenter_conect, content = self.get_vcenter_content()
+        #find moref-id of vm in image
+        catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
+                                                         image_id=image_id,
+                                                        )
+
+        if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
+            if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
+                catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
+                if catalog_vm_moref_id:
+                    self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
+                    host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
+                    if catalog_vm_obj:
+                        #find existing disk
+                        disk_info = self.find_disk(catalog_vm_obj)
+                    else:
+                        exp_msg = "No VM with image id {} found".format(image_id)
+                        self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+        else:
+            exp_msg = "No Image found with image ID {} ".format(image_id)
+            self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+
+        if disk_info:
+            self.logger.info("Existing disk_info : {}".format(disk_info))
+            #get VM
+            vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+            host, vm_obj = self.get_vm_obj(content, vm_moref_id)
+            if vm_obj:
+                status = self.add_disk(vcenter_conect=vcenter_conect,
+                                       vm=vm_obj,
+                                       disk_info=disk_info,
+                                       size=size,
+                                       vapp_uuid=vapp_uuid
+                                       )
+            if status:
+                self.logger.info("Disk from image id {} added to {}".format(image_id,
+                                                                            vm_obj.config.name)
+                                 )
+        else:
+            msg = "No disk found with image id {} to add in VM {}".format(
+                                                            image_id,
+                                                            vm_obj.config.name)
+            self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
+
+
+    def find_disk(self, vm_obj):
+        """
+         Method to find details of existing disk in VM
+            Args :
+                vm_obj - vCenter object of VM
+                image_id - Catalog ID
+            Returns:
+                disk_info : dict of disk details
+        """
+        disk_info = {}
+        if vm_obj:
+            try:
+                devices = vm_obj.config.hardware.device
+                for device in devices:
+                    if type(device) is vim.vm.device.VirtualDisk:
+                        if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
+                            disk_info["full_path"] = device.backing.fileName
+                            disk_info["datastore"] = device.backing.datastore
+                            disk_info["capacityKB"] = device.capacityInKB
+                            break
+            except Exception as exp:
+                self.logger.error("find_disk() : exception occurred while "\
+                                  "getting existing disk details :{}".format(exp))
+        return disk_info
+
+
+    def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
+        """
+         Method to add existing disk in VM
+            Args :
+                vcenter_conect - vCenter content object
+                vm - vCenter vm object
+                disk_info : dict of disk details
+            Returns:
+                status : status of add disk task
+        """
+        datastore = disk_info["datastore"] if "datastore" in disk_info else None
+        fullpath = disk_info["full_path"] if "full_path" in disk_info else None
+        capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
+        if size is not None:
+            #Convert size from GB to KB
+            sizeKB = int(size) * 1024 * 1024
+            #compare size of existing disk and user given size.Assign whicherver is greater
+            self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
+                                                                    sizeKB, capacityKB))
+            if sizeKB > capacityKB:
+                capacityKB = sizeKB
+
+        if datastore and fullpath and capacityKB:
+            try:
+                spec = vim.vm.ConfigSpec()
+                # get all disks on a VM, set unit_number to the next available
+                unit_number = 0
+                for dev in vm.config.hardware.device:
+                    if hasattr(dev.backing, 'fileName'):
+                        unit_number = int(dev.unitNumber) + 1
+                        # unit_number 7 reserved for scsi controller
+                        if unit_number == 7:
+                            unit_number += 1
+                    if isinstance(dev, vim.vm.device.VirtualDisk):
+                        #vim.vm.device.VirtualSCSIController
+                        controller_key = dev.controllerKey
+
+                self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
+                                                                    unit_number, controller_key))
+                # add disk here
+                dev_changes = []
+                disk_spec = vim.vm.device.VirtualDeviceSpec()
+                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+                disk_spec.device = vim.vm.device.VirtualDisk()
+                disk_spec.device.backing = \
+                    vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+                disk_spec.device.backing.thinProvisioned = True
+                disk_spec.device.backing.diskMode = 'persistent'
+                disk_spec.device.backing.datastore  = datastore
+                disk_spec.device.backing.fileName  = fullpath
+
+                disk_spec.device.unitNumber = unit_number
+                disk_spec.device.capacityInKB = capacityKB
+                disk_spec.device.controllerKey = controller_key
+                dev_changes.append(disk_spec)
+                spec.deviceChange = dev_changes
+                task = vm.ReconfigVM_Task(spec=spec)
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+                return status
+            except Exception as exp:
+                exp_msg = "add_disk() : exception {} occurred while adding disk "\
+                          "{} to vm {}".format(exp,
+                                               fullpath,
+                                               vm.config.name)
+                self.rollback_newvm(vapp_uuid, exp_msg)
+        else:
+            msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def get_vcenter_content(self):
+        """
+         Get the vsphere content object
+        """
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info()
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter infromationn"\
+                             " for VM : {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
+
+        context = None
+        if hasattr(ssl, '_create_unverified_context'):
+            context = ssl._create_unverified_context()
+
+        vcenter_conect = SmartConnect(
+                    host=vm_vcenter_info["vm_vcenter_ip"],
+                    user=vm_vcenter_info["vm_vcenter_user"],
+                    pwd=vm_vcenter_info["vm_vcenter_password"],
+                    port=int(vm_vcenter_info["vm_vcenter_port"]),
+                    sslContext=context
+                )
+        atexit.register(Disconnect, vcenter_conect)
+        content = vcenter_conect.RetrieveContent()
+        return vcenter_conect, content
+
+
+    def get_vm_moref_id(self, vapp_uuid):
+        """
+        Get the moref_id of given VM
+        """
+        try:
+            if vapp_uuid:
+                vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+                if vm_details and "vm_vcenter_info" in vm_details:
+                    vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+            return vm_moref_id
+
+        except Exception as exp:
+            self.logger.error("Error occurred while getting VM moref ID "\
+                             " for VM : {}".format(exp))
+            return None
+
+
+    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
+        """
+            Method to get vApp template details
+                Args :
+                    catalogs - list of VDC catalogs
+                    image_id - Catalog ID to find
+                    template_name : template name in catalog
+                Returns:
+                    parsed_respond : dict of vApp tempalte details
+        """
+        parsed_response = {}
+
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCD")
+
+        try:
+            org, vdc = self.get_vdc_details()
+            catalog = self.get_catalog_obj(image_id, catalogs)
+            if catalog:
+                items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
+                catalog_items = [items.attrib]
+
+                if len(catalog_items) == 1:
+                    headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+
+                    response = self.perform_request(req_type='GET',
+                                                    url=catalog_items[0].get('href'),
+                                                    headers=headers)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_tempalte_href = entity.get("href")
+                    #get vapp details and parse moref id
+
+                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                                  'vmw': 'http://www.vmware.com/schema/ovf',
+                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
+                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
+                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
+                                }
+
+                    if vca._session:
+                        response = self.perform_request(req_type='GET',
+                                                    url=vapp_tempalte_href,
+                                                    headers=headers)
+
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug("REST API call {} failed. Return status code {}".format(
+                                                vapp_tempalte_href, response.status_code))
+
+                        else:
+                            xmlroot_respond = XmlElementTree.fromstring(response.content)
+                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                            if children_section is not None:
+                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                            if vCloud_extension_section is not None:
+                                vm_vcenter_info = {}
+                                vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                                vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                                if vmext is not None:
+                                    vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                                parsed_response["vm_vcenter_info"]= vm_vcenter_info
+
+        except Exception as exp :
+            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+
+        return parsed_response
+
+
+    def rollback_newvm(self, vapp_uuid, msg , exp_type="Genric"):
+        """
+            Method to delete vApp
+                Args :
+                    vapp_uuid - vApp UUID
+                    msg - Error message to be logged
+                    exp_type : Exception type
+                Returns:
+                    None
+        """
+        if vapp_uuid:
+            status = self.delete_vminstance(vapp_uuid)
+        else:
+            msg = "No vApp ID"
+        self.logger.error(msg)
+        if exp_type == "Genric":
+            raise vimconn.vimconnException(msg)
+        elif exp_type == "NotFound":
+            raise vimconn.vimconnNotFoundException(message=msg)
+
+    def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
+        """
+            Method to attach SRIOV adapters to VM
+
+             Args:
+                vapp_uuid - uuid of vApp/VM
+                sriov_nets - SRIOV devices infromation as specified in VNFD (flavor)
+                vmname_andid - vmname
+
+            Returns:
+                The status of add SRIOV adapter task , vm object and
+                vcenter_conect object
+        """
+        vm_obj = None
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+
+        if vm_moref_id:
+            try:
+                no_of_sriov_devices = len(sriov_nets)
+                if no_of_sriov_devices > 0:
+                    #Get VM and its host
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                    self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                    if host_obj and vm_obj:
+                        #get SRIOV devies from host on which vapp is currently installed
+                        avilable_sriov_devices = self.get_sriov_devices(host_obj,
+                                                                no_of_sriov_devices,
+                                                                )
+
+                        if len(avilable_sriov_devices) == 0:
+                            #find other hosts with active pci devices
+                            new_host_obj , avilable_sriov_devices = self.get_host_and_sriov_devices(
+                                                                content,
+                                                                no_of_sriov_devices,
+                                                                )
+
+                            if new_host_obj is not None and len(avilable_sriov_devices)> 0:
+                                #Migrate vm to the host where SRIOV devices are available
+                                self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
+                                                                                    new_host_obj))
+                                task = self.relocate_vm(new_host_obj, vm_obj)
+                                if task is not None:
+                                    result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                    self.logger.info("Migrate VM status: {}".format(result))
+                                    host_obj = new_host_obj
+                                else:
+                                    self.logger.info("Fail to migrate VM : {}".format(result))
+                                    raise vimconn.vimconnNotFoundException(
+                                    "Fail to migrate VM : {} to host {}".format(
+                                                    vmname_andid,
+                                                    new_host_obj)
+                                        )
+
+                        if host_obj is not None and avilable_sriov_devices is not None and len(avilable_sriov_devices)> 0:
+                            #Add SRIOV devices one by one
+                            for sriov_net in sriov_nets:
+                                network_name = sriov_net.get('net_id')
+                                dvs_portgr_name = self.create_dvPort_group(network_name)
+                                if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
+                                    #add vlan ID ,Modify portgroup for vlan ID
+                                    self.configure_vlanID(content, vcenter_conect, network_name)
+
+                                task = self.add_sriov_to_vm(content,
+                                                            vm_obj,
+                                                            host_obj,
+                                                            network_name,
+                                                            avilable_sriov_devices[0]
+                                                            )
+                                if task:
+                                    status= self.wait_for_vcenter_task(task, vcenter_conect)
+                                    if status:
+                                        self.logger.info("Added SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                else:
+                                    self.logger.error("Fail to add SRIOV {} to VM {}".format(
+                                                                        no_of_sriov_devices,
+                                                                        str(vm_obj)))
+                                    raise vimconn.vimconnUnexpectedResponse(
+                                    "Fail to add SRIOV adapter in VM ".format(str(vm_obj))
+                                        )
+                            return True, vm_obj, vcenter_conect
+                        else:
+                            self.logger.error("Currently there is no host with"\
+                                              " {} number of avaialble SRIOV "\
+                                              "VFs required for VM {}".format(
+                                                                no_of_sriov_devices,
+                                                                vmname_andid)
+                                              )
+                            raise vimconn.vimconnNotFoundException(
+                                    "Currently there is no host with {} "\
+                                    "number of avaialble SRIOV devices required for VM {}".format(
+                                                                            no_of_sriov_devices,
+                                                                            vmname_andid))
+                else:
+                    self.logger.debug("No infromation about SRIOV devices {} ",sriov_nets)
+
+            except vmodl.MethodFault as error:
+                self.logger.error("Error occurred while adding SRIOV {} ",error)
+        return None, vm_obj, vcenter_conect
+
+
+    def get_sriov_devices(self,host, no_of_vfs):
+        """
+            Method to get the details of SRIOV devices on given host
+             Args:
+                host - vSphere host object
+                no_of_vfs - number of VFs needed on host
+
+             Returns:
+                array of SRIOV devices
+        """
+        sriovInfo=[]
+        if host:
+            for device in host.config.pciPassthruInfo:
+                if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
+                    if device.numVirtualFunction >= no_of_vfs:
+                        sriovInfo.append(device)
+                        break
+        return sriovInfo
+
+
+    def get_host_and_sriov_devices(self, content, no_of_vfs):
+        """
+         Method to get the details of SRIOV devices infromation on all hosts
+
+            Args:
+                content - vSphere host object
+                no_of_vfs - number of pci VFs needed on host
+
+            Returns:
+                 array of SRIOV devices and host object
+        """
+        host_obj = None
+        sriov_device_objs = None
+        try:
+            if content:
+                container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                            [vim.HostSystem], True)
+                for host in container.view:
+                    devices = self.get_sriov_devices(host, no_of_vfs)
+                    if devices:
+                        host_obj = host
+                        sriov_device_objs = devices
+                        break
+        except Exception as exp:
+            self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
+
+        return host_obj,sriov_device_objs
+
+
+    def add_sriov_to_vm(self,content, vm_obj, host_obj, network_name, sriov_device):
+        """
+         Method to add SRIOV adapter to vm
+
+            Args:
+                host_obj - vSphere host object
+                vm_obj - vSphere vm object
+                content - vCenter content object
+                network_name - name of distributed virtaul portgroup
+                sriov_device - SRIOV device info
+
+            Returns:
+                 task object
+        """
+        devices = []
+        vnic_label = "sriov nic"
+        try:
+            dvs_portgr = self.get_dvport_group(network_name)
+            network_name = dvs_portgr.name
+            nic = vim.vm.device.VirtualDeviceSpec()
+            # VM device
+            nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+            nic.device = vim.vm.device.VirtualSriovEthernetCard()
+            nic.device.addressType = 'assigned'
+            #nic.device.key = 13016
+            nic.device.deviceInfo = vim.Description()
+            nic.device.deviceInfo.label = vnic_label
+            nic.device.deviceInfo.summary = network_name
+            nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+
+            nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
+            nic.device.backing.deviceName = network_name
+            nic.device.backing.useAutoDetect = False
+            nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+            nic.device.connectable.startConnected = True
+            nic.device.connectable.allowGuestControl = True
+
+            nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+            nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
+
+            devices.append(nic)
+            vmconf = vim.vm.ConfigSpec(deviceChange=devices)
+            task = vm_obj.ReconfigVM_Task(vmconf)
+            return task
+        except Exception as exp:
+            self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
+            return None
+
+
+    def create_dvPort_group(self, network_name):
+        """
+         Method to create disributed virtual portgroup
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                portgroup key
+        """
+        try:
+            new_network_name = [network_name, '-', str(uuid.uuid4())]
+            network_name=''.join(new_network_name)
+            vcenter_conect, content = self.get_vcenter_content()
+
+            dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
+            if dv_switch:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.name = network_name
+
+                dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
+                dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
+                dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
+
+                task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
+                self.wait_for_vcenter_task(task, vcenter_conect)
+
+                dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
+                if dvPort_group:
+                    self.logger.info("Created disributed virtaul port group: {}".format(dvPort_group))
+                    return dvPort_group.key
+            else:
+                self.logger.debug("No disributed virtual switch found with name {}".format(network_name))
+
+        except Exception as exp:
+            self.logger.error("Error occurred while creating disributed virtaul port group {}"\
+                             " : {}".format(network_name, exp))
+        return None
+
+    def reconfig_portgroup(self, content, dvPort_group_name , config_info={}):
+        """
+         Method to reconfigure disributed virtual portgroup
+
+            Args:
+                dvPort_group_name - name of disributed virtual portgroup
+                content - vCenter content object
+                config_info - disributed virtual portgroup configuration
+
+            Returns:
+                task object
+        """
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                dv_pg_spec.configVersion = dvPort_group.config.configVersion
+                dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                if "vlanID" in config_info:
+                    dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+                    dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
+
+                task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
+                return task
+            else:
+                return None
+        except Exception as exp:
+            self.logger.error("Error occurred while reconfiguraing disributed virtaul port group {}"\
+                             " : {}".format(dvPort_group_name, exp))
+            return None
+
+
+    def destroy_dvport_group(self , dvPort_group_name):
+        """
+         Method to destroy disributed virtual portgroup
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                True if portgroup successfully got deleted else false
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        try:
+            status = None
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                task = dvPort_group.Destroy_Task()
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+            return status
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} while deleting disributed virtaul port group {}".format(
+                                                                    exp, dvPort_group_name))
+            return None
+
+
+    def get_dvport_group(self, dvPort_group_name):
+        """
+        Method to get disributed virtual portgroup
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                portgroup object
+        """
+        vcenter_conect, content = self.get_vcenter_content()
+        dvPort_group = None
+        try:
+            container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
+            for item in container.view:
+                if item.key == dvPort_group_name:
+                    dvPort_group = item
+                    break
+            return dvPort_group
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
+                                                                            exp, dvPort_group_name))
+            return None
+
+    def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
+        """
+         Method to get disributed virtual portgroup vlanID
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                vlan ID
+        """
+        vlanId = None
+        try:
+            dvPort_group = self.get_dvport_group(dvPort_group_name)
+            if dvPort_group:
+                vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
+        except vmodl.MethodFault as exp:
+            self.logger.error("Caught vmodl fault {} for disributed virtaul port group {}".format(
+                                                                            exp, dvPort_group_name))
+        return vlanId
+
+
+    def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
+        """
+         Method to configure vlanID in disributed virtual portgroup vlanID
+
+            Args:
+                network_name - name of network/portgroup
+
+            Returns:
+                None
+        """
+        vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
+        if vlanID == 0:
+            #configure vlanID
+            vlanID = self.genrate_vlanID(dvPort_group_name)
+            config = {"vlanID":vlanID}
+            task = self.reconfig_portgroup(content, dvPort_group_name,
+                                    config_info=config)
+            if task:
+                status= self.wait_for_vcenter_task(task, vcenter_conect)
+                if status:
+                    self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
+                                                        dvPort_group_name,vlanID))
+            else:
+                self.logger.error("Fail reconfigure portgroup {} for vlanID{}".format(
+                                        dvPort_group_name, vlanID))
+
+
+    def genrate_vlanID(self, network_name):
+        """
+         Method to get unused vlanID
+            Args:
+                network_name - name of network/portgroup
+            Returns:
+                vlanID
+        """
+        vlan_id = None
+        used_ids = []
+        if self.config.get('vlanID_range') == None:
+            raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
+                        "at config value before creating sriov network with vlan tag")
+        if "used_vlanIDs" not in self.persistent_info:
+                self.persistent_info["used_vlanIDs"] = {}
+        else:
+            used_ids = list(self.persistent_info["used_vlanIDs"].values())
+
+        for vlanID_range in self.config.get('vlanID_range'):
+            start_vlanid , end_vlanid = vlanID_range.split("-")
+            if start_vlanid > end_vlanid:
+                raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
+                                                                        vlanID_range))
+
+            for id in xrange(int(start_vlanid), int(end_vlanid) + 1):
+            #For python3
+            #for id in range(int(start_vlanid), int(end_vlanid) + 1):
+                if id not in used_ids:
+                    vlan_id = id
+                    self.persistent_info["used_vlanIDs"][network_name] = vlan_id
+                    return vlan_id
+        if vlan_id is None:
+            raise vimconn.vimconnConflictException("All Vlan IDs are in use")
+
+
+    def get_obj(self, content, vimtype, name):
+        """
+         Get the vsphere object associated with a given text name
+        """
+        obj = None
+        container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
+        for item in container.view:
+            if item.name == name:
+                obj = item
+                break
+        return obj
+
+
+    def insert_media_to_vm(self, vapp, image_id):
+        """
+        Method to insert media CD-ROM (ISO image) from catalog to vm.
+        vapp - vapp object to get vm id
+        Image_id - image id for cdrom to be inerted to vm
+        """
+        # create connection object
+        vca = self.connect()
+        try:
+            # fetching catalog details
+            rest_url = "{}/api/catalog/{}".format(self.url, image_id)
+            if vca._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                response = self.perform_request(req_type='GET',
+                                                url=rest_url,
+                                                headers=headers)
+
+            if response.status_code != 200:
+                self.logger.error("REST call {} failed reason : {}"\
+                             "status code : {}".format(url_rest_call,
+                                                    response.content,
+                                               response.status_code))
+                raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
+                                                                    "catalog details")
+            # searching iso name and id
+            iso_name,media_id = self.get_media_details(vca, response.content)
+
+            if iso_name and media_id:
+                data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                     <ns6:MediaInsertOrEjectParams
+                     xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" 
+                     xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" 
+                     xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" 
+                     xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" 
+                     xmlns:ns6="http://www.vmware.com/vcloud/v1.5" 
+                     xmlns:ns7="http://www.vmware.com/schema/ovf" 
+                     xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" 
+                     xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
+                     <ns6:Media
+                        type="application/vnd.vmware.vcloud.media+xml"
+                        name="{}"
+                        id="urn:vcloud:media:{}"
+                        href="https://{}/api/media/{}"/>
+                     </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
+                                                                self.url,media_id)
+
+                for vms in vapp.get_all_vms():
+                    vm_id = vms.get('id').split(':')[-1]
+
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
+                    rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
+
+                    response = self.perform_request(req_type='POST',
+                                                       url=rest_url,
+                                                          data=data,
+                                                    headers=headers)
+
+                    if response.status_code != 202:
+                        error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
+                                    "Status code {}".format(response.text, response.status_code)
+                        self.logger.error(error_msg)
+                        raise vimconn.vimconnException(error_msg)
+                    else:
+                        task = self.get_task_from_response(response.content)
+                        result = self.client.get_task_monitor().wait_for_success(task=task)
+                        if result.get('status') == 'success':
+                            self.logger.info("insert_media_to_vm(): Sucessfully inserted media ISO"\
+                                                                    " image to vm {}".format(vm_id))
+
+        except Exception as exp:
+            self.logger.error("insert_media_to_vm() : exception occurred "\
+                                            "while inserting media CD-ROM")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def get_media_details(self, vca, content):
+        """
+        Method to get catalog item details
+        vca - connection object
+        content - Catalog details
+        Return - Media name, media id
+        """
+        cataloghref_list = []
+        try:
+            if content:
+                vm_list_xmlroot = XmlElementTree.fromstring(content)
+                for child in vm_list_xmlroot.iter():
+                    if 'CatalogItem' in child.tag:
+                        cataloghref_list.append(child.attrib.get('href'))
+                if cataloghref_list is not None:
+                    for href in cataloghref_list:
+                        if href:
+                            headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                            response = self.perform_request(req_type='GET',
+                                                                  url=href,
+                                                           headers=headers)
+                            if response.status_code != 200:
+                                self.logger.error("REST call {} failed reason : {}"\
+                                             "status code : {}".format(href,
+                                                           response.content,
+                                                      response.status_code))
+                                raise vimconn.vimconnException("get_media_details : Failed to get "\
+                                                                         "catalogitem details")
+                            list_xmlroot = XmlElementTree.fromstring(response.content)
+                            for child in list_xmlroot.iter():
+                                if 'Entity' in child.tag:
+                                    if 'media' in child.attrib.get('href'):
+                                        name = child.attrib.get('name')
+                                        media_id = child.attrib.get('href').split('/').pop()
+                                        return name,media_id
+                            else:
+                                self.logger.debug("Media name and id not found")
+                                return False,False
+        except Exception as exp:
+            self.logger.error("get_media_details : exception occurred "\
+                                               "getting media details")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def retry_rest(self, method, url, add_headers=None, data=None):
+        """ Method to get Token & retry respective REST request
+            Args:
+                api - REST API - Can be one of 'GET' or 'PUT' or 'POST'
+                url - request url to be used
+                add_headers - Additional headers (optional)
+                data - Request payload data to be passed in request
+            Returns:
+                response - Response of request
+        """
+        response = None
+
+        #Get token
+        self.get_token()
+
+        if self.client._session:
+                headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                           'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+        if add_headers:
+            headers.update(add_headers)
+
+        if method == 'GET':
+            response = self.perform_request(req_type='GET',
+                                            url=url,
+                                            headers=headers)
+        elif method == 'PUT':
+            response = self.perform_request(req_type='PUT',
+                                            url=url,
+                                            headers=headers,
+                                            data=data)
+        elif method == 'POST':
+            response = self.perform_request(req_type='POST',
+                                            url=url,
+                                            headers=headers,
+                                            data=data)
+        elif method == 'DELETE':
+            response = self.perform_request(req_type='DELETE',
+                                            url=url,
+                                            headers=headers)
+        return response
+
+
+    def get_token(self):
+        """ Generate a new token if expired
+
+            Returns:
+                The return client object that letter can be used to connect to vCloud director as admin for VDC
+        """
+        try:
+            self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
+                                                                                      self.user,
+                                                                                      self.org_name))
+            host = self.url
+            client = Client(host, verify_ssl_certs=False)
+            client.set_highest_supported_version()
+            client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
+            # connection object
+            self.client = client
+
+        except:
+            raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+                                                     "{} as user: {}".format(self.org_name, self.user))
+
+        if not client:
+            raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
+
+
+    def get_vdc_details(self):
+        """ Get VDC details using pyVcloud Lib
+
+            Returns org and vdc object
+        """
+        vdc = None
+        try:
+            org = Org(self.client, resource=self.client.get_org())
+            vdc = org.get_vdc(self.tenant_name)
+        except Exception as e:
+            # pyvcloud not giving a specific exception, Refresh nevertheless
+            self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
+
+        #Retry once, if failed by refreshing token
+        if vdc is None:
+            self.get_token()
+            org = Org(self.client, resource=self.client.get_org())
+            vdc = org.get_vdc(self.tenant_name)
+
+        return org, vdc
+
+
+    def perform_request(self, req_type, url, headers=None, data=None):
+        """Perform the POST/PUT/GET/DELETE request."""
+
+        #Log REST request details
+        self.log_request(req_type, url=url, headers=headers, data=data)
+        # perform request and return its result
+        if req_type == 'GET':
+            response = requests.get(url=url,
+                                headers=headers,
+                                verify=False)
+        elif req_type == 'PUT':
+            response = requests.put(url=url,
+                                headers=headers,
+                                data=data,
+                                verify=False)
+        elif req_type == 'POST':
+            response = requests.post(url=url,
+                                 headers=headers,
+                                 data=data,
+                                 verify=False)
+        elif req_type == 'DELETE':
+            response = requests.delete(url=url,
+                                 headers=headers,
+                                 verify=False)
+        #Log the REST response
+        self.log_response(response)
+
+        return response
+
+
+    def log_request(self, req_type, url=None, headers=None, data=None):
+        """Logs REST request details"""
+
+        if req_type is not None:
+            self.logger.debug("Request type: {}".format(req_type))
+
+        if url is not None:
+            self.logger.debug("Request url: {}".format(url))
+
+        if headers is not None:
+            for header in headers:
+                self.logger.debug("Request header: {}: {}".format(header, headers[header]))
+
+        if data is not None:
+            self.logger.debug("Request data: {}".format(data))
+
+
+    def log_response(self, response):
+        """Logs REST response details"""
+
+        self.logger.debug("Response status code: {} ".format(response.status_code))
+
+
+    def get_task_from_response(self, content):
+        """
+        content - API response content(response.content)
+        return task object
+        """
+        xmlroot = XmlElementTree.fromstring(content)
+        if xmlroot.tag.split('}')[1] == "Task":
+            return xmlroot
+        else:
+            for ele in xmlroot:
+                if ele.tag.split("}")[1] == "Tasks":
+                    task = ele[0]
+                    break
+            return task
+
+
+    def power_on_vapp(self,vapp_id, vapp_name):
+        """
+        vapp_id - vApp uuid
+        vapp_name - vAapp name
+        return - Task object
+        """
+        headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+
+        poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
+                                                                          vapp_id)
+        response = self.perform_request(req_type='POST',
+                                       url=poweron_href,
+                                        headers=headers)
+
+        if response.status_code != 202:
+            self.logger.error("REST call {} failed reason : {}"\
+                         "status code : {} ".format(poweron_href,
+                                                response.content,
+                                           response.status_code))
+            raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
+                                                      "vApp {}".format(vapp_name))
+        else:
+            poweron_task = self.get_task_from_response(response.content)
+            return poweron_task
+
+
diff --git a/RO-VIM-vmware/requirements.txt b/RO-VIM-vmware/requirements.txt
new file mode 100644
index 0000000..af74bad
--- /dev/null
+++ b/RO-VIM-vmware/requirements.txt
@@ -0,0 +1,25 @@
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+PyYAML
+requests
+netaddr
+pyvcloud==19.1.1
+pyvmomi
+progressbar
+prettytable
+# TODO py3 genisoimage
+git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro
diff --git a/RO-VIM-vmware/setup.py b/RO-VIM-vmware/setup.py
new file mode 100644
index 0000000..193102e
--- /dev/null
+++ b/RO-VIM-vmware/setup.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+from setuptools import setup
+
+_name = "osm_rovim_vmware"
+
+README = """
+================
+osm-rovim_vmware
+================
+
+osm-ro plugin for vmware VIM
+"""
+
+setup(
+    name=_name,
+    description='OSM ro vim plugin for vmware',
+    long_description=README,
+    version_command=('git describe --match v* --tags --long --dirty', 'pep440-git-full'),
+    # version=VERSION,
+    # python_requires='>3.5.0',
+    author='ETSI OSM',
+    # TODO py3 author_email='',
+    maintainer='OSM_TECH@LIST.ETSI.ORG',  # TODO py3
+    # TODO py3 maintainer_email='',
+    url='https://osm.etsi.org/gitweb/?p=osm/RO.git;a=summary',
+    license='Apache 2.0',
+
+    packages=[_name],
+    include_package_data=True,
+    dependency_links=["git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro"],
+    install_requires=[
+        "pyvcloud==19.1.1", "progressbar", "prettytable", "pyvmomi",
+        "requests", "netaddr", "PyYAML",
+        "osm-ro",
+    ],
+    setup_requires=['setuptools-version-command'],
+    entry_points={
+        'osm_rovim.plugins': ['rovim_vmware = osm_rovim_vmware.vimconn_vmware'],
+    },
+)
diff --git a/RO-VIM-vmware/stdeb.cfg b/RO-VIM-vmware/stdeb.cfg
new file mode 100644
index 0000000..ff50a2f
--- /dev/null
+++ b/RO-VIM-vmware/stdeb.cfg
@@ -0,0 +1,20 @@
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[DEFAULT]
+X-Python3-Version : >= 3.5
+Depends3: python3-requests, python3-netaddr, python3-yaml, python3-osm-ro, python3-pip,
+          genisoimage, python3-progressbar, python3-prettytable, python3-pyvmomi
diff --git a/RO-VIM-vmware/tox.ini b/RO-VIM-vmware/tox.ini
new file mode 100644
index 0000000..448b263
--- /dev/null
+++ b/RO-VIM-vmware/tox.ini
@@ -0,0 +1,42 @@
+##
+# Copyright VMware Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##
+
+[tox]
+envlist = py3
+toxworkdir={homedir}/.tox
+
+[testenv]
+basepython = python3
+install_command = python3 -m pip install -r requirements.txt -U {opts} {packages}
+# deps = -r{toxinidir}/test-requirements.txt
+commands=python3 -m unittest discover -v
+
+[testenv:flake8]
+basepython = python3
+deps = flake8
+commands = flake8 osm_rovim_vmware --max-line-length 120 \
+    --exclude .svn,CVS,.gz,.git,__pycache__,.tox,local,temp --ignore W291,W293,E226,W504
+
+[testenv:unittest]
+basepython = python3
+commands = python3 -m unittest osm_rovim_vmware.tests
+
+[testenv:build]
+basepython = python3
+deps = stdeb
+       setuptools-version-command
+commands = python3 setup.py --command-packages=stdeb.command bdist_deb
+