Changes in vimconn_vmware.py: 1. Feature #1137: Multi-disk support 2. Removed redundan...
[osm/RO.git] / vimconn_vmware.py
index 5d77aa4..f36de3e 100644
@@ -63,6 +63,7 @@ import hashlib
 import socket
 import struct
 import netaddr
+import random
 
 # global variable for vcd connector type
 STANDALONE = 'standalone'
@@ -71,13 +72,9 @@ STANDALONE = 'standalone'
 FLAVOR_RAM_KEY = 'ram'
 FLAVOR_VCPUS_KEY = 'vcpus'
 FLAVOR_DISK_KEY = 'disk'
-DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1",
-                      'dhcp_count':50,
-                      'subnet_address':"192.168.1.0/24",
+DEFAULT_IP_PROFILE = {'dhcp_count':50,
                       'dhcp_enabled':True,
-                      'dhcp_start_address':"192.168.1.3",
-                      'ip_version':"IPv4",
-                      'dns_address':"192.168.1.2"
+                      'ip_version':"IPv4"
                       }
 # global variable for wait time
 INTERVAL_TIME = 5
@@ -181,10 +178,6 @@ class vimconnector(vimconn.vimconnector):
         self.nsx_manager = None
         self.nsx_user = None
         self.nsx_password = None
-        self.vcenter_ip = None
-        self.vcenter_port = None
-        self.vcenter_user = None
-        self.vcenter_password = None
 
         if tenant_name is not None:
             orgnameandtenant = tenant_name.split(":")
@@ -1019,6 +1012,25 @@ class vimconnector(vimconn.vimconnector):
                 return catalog.name
         return None
 
+    def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
+        """  Method check catalog and return catalog name lookup done by catalog UUID.
+
+        Args
+            catalog_name: catalog name as string
+            catalogs:  list of catalogs.
+
+        Return: catalogs name or None
+        """
+
+        if not self.validate_uuid4(uuid_string=catalog_uuid):
+            return None
+
+        for catalog in catalogs:
+            catalog_id = catalog.get_id().split(":")[3]
+            if catalog_id == catalog_uuid:
+                return catalog
+        return None
+
     def get_image_id_from_path(self, path=None, progress=False):
         """  Method upload OVF image to vCloud director.
 
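A note on get_catalog_obj() above: catalog.get_id() is assumed to return a vCloud URN of the form "urn:vcloud:catalog:<uuid>", so index 3 of the ":"-split is the bare UUID compared against catalog_uuid. A minimal sketch of that extraction under this assumption (the URN value is a placeholder, not a real catalog):

    # Hypothetical catalog URN; format assumed to be urn:vcloud:catalog:<uuid>
    catalog_urn = "urn:vcloud:catalog:3f1a2b4c-aaaa-bbbb-cccc-123456789abc"
    catalog_id = catalog_urn.split(":")[3]
    # catalog_id -> "3f1a2b4c-aaaa-bbbb-cccc-123456789abc"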
@@ -1244,8 +1256,8 @@ class vimconnector(vimconn.vimconnector):
         """
 
         self.logger.info("Creating new instance for entry {}".format(name))
-        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {}".format(
-                                    description, start, image_id, flavor_id, net_list, cloud_config))
+        self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {}".format(
+                                    description, start, image_id, flavor_id, net_list, cloud_config, disk_list))
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1390,13 +1402,41 @@ class vimconnector(vimconn.vimconnector):
                                                             pci_devices_info,
                                                             vmname_andid)
                                  )
-        # add vm disk
+        # Modify vm disk
         if vm_disk:
             #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
             result = self.modify_vm_disk(vapp_uuid, vm_disk)
             if result :
                 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
 
+        #Add new or existing disks to vApp
+        if disk_list:
+            added_existing_disk = False
+            for disk in disk_list:
+                if "image_id" in disk and disk["image_id"] is not None:
+                    self.logger.debug("Adding existing disk from image {} to vm {} ".format(
+                                                                    disk["image_id"] , vapp_uuid))
+                    self.add_existing_disk(catalogs=catalogs,
+                                           image_id=disk["image_id"],
+                                           size = disk["size"],
+                                           template_name=templateName,
+                                           vapp_uuid=vapp_uuid
+                                           )
+                    added_existing_disk = True
+                else:
+                    #Wait till added existing disk gets reflected into vCD database/API
+                    if added_existing_disk:
+                        time.sleep(5)
+                        added_existing_disk = False
+                    self.add_new_disk(vca, vapp_uuid, disk['size'])
+
+        if numas:
+            # Assigning numa affinity setting
+            for numa in numas:
+                if 'paired-threads-id' in numa:
+                    paired_threads_id = numa['paired-threads-id']
+                    self.set_numa_affinity(vapp_uuid, paired_threads_id)
+
         # add NICs & connect to networks in netlist
         try:
             self.logger.info("Request to connect VM to a network: {}".format(net_list))
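The disk_list handling above expects each entry to be a dict with a 'size' in GB and, for disks cloned from an existing image, an 'image_id'; entries without an image_id are created as empty disks via add_new_disk(). An illustrative disk_list under that assumption (the UUID is a placeholder):

    # Illustrative disk_list as consumed by the loop above (values are placeholders)
    disk_list = [
        {"image_id": "a3b5c7d9-0000-1111-2222-333344445555", "size": 10},   # disk cloned from an existing image
        {"image_id": None, "size": 20},                                      # 20 GB empty disk, added via add_new_disk()
    ]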
@@ -1431,15 +1471,32 @@ class vimconnector(vimconn.vimconnector):
                         if type(task) is GenericTask:
                             vca.block_until_completed(task)
                         # connect network to VM - with all DHCP by default
-                        self.logger.info("new_vminstance(): Connecting VM to a network {}".format(nets[0].name))
-                        task = vapp.connect_vms(nets[0].name,
-                                                connection_index=nicIndex,
-                                                connections_primary_index=primary_nic_index,
-                                                ip_allocation_mode='DHCP')
-                        if type(task) is GenericTask:
-                            vca.block_until_completed(task)
+
+                        type_list = ['PF','VF','VFnotShared']
+                        if 'type' in net and net['type'] not in type_list:
+                            # fetching nic type from vnf
+                            if 'model' in net:
+                                nic_type = net['model']
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                          "to a network {}".format(nets[0].name))
+                                self.add_network_adapter_to_vms(vapp, nets[0].name,
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net,
+                                                                nic_type=nic_type)
+                            else:
+                                self.logger.info("new_vminstance(): adding network adapter "\
+                                                         "to a network {}".format(nets[0].name))
+                                self.add_network_adapter_to_vms(vapp, nets[0].name,
+                                                                primary_nic_index,
+                                                                nicIndex,
+                                                                net)
                 nicIndex += 1
 
+            # cloud-init for ssh-key injection
+            if cloud_config:
+                self.cloud_init(vapp,cloud_config)
+
             # deploy and power on vm
             self.logger.debug("new_vminstance(): Deploying vApp {} ".format(name))
             deploytask = vapp.deploy(powerOn=False)
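The adapter selection above skips interfaces whose 'type' is in ['PF', 'VF', 'VFnotShared'] (SR-IOV/passthrough interfaces handled elsewhere) and otherwise calls add_network_adapter_to_vms(), passing net['model'] as nic_type when the descriptor names an adapter model. A sketch of a net entry that takes the nic_type branch (values are illustrative; only keys used by this code are shown):

    # Illustrative net entry for the nic_type branch above
    net = {"type": "virtual",          # anything outside ['PF', 'VF', 'VFnotShared']
           "model": "VMXNET3",         # forwarded to add_network_adapter_to_vms() as nic_type
           "floating_ip": False,       # POOL allocation when True (see add_network_adapter_to_vms)
           "ip_address": None}         # MANUAL allocation when an address is given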
@@ -1704,39 +1761,6 @@ class vimconnector(vimconn.vimconnector):
 
         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
 
-        mac_ip_addr={}
-        rheaders = {'Content-Type': 'application/xml'}
-        iso_edges = ['edge-2','edge-3','edge-6','edge-7','edge-8','edge-9','edge-10']
-
-        try:
-            for edge in iso_edges:
-                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
-                self.logger.debug("refresh_vms_status: NSX Manager url: {}".format(nsx_api_url))
-
-                resp = requests.get(self.nsx_manager + nsx_api_url,
-                                    auth = (self.nsx_user, self.nsx_password),
-                                    verify = False, headers = rheaders)
-
-                if resp.status_code == requests.codes.ok:
-                    dhcp_leases = XmlElementTree.fromstring(resp.text)
-                    for child in dhcp_leases:
-                        if child.tag == 'dhcpLeaseInfo':
-                            dhcpLeaseInfo = child
-                            for leaseInfo in dhcpLeaseInfo:
-                                for elem in leaseInfo:
-                                    if (elem.tag)=='macAddress':
-                                        mac_addr = elem.text
-                                    if (elem.tag)=='ipAddress':
-                                        ip_addr = elem.text
-                                if (mac_addr) is not None:
-                                    mac_ip_addr[mac_addr]= ip_addr
-                    self.logger.debug("NSX Manager DHCP Lease info: mac_ip_addr : {}".format(mac_ip_addr))
-                else:
-                    self.logger.debug("Error occurred while getting DHCP lease info from NSX Manager: {}".format(resp.content))
-        except KeyError:
-            self.logger.debug("Error in response from NSX Manager {}".format(KeyError.message))
-            self.logger.debug(traceback.format_exc())
-
         vca = self.connect()
         if not vca:
             raise vimconn.vimconnConnectionException("self.connect() is failed.")
@@ -1746,6 +1770,7 @@ class vimconnector(vimconn.vimconnector):
             raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
 
         vms_dict = {}
+        nsx_edge_list = []
         for vmuuid in vm_list:
             vmname = self.get_namebyvappid(vca, vdc, vmuuid)
             if vmname is not None:
@@ -1767,12 +1792,19 @@ class vimconnector(vimconn.vimconnector):
                         for vm_network in vapp_network:
                             if vm_network['name'] == vmname:
                                 #Assign IP Address based on MAC Address in NSX DHCP lease info
-                                for mac_adres,ip_adres in mac_ip_addr.iteritems():
-                                    if mac_adres == vm_network['mac']:
-                                        vm_network['ip']=ip_adres
+                                if vm_network['ip'] is None:
+                                    if not nsx_edge_list:
+                                        nsx_edge_list = self.get_edge_details()
+                                        if nsx_edge_list is None:
+                                            raise vimconn.vimconnException("refresh_vms_status:"\
+                                                                           "Failed to get edge details from NSX Manager")
+                                    if vm_network['mac'] is not None:
+                                        vm_network['ip'] = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_network['mac'])
+
+                                vm_net_id = self.get_network_id_by_name(vm_network['network_name'])
                                 interface = {"mac_address": vm_network['mac'],
-                                             "vim_net_id": self.get_network_id_by_name(vm_network['network_name']),
-                                             "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']),
+                                             "vim_net_id": vm_net_id,
+                                             "vim_interface_id": vm_net_id,
                                              'ip_address': vm_network['ip']}
                                 # interface['vim_info'] = yaml.safe_dump(vm_network)
                                 vm_dict["interfaces"].append(interface)
@@ -1784,6 +1816,110 @@ class vimconnector(vimconn.vimconnector):
 
         return vms_dict
 
+
+    def get_edge_details(self):
+        """Get the NSX edge list from NSX Manager
+           Returns list of NSX edges
+        """
+        edge_list = []
+        rheaders = {'Content-Type': 'application/xml'}
+        nsx_api_url = '/api/4.0/edges'
+
+        self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
+
+        try:
+            resp = requests.get(self.nsx_manager + nsx_api_url,
+                                auth = (self.nsx_user, self.nsx_password),
+                                verify = False, headers = rheaders)
+            if resp.status_code == requests.codes.ok:
+                paged_Edge_List = XmlElementTree.fromstring(resp.text)
+                for edge_pages in paged_Edge_List:
+                    if edge_pages.tag == 'edgePage':
+                        for edge_summary in edge_pages:
+                            if edge_summary.tag == 'pagingInfo':
+                                for element in edge_summary:
+                                    if element.tag == 'totalCount' and element.text == '0':
+                                        raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
+                                                                       .format(self.nsx_manager))
+
+                            if edge_summary.tag == 'edgeSummary':
+                                for element in edge_summary:
+                                    if element.tag == 'id':
+                                        edge_list.append(element.text)
+                    else:
+                        raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                       .format(self.nsx_manager))
+
+                if not edge_list:
+                    raise vimconn.vimconnException("get_edge_details: "\
+                                                   "No NSX edge details found: {}"
+                                                   .format(self.nsx_manager))
+                else:
+                    self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
+                    return edge_list
+            else:
+                self.logger.debug("get_edge_details: "
+                                  "Failed to get NSX edge details from NSX Manager: {}"
+                                  .format(resp.content))
+                return None
+
+        except Exception as exp:
+            self.logger.debug("get_edge_details: "\
+                              "Failed to get NSX edge details from NSX Manager: {}"
+                              .format(exp))
+            raise vimconn.vimconnException("get_edge_details: "\
+                                           "Failed to get NSX edge details from NSX Manager: {}"
+                                           .format(exp))
+
+
+    def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
+        """Get IP address details from NSX edges, using the MAC address
+           PARAMS: nsx_edges : List of NSX edges
+                   mac_address : Find IP address corresponding to this MAC address
+           Returns: IP address corresponding to the provided MAC address
+        """
+
+        ip_addr = None
+        rheaders = {'Content-Type': 'application/xml'}
+
+        self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
+
+        try:
+            for edge in nsx_edges:
+                nsx_api_url = '/api/4.0/edges/'+ edge +'/dhcp/leaseInfo'
+
+                resp = requests.get(self.nsx_manager + nsx_api_url,
+                                    auth = (self.nsx_user, self.nsx_password),
+                                    verify = False, headers = rheaders)
+
+                if resp.status_code == requests.codes.ok:
+                    dhcp_leases = XmlElementTree.fromstring(resp.text)
+                    for child in dhcp_leases:
+                        if child.tag == 'dhcpLeaseInfo':
+                            dhcpLeaseInfo = child
+                            for leaseInfo in dhcpLeaseInfo:
+                                edge_mac_addr = None   #reset per lease entry to avoid matching a stale MAC
+                                for elem in leaseInfo:
+                                    if elem.tag == 'macAddress':
+                                        edge_mac_addr = elem.text
+                                    if elem.tag == 'ipAddress':
+                                        ip_addr = elem.text
+                                if edge_mac_addr is not None:
+                                    if edge_mac_addr == mac_address:
+                                        self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
+                                                          .format(ip_addr, mac_address,edge))
+                                        return ip_addr
+                else:
+                    self.logger.debug("get_ipaddr_from_NSXedge: "\
+                                      "Error occurred while getting DHCP lease info from NSX Manager: {}"
+                                      .format(resp.content))
+
+            self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+            return None
+
+        except XmlElementTree.ParseError as Err:
+            self.logger.debug("ParseError in response from NSX Manager {}".format(Err.message), exc_info=True)
+
+
     def action_vminstance(self, vm__vim_uuid=None, action_dict=None):
         """Send and action over a VM instance from VIM
         Returns the vm_id if the action was successfully sent to the VIM"""
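get_ipaddr_from_NSXedge() above walks the XML returned by the NSX '/dhcp/leaseInfo' endpoint and returns the <ipAddress> of the lease whose <macAddress> matches. A self-contained sketch of that traversal against an assumed, simplified payload (the real NSX response may carry more elements):

    import xml.etree.ElementTree as XmlElementTree

    # Assumed/simplified leaseInfo payload, for illustration only
    sample = """<dhcpLeases>
                  <dhcpLeaseInfo>
                    <leaseInfo>
                      <macAddress>00:50:56:01:02:03</macAddress>
                      <ipAddress>192.168.5.23</ipAddress>
                    </leaseInfo>
                  </dhcpLeaseInfo>
                </dhcpLeases>"""

    wanted_mac = "00:50:56:01:02:03"
    for child in XmlElementTree.fromstring(sample):
        if child.tag == 'dhcpLeaseInfo':
            for leaseInfo in child:
                if leaseInfo.findtext('macAddress') == wanted_mac:
                    print(leaseInfo.findtext('ipAddress'))   # -> 192.168.5.23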
@@ -1813,56 +1949,54 @@ class vimconnector(vimconn.vimconnector):
             if "start" in action_dict:
                 vm_info = the_vapp.get_vms_details()
                 vm_status = vm_info[0]['status']
-                self.logger.info("Power on vApp: vm_status:{} {}".format(type(vm_status),vm_status))
+                self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
                 if vm_status == "Suspended" or vm_status == "Powered off":
                     power_on_task = the_vapp.poweron()
-                    if power_on_task is not None and type(power_on_task) is GenericTask:
-                        result = vca.block_until_completed(power_on_task)
-                        if result:
-                            self.logger.info("action_vminstance: Powered on vApp: {}".format(vapp_name))
-                        else:
-                            self.logger.info("action_vminstance: Failed to power on vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Wait for vApp {} to power on".format(vapp_name))
-            elif "rebuild" in action_dict:
-                self.logger.info("action_vminstance: Rebuilding vApp: {}".format(vapp_name))
-                power_on_task = the_vapp.deploy(powerOn=True)
-                if type(power_on_task) is GenericTask:
                     result = vca.block_until_completed(power_on_task)
-                    if result:
-                        self.logger.info("action_vminstance: Rebuilt vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Failed to rebuild vApp: {}".format(vapp_name))
-                else:
-                    self.logger.info("action_vminstance: Wait for vApp rebuild {} to power on".format(vapp_name))
+                    self.instance_actions_result("start", result, vapp_name)
+            elif "rebuild" in action_dict:
+                self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
+                rebuild_task = the_vapp.deploy(powerOn=True)
+                result = vca.block_until_completed(rebuild_task)
+                self.instance_actions_result("rebuild", result, vapp_name)
             elif "pause" in action_dict:
-                pass
-                ## server.pause()
+                self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
+                pause_task = the_vapp.undeploy(action='suspend')
+                result = vca.block_until_completed(pause_task)
+                self.instance_actions_result("pause", result, vapp_name)
             elif "resume" in action_dict:
-                pass
-                ## server.resume()
+                self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
+                power_task = the_vapp.poweron()
+                result = vca.block_until_completed(power_task)
+                self.instance_actions_result("resume", result, vapp_name)
             elif "shutoff" in action_dict or "shutdown" in action_dict:
+                action_name , value = action_dict.items()[0]
+                self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
                 power_off_task = the_vapp.undeploy(action='powerOff')
-                if type(power_off_task) is GenericTask:
-                    result = vca.block_until_completed(power_off_task)
-                    if result:
-                        self.logger.info("action_vminstance: Powered off vApp: {}".format(vapp_name))
-                    else:
-                        self.logger.info("action_vminstance: Failed to power off vApp: {}".format(vapp_name))
+                result = vca.block_until_completed(power_off_task)
+                if action_name == "shutdown":
+                    self.instance_actions_result("shutdown", result, vapp_name)
                 else:
-                    self.logger.info("action_vminstance: Wait for vApp {} to power off".format(vapp_name))
+                    self.instance_actions_result("shutoff", result, vapp_name)
             elif "forceOff" in action_dict:
-                the_vapp.reset()
-            elif "terminate" in action_dict:
-                the_vapp.delete()
-            # elif "createImage" in action_dict:
-            #     server.create_image()
+                result = the_vapp.undeploy(action='force')
+                self.instance_actions_result("forceOff", result, vapp_name)
+            elif "reboot" in action_dict:
+                self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
+                reboot_task = the_vapp.reboot()
             else:
-                pass
+                raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
+            return vm__vim_uuid
         except Exception as exp :
             self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
             raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
 
+    def instance_actions_result(self, action, result, vapp_name):
+        if result:
+            self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
+        else:
+            self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
+
     def get_vminstance_console(self, vm_id, console_type="vnc"):
         """
         Get a console for the virtual machine
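With the rework above, action_vminstance() handles exactly one recognised key in action_dict (start, rebuild, pause, resume, shutoff/shutdown, forceOff, reboot), raises vimconnException for anything else instead of silently passing, and returns the vApp UUID on success. A hedged call sketch (the connector instance and UUID are placeholders):

    # Illustrative calls; 'connector' is a vimconnector instance, the UUID a placeholder
    vm_uuid = "11111111-2222-3333-4444-555555555555"
    connector.action_vminstance(vm__vim_uuid=vm_uuid, action_dict={"shutdown": None})
    connector.action_vminstance(vm__vim_uuid=vm_uuid, action_dict={"start": None})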
@@ -2510,20 +2644,25 @@ class vimconnector(vimconn.vimconnector):
                 #Configure IP profile of the network
                 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
 
+                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
+                    subnet_rand = random.randint(0, 255)
+                    ip_base = "192.168.{}.".format(subnet_rand)
+                    ip_profile['subnet_address'] = ip_base + "0/24"
+                else:
+                    ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
+
                 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
-                    ip_profile['gateway_address']=DEFAULT_IP_PROFILE['gateway_address']
+                    ip_profile['gateway_address']=ip_base + "1"
                 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
                     ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
-                if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
-                    ip_profile['subnet_address']=DEFAULT_IP_PROFILE['subnet_address']
                 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
                     ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
                 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
-                    ip_profile['dhcp_start_address']=DEFAULT_IP_PROFILE['dhcp_start_address']
+                    ip_profile['dhcp_start_address']=ip_base + "3"
                 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
                     ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
                 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
-                    ip_profile['dns_address']=DEFAULT_IP_PROFILE['dns_address']
+                    ip_profile['dns_address']=ip_base + "2"
 
                 gateway_address=ip_profile['gateway_address']
                 dhcp_count=int(ip_profile['dhcp_count'])
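The defaulting above now derives every missing address from the subnet instead of the old hard-coded 192.168.1.x values: when no subnet_address is supplied a random 192.168.<n>.0/24 is generated, and the gateway, DNS and DHCP start addresses become .1, .2 and .3 of whichever base applies. A standalone sketch of the derivation (mirroring the rsplit logic above):

    # Standalone sketch of the ip_base derivation used above
    ip_profile = {'subnet_address': "10.20.30.0/24"}                   # example caller-supplied profile
    ip_base = ip_profile['subnet_address'].rsplit('.', 1)[0] + '.'     # -> "10.20.30."
    gateway_address    = ip_base + "1"                                 # -> "10.20.30.1"
    dns_address        = ip_base + "2"                                 # -> "10.20.30.2"
    dhcp_start_address = ip_base + "3"                                 # -> "10.20.30.3"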
@@ -2552,64 +2691,36 @@ class vimconnector(vimconn.vimconnector):
                 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
                 add_vdc_rest_url = ''.join(url_list)
 
-            if net_type=='ptp':
-                fence_mode="isolated"
-                isshared='false'
-                is_inherited='false'
-                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
-                                <Description>Openmano created</Description>
-                                        <Configuration>
-                                            <IpScopes>
-                                                <IpScope>
-                                                    <IsInherited>{1:s}</IsInherited>
-                                                    <Gateway>{2:s}</Gateway>
-                                                    <Netmask>{3:s}</Netmask>
-                                                    <Dns1>{4:s}</Dns1>
-                                                    <IsEnabled>{5:s}</IsEnabled>
-                                                    <IpRanges>
-                                                        <IpRange>
-                                                            <StartAddress>{6:s}</StartAddress>
-                                                            <EndAddress>{7:s}</EndAddress>
-                                                        </IpRange>
-                                                    </IpRanges>
-                                                </IpScope>
-                                            </IpScopes>
-                                            <FenceMode>{8:s}</FenceMode>
-                                        </Configuration>
-                                        <IsShared>{9:s}</IsShared>
-                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
-                                                        subnet_address, dns_address, dhcp_enabled,
-                                                        dhcp_start_address, dhcp_end_address, fence_mode, isshared)
-
-            else:
-                fence_mode="bridged"
-                is_inherited='false'
-                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
-                                <Description>Openmano created</Description>
-                                        <Configuration>
-                                            <IpScopes>
-                                                <IpScope>
-                                                    <IsInherited>{1:s}</IsInherited>
-                                                    <Gateway>{2:s}</Gateway>
-                                                    <Netmask>{3:s}</Netmask>
-                                                    <Dns1>{4:s}</Dns1>
-                                                    <IsEnabled>{5:s}</IsEnabled>
-                                                    <IpRanges>
-                                                        <IpRange>
-                                                            <StartAddress>{6:s}</StartAddress>
-                                                            <EndAddress>{7:s}</EndAddress>
-                                                        </IpRange>
-                                                    </IpRanges>
-                                                </IpScope>
-                                            </IpScopes>
-                                            <ParentNetwork href="{8:s}"/>
-                                            <FenceMode>{9:s}</FenceMode>
-                                        </Configuration>
-                                        <IsShared>{10:s}</IsShared>
-                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
-                                                        subnet_address, dns_address, dhcp_enabled,
-                                                        dhcp_start_address, dhcp_end_address, available_networks,
-                                                        fence_mode, isshared)
+            #Creating all networks as Direct Org VDC type networks.
+            #Unused in case of Underlay (data/ptp) network interface.
+            fence_mode="bridged"
+            is_inherited='false'
+            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                            <Description>Openmano created</Description>
+                                    <Configuration>
+                                        <IpScopes>
+                                            <IpScope>
+                                                <IsInherited>{1:s}</IsInherited>
+                                                <Gateway>{2:s}</Gateway>
+                                                <Netmask>{3:s}</Netmask>
+                                                <Dns1>{4:s}</Dns1>
+                                                <IsEnabled>{5:s}</IsEnabled>
+                                                <IpRanges>
+                                                    <IpRange>
+                                                        <StartAddress>{6:s}</StartAddress>
+                                                        <EndAddress>{7:s}</EndAddress>
+                                                    </IpRange>
+                                                </IpRanges>
+                                            </IpScope>
+                                        </IpScopes>
+                                        <ParentNetwork href="{8:s}"/>
+                                        <FenceMode>{9:s}</FenceMode>
+                                    </Configuration>
+                                    <IsShared>{10:s}</IsShared>
+                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
+                                                    subnet_address, dns_address, dhcp_enabled,
+                                                    dhcp_start_address, dhcp_end_address, available_networks,
+                                                    fence_mode, isshared)
 
             headers = vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
@@ -3133,33 +3244,16 @@ class vimconnector(vimconn.vimconnector):
                 vcenter_conect object
         """
         vm_obj = None
-        vcenter_conect = None
         self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
-        try:
-            vm_vcenter_info = self.get_vm_vcenter_info(vapp_uuid)
-        except Exception as exp:
-            self.logger.error("Error occurred while getting vCenter infromationn"\
-                             " for VM : {}".format(exp))
-            raise vimconn.vimconnException(message=exp)
+        vcenter_conect, content = self.get_vcenter_content()
+        vm_moref_id = self.get_vm_moref_id(vapp_uuid)
 
-        if vm_vcenter_info["vm_moref_id"]:
-            context = None
-            if hasattr(ssl, '_create_unverified_context'):
-                context = ssl._create_unverified_context()
+        if vm_moref_id:
             try:
                 no_of_pci_devices = len(pci_devices)
                 if no_of_pci_devices > 0:
-                    vcenter_conect = SmartConnect(
-                                            host=vm_vcenter_info["vm_vcenter_ip"],
-                                            user=vm_vcenter_info["vm_vcenter_user"],
-                                            pwd=vm_vcenter_info["vm_vcenter_password"],
-                                            port=int(vm_vcenter_info["vm_vcenter_port"]),
-                                            sslContext=context)
-                    atexit.register(Disconnect, vcenter_conect)
-                    content = vcenter_conect.RetrieveContent()
-
                     #Get VM and its host
-                    host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                     self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
                     if host_obj and vm_obj:
                         #get PCI devies from host on which vapp is currently installed
@@ -3428,7 +3522,7 @@ class vimconnector(vimconn.vimconnector):
                                                                              exp))
         return task
 
-    def get_vm_vcenter_info(self , vapp_uuid):
+    def get_vm_vcenter_info(self):
         """
         Method to get details of vCenter and vm
 
@@ -3461,16 +3555,8 @@ class vimconnector(vimconn.vimconnector):
         else:
             raise vimconn.vimconnException(message="vCenter user password is not provided."\
                                            " Please provide vCenter user password while attaching datacenter to tenant in --config")
-        try:
-            vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
-            if vm_details and "vm_vcenter_info" in vm_details:
-                vm_vcenter_info["vm_moref_id"] = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
 
-            return vm_vcenter_info
-
-        except Exception as exp:
-            self.logger.error("Error occurred while getting vCenter infromationn"\
-                             " for VM : {}".format(exp))
+        return vm_vcenter_info
 
 
     def get_vm_pci_details(self, vmuuid):
@@ -3486,23 +3572,12 @@ class vimconnector(vimconn.vimconnector):
         """
         vm_pci_devices_info = {}
         try:
-            vm_vcenter_info = self.get_vm_vcenter_info(vmuuid)
-            if vm_vcenter_info["vm_moref_id"]:
-                context = None
-                if hasattr(ssl, '_create_unverified_context'):
-                    context = ssl._create_unverified_context()
-                vcenter_conect = SmartConnect(host=vm_vcenter_info["vm_vcenter_ip"],
-                                        user=vm_vcenter_info["vm_vcenter_user"],
-                                        pwd=vm_vcenter_info["vm_vcenter_password"],
-                                        port=int(vm_vcenter_info["vm_vcenter_port"]),
-                                        sslContext=context
-                                    )
-                atexit.register(Disconnect, vcenter_conect)
-                content = vcenter_conect.RetrieveContent()
-
+            vcenter_conect, content = self.get_vcenter_content()
+            vm_moref_id = self.get_vm_moref_id(vmuuid)
+            if vm_moref_id:
                 #Get VM and its host
                 if content:
-                    host_obj, vm_obj = self.get_vm_obj(content ,vm_vcenter_info["vm_moref_id"])
+                    host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
                     if host_obj and vm_obj:
                         vm_pci_devices_info["host_name"]= host_obj.name
                         vm_pci_devices_info["host_ip"]= host_obj.config.network.vnic[0].spec.ip.ipAddress
@@ -3521,3 +3596,726 @@ class vimconnector(vimconn.vimconnector):
                              " for VM : {}".format(exp))
             raise vimconn.vimconnException(message=exp)
 
+    def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
+        """
+            Method to add a network adapter of the given type to a vm
+            Args :
+                vapp - vApp object containing the target vm(s)
+                network_name - name of network
+                primary_nic_index - int value for primary nic index
+                nicIndex - int value for nic index
+                net - dict describing the network interface
+                nic_type - adapter model name to attach to the vm
+            Returns:
+                None
+        """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
+
+        try:
+            ip_address = None
+            floating_ip = False
+            if 'floating_ip' in net: floating_ip = net['floating_ip']
+
+            # Stub for ip_address feature
+            if 'ip_address' in net: ip_address = net['ip_address']
+
+            if floating_ip:
+                allocation_mode = "POOL"
+            elif ip_address:
+                allocation_mode = "MANUAL"
+            else:
+                allocation_mode = "DHCP"
+
+            if not nic_type:
+                for vms in vapp._get_vms():
+                    vm_id = (vms.id).split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+
+                    response = Http.get(url=url_rest_call,
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=vca.logger)
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                             "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                         "network connection section")
+
+                    data = response.content
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                                         allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+                    else:
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                          allocation_mode)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+
+                    headers = vca.vcloud_session.get_vcloud_headers()
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+                    response = Http.put(url=url_rest_call, headers=headers, data=data,
+                                                                   verify=vca.verify,
+                                                                   logger=vca.logger)
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {} ".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                            "network connection section")
+                    else:
+                        nic_task = taskType.parseString(response.content, True)
+                        if isinstance(nic_task, GenericTask):
+                            vca.block_until_completed(nic_task)
+                            self.logger.info("add_network_adapter_to_vms(): VM {} conneced to "\
+                                                               "default NIC type".format(vm_id))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
+                                                              "connect NIC type".format(vm_id))
+            else:
+                for vms in vapp._get_vms():
+                    vm_id = (vms.id).split(':')[-1]
+
+                    url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(vca.host, vm_id)
+
+                    response = Http.get(url=url_rest_call,
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=vca.logger)
+                    if response.status_code != 200:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {}".format(url_rest_call,
+                                                                   response.content,
+                                                              response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                        "network connection section")
+                    data = response.content
+                    if '<PrimaryNetworkConnectionIndex>' not in data:
+                        item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                <NetworkConnection network="{}">
+                                <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                <IsConnected>true</IsConnected>
+                                <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                <NetworkAdapterType>{}</NetworkAdapterType>
+                                </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                               allocation_mode, nic_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            item =  item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</ovf:Info>\n','</ovf:Info>\n{}\n'.format(item))
+                    else:
+                        new_item = """<NetworkConnection network="{}">
+                                    <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                    <IsConnected>true</IsConnected>
+                                    <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                    <NetworkAdapterType>{}</NetworkAdapterType>
+                                    </NetworkConnection>""".format(network_name, nicIndex,
+                                                                allocation_mode, nic_type)
+                        # Stub for ip_address feature
+                        if ip_address:
+                            ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                            new_item =  new_item.replace('</NetworkConnectionIndex>\n','</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+
+                        data = data.replace('</NetworkConnection>\n','</NetworkConnection>\n{}\n'.format(new_item))
+
+                    headers = vca.vcloud_session.get_vcloud_headers()
+                    headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+                    response = Http.put(url=url_rest_call, headers=headers, data=data,
+                                                                   verify=vca.verify,
+                                                                   logger=vca.logger)
+
+                    if response.status_code != 202:
+                        self.logger.error("REST call {} failed reason : {}"\
+                                            "status code : {}".format(url_rest_call,
+                                                                   response.content,
+                                                              response.status_code))
+                        raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                           "network connection section")
+                    else:
+                        nic_task = taskType.parseString(response.content, True)
+                        if isinstance(nic_task, GenericTask):
+                            vca.block_until_completed(nic_task)
+                            self.logger.info("add_network_adapter_to_vms(): VM {} "\
+                                               "conneced to NIC type {}".format(vm_id, nic_type))
+                        else:
+                            self.logger.error("add_network_adapter_to_vms(): VM {} "\
+                                               "failed to connect NIC type {}".format(vm_id, nic_type))
+        except Exception as exp:
+            self.logger.error("add_network_adapter_to_vms() : exception occurred "\
+                                               "while adding Network adapter")
+            raise vimconn.vimconnException(message=exp)
+
+
+    def set_numa_affinity(self, vmuuid, paired_threads_id):
+        """
+            Method to assign numa affinity in vm configuration parameters
+            Args :
+                vmuuid - vm uuid
+                paired_threads_id - one or more virtual processor
+                                    numbers
+            Returns:
+                None
+        """
+        try:
+            vm_moref_id , vm_vcenter_host , vm_vcenter_username, vm_vcenter_port = self.get_vcenter_info_rest(vmuuid)
+            if vm_moref_id and vm_vcenter_host and vm_vcenter_username:
+                context = None
+                if hasattr(ssl, '_create_unverified_context'):
+                    context = ssl._create_unverified_context()
+                    vcenter_conect = SmartConnect(host=vm_vcenter_host, user=vm_vcenter_username,
+                                  pwd=self.passwd, port=int(vm_vcenter_port),
+                                  sslContext=context)
+                    atexit.register(Disconnect, vcenter_conect)
+                    content = vcenter_conect.RetrieveContent()
+
+                    host_obj, vm_obj = self.get_vm_obj(content ,vm_moref_id)
+                    if vm_obj:
+                        config_spec = vim.vm.ConfigSpec()
+                        config_spec.extraConfig = []
+                        opt = vim.option.OptionValue()
+                        opt.key = 'numa.nodeAffinity'
+                        opt.value = str(paired_threads_id)
+                        config_spec.extraConfig.append(opt)
+                        task = vm_obj.ReconfigVM_Task(config_spec)
+                        if task:
+                            result = self.wait_for_vcenter_task(task, vcenter_conect)
+                            extra_config = vm_obj.config.extraConfig
+                            flag = False
+                            for opts in extra_config:
+                                if 'numa.nodeAffinity' in opts.key:
+                                    flag = True
+                                    self.logger.info("set_numa_affinity: Sucessfully assign numa affinity "\
+                                                             "value {} for vm {}".format(opt.value, vm_obj))
+                            if flag:
+                                return
+                    else:
+                        self.logger.error("set_numa_affinity: Failed to assign numa affinity")
+        except Exception as exp:
+            self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
+                                                       "for VM {} : {}".format(vm_obj, vm_moref_id))
+            raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
+                                                                           "affinity".format(exp))
+
+
+    def cloud_init(self, vapp, cloud_config):
+        """
+        Method to inject ssh-key
+        vapp - vapp object
+        cloud_config a dictionary with:
+                'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                'users': (optional) list of users to be inserted, each item is a dict with:
+                    'name': (mandatory) user name,
+                    'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                'user-data': (optional) string is a text script to be passed directly to cloud-init
+                'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                    'dest': (mandatory) string with the destination absolute path
+                    'encoding': (optional, by default text). Can be one of:
+                        'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                    'content' (mandatory): string with the content of the file
+                    'permissions': (optional) string with file permissions, typically octal notation '0644'
+                    'owner': (optional) file owner, string with the format 'owner:group'
+                'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+        """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("Failed to connect vCloud director")
+
+        try:
+            if isinstance(cloud_config, dict):
+                key_pairs = []
+                userdata = []
+                if "key-pairs" in cloud_config:
+                    key_pairs = cloud_config["key-pairs"]
+
+                if "users" in cloud_config:
+                    userdata = cloud_config["users"]
+
+            for key in key_pairs:
+                for user in userdata:
+                    if 'name' in user: user_name = user['name']
+                    if 'key-pairs' in user and len(user['key-pairs']) > 0:
+                        for user_key in user['key-pairs']:
+                            customize_script = """
+                        #!/bin/bash
+                        echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+                        if [ "$1" = "precustomization" ];then
+                            echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+                            if [ ! -d /root/.ssh ];then
+                                mkdir /root/.ssh
+                                chown root:root /root/.ssh
+                                chmod 700 /root/.ssh
+                                touch /root/.ssh/authorized_keys
+                                chown root:root /root/.ssh/authorized_keys
+                                chmod 600 /root/.ssh/authorized_keys
+                                # make centos with selinux happy
+                                which restorecon && restorecon -Rv /root/.ssh
+                                echo '{key}' >> /root/.ssh/authorized_keys
+                            else
+                                touch /root/.ssh/authorized_keys
+                                chown root:root /root/.ssh/authorized_keys
+                                chmod 600 /root/.ssh/authorized_keys
+                                echo '{key}' >> /root/.ssh/authorized_keys
+                            fi
+                            if [ -d /home/{user_name} ];then
+                                if [ ! -d /home/{user_name}/.ssh ];then
+                                    mkdir /home/{user_name}/.ssh
+                                    chown {user_name}:{user_name} /home/{user_name}/.ssh
+                                    chmod 700 /home/{user_name}/.ssh
+                                    touch /home/{user_name}/.ssh/authorized_keys
+                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
+                                    # make centos with selinux happy
+                                    which restorecon && restorecon -Rv /home/{user_name}/.ssh
+                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                                else
+                                    touch /home/{user_name}/.ssh/authorized_keys
+                                    chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                                    chmod 600 /home/{user_name}/.ssh/authorized_keys
+                                    echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                                fi
+                            fi
+                        fi""".format(key=key, user_name=user_name, user_key=user_key)
+
+                            for vm in vapp._get_vms():
+                                vm_name = vm.name
+                                task = vapp.customize_guest_os(vm_name, customization_script=customize_script)
+                                if isinstance(task, GenericTask):
+                                    vca.block_until_completed(task)
+                                    self.logger.info("cloud_init : customized guest os task "\
+                                                        "completed for VM {}".format(vm_name))
+                                else:
+                                    self.logger.error("cloud_init : task for customized guest os "\
+                                                               "failed for VM {}".format(vm_name))
+        except Exception as exp:
+            self.logger.error("cloud_init : exception occurred while injecting "\
+                                                                       "ssh-key")
+            raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
+                                                               "ssh-key".format(exp))
+
+
+    def add_new_disk(self, vca, vapp_uuid, disk_size):
+        """
+            Method to create an empty disk and attach it to the VM
+
+            Args:
+                vca - vCloud director connection object
+                vapp_uuid - is vapp identifier.
+                disk_size - size of disk to be created in GB
+
+            Returns:
+                None
+        """
+        status = False
+        vm_details = None
+        try:
+            #Disk size in GB, convert it into MB
+            if disk_size is not None:
+                disk_size_mb = int(disk_size) * 1024
+                vm_details = self.get_vapp_details_rest(vapp_uuid)
+
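+            #The disk_edit_href from the vApp virtual hardware section is the REST endpoint used to add the disk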
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+                disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+                status = self.add_new_disk_rest(vca, disk_href, disk_size_mb)
+
+        except Exception as exp:
+            msg = "Error occurred while creating new disk {}.".format(exp)
+            self.rollback_newvm(vapp_uuid, msg)
+
+        if status:
+            self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+        else:
+            #If failed to add disk, delete VM
+            msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def add_new_disk_rest(self, vca, disk_href, disk_size_mb):
+        """
+        Retrieves the vApp Disks section and adds a new empty disk
+
+        Args:
+            vca: vCloud director connection object
+            disk_href: Disk section href to add disk
+            disk_size_mb: Disk size in MB
+
+        Returns: Status of add new disk task
+        """
+        status = False
+        if not (vca.vcloud_session and vca.vcloud_session.organization):
+            self.logger.error("add_new_disk_rest: vCloud session is not available")
+            return status
+
+        response = Http.get(url=disk_href,
+                            headers=vca.vcloud_session.get_vcloud_headers(),
+                            verify=vca.verify,
+                            logger=vca.logger)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
+                              .format(disk_href, response.status_code))
+            return status
+        try:
+            #Find bus type & max of instance IDs assigned to disks
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+            instance_id = 0
+            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
+                if item.find("rasd:Description",namespaces).text == "Hard disk":
+                    inst_id = int(item.find("rasd:InstanceID" ,namespaces).text)
+                    if inst_id > instance_id:
+                        instance_id = inst_id
+                        disk_item = item.find("rasd:HostResource" ,namespaces)
+                        bus_subtype = disk_item.attrib["{"+namespaces['xmlns']+"}busSubType"]
+                        bus_type = disk_item.attrib["{"+namespaces['xmlns']+"}busType"]
+
+            instance_id = instance_id + 1
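+            #Build the new disk Item, reusing the bus type/subtype of the existing hard disk
+            #and assigning the next free instance ID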
+            new_item =   """<Item>
+                                <rasd:Description>Hard disk</rasd:Description>
+                                <rasd:ElementName>New disk</rasd:ElementName>
+                                <rasd:HostResource
+                                    xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                                    vcloud:capacity="{}"
+                                    vcloud:busSubType="{}"
+                                    vcloud:busType="{}"></rasd:HostResource>
+                                <rasd:InstanceID>{}</rasd:InstanceID>
+                                <rasd:ResourceType>17</rasd:ResourceType>
+                            </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
+
+            new_data = response.content
+            #Add new item at the bottom
+            new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
+
+            # Send PUT request to modify virtual hardware section with new disk
+            headers = vca.vcloud_session.get_vcloud_headers()
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = Http.put(url=disk_href,
+                                data=new_data,
+                                headers=headers,
+                                verify=vca.verify, logger=self.logger)
+
+            if response.status_code != 202:
+                self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
+                                  .format(disk_href, response.status_code, response.content))
+            else:
+                add_disk_task = taskType.parseString(response.content, True)
+                if type(add_disk_task) is GenericTask:
+                    status = vca.block_until_completed(add_disk_task)
+                    if not status:
+                        self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
+
+        except Exception as exp:
+            self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
+
+        return status
+
+
+    def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
+        """
+            Method to add an existing disk to a VM
+            Args :
+                catalogs - List of VDC catalogs
+                image_id - Catalog ID
+                size - Disk size in GB
+                template_name - Name of template in catalog
+                vapp_uuid - UUID of vApp
+            Returns:
+                None
+        """
+        disk_info = None
+        vcenter_conect, content = self.get_vcenter_content()
+        #find moref-id of vm in image
+        catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
+                                                         image_id=image_id,
+                                                        )
+
+        if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
+            if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
+                catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
+                if catalog_vm_moref_id:
+                    self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
+                    host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
+                    if catalog_vm_obj:
+                        #find existing disk
+                        disk_info = self.find_disk(catalog_vm_obj)
+                    else:
+                        exp_msg = "No VM with image id {} found".format(image_id)
+                        self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+        else:
+            exp_msg = "No Image found with image ID {} ".format(image_id)
+            self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+
+        if disk_info:
+            self.logger.info("Existing disk_info : {}".format(disk_info))
+            #get VM
+            vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+            host, vm_obj = self.get_vm_obj(content, vm_moref_id)
+            status = False
+            if vm_obj:
+                status = self.add_disk(vcenter_conect=vcenter_conect,
+                                       vm=vm_obj,
+                                       disk_info=disk_info,
+                                       size=size,
+                                       vapp_uuid=vapp_uuid
+                                       )
+            if status:
+                self.logger.info("Disk from image id {} added to {}".format(image_id,
+                                                                            vm_obj.config.name)
+                                 )
+        else:
+            msg = "No disk found with image id {} to add in VM {}".format(
+                                                            image_id,
+                                                            vapp_uuid)
+            self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
+
+
+    def find_disk(self, vm_obj):
+        """
+         Method to find details of existing disk in VM
+            Args :
+                vm_obj - vCenter object of VM
+            Returns:
+                disk_info : dict of disk details
+        """
+        disk_info = {}
+        if vm_obj:
+            try:
+                devices = vm_obj.config.hardware.device
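+                #Pick the first flat-file virtual disk and record its datastore, file path and capacity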
+                for device in devices:
+                    if type(device) is vim.vm.device.VirtualDisk:
+                        if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
+                            disk_info["full_path"] = device.backing.fileName
+                            disk_info["datastore"] = device.backing.datastore
+                            disk_info["capacityKB"] = device.capacityInKB
+                            break
+            except Exception as exp:
+                self.logger.error("find_disk() : exception occurred while "\
+                                  "getting existing disk details :{}".format(exp))
+        return disk_info
+
+
+    def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
+        """
+         Method to add existing disk in VM
+            Args :
+                vcenter_conect - vCenter connection object
+                vm - vCenter vm object
+                size - Disk size in GB
+                vapp_uuid - vApp UUID, used to roll back the VM on failure
+                disk_info : dict of disk details
+            Returns:
+                status : status of add disk task
+        """
+        datastore = disk_info["datastore"] if "datastore" in disk_info else None
+        fullpath = disk_info["full_path"] if "full_path" in disk_info else None
+        capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
+        if size is not None:
+            #Convert size from GB to KB
+            sizeKB = int(size) * 1024 * 1024
+            #compare size of existing disk and user given size. Assign whichever is greater
+            self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
+                                                                    sizeKB, capacityKB))
+            if sizeKB > capacityKB:
+                capacityKB = sizeKB
+
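+        #Attach the existing VMDK to the VM by its datastore path using ReconfigVM_Task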
+        if datastore and fullpath and capacityKB:
+            try:
+                spec = vim.vm.ConfigSpec()
+                # get all disks on a VM, set unit_number to the next available
+                unit_number = 0
+                for dev in vm.config.hardware.device:
+                    if hasattr(dev.backing, 'fileName'):
+                        unit_number = int(dev.unitNumber) + 1
+                        # unit_number 7 reserved for scsi controller
+                        if unit_number == 7:
+                            unit_number += 1
+                    if isinstance(dev, vim.vm.device.VirtualDisk):
+                        #vim.vm.device.VirtualSCSIController
+                        controller_key = dev.controllerKey
+
+                self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
+                                                                    unit_number, controller_key))
+                # add disk here
+                dev_changes = []
+                disk_spec = vim.vm.device.VirtualDeviceSpec()
+                disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+                disk_spec.device = vim.vm.device.VirtualDisk()
+                disk_spec.device.backing = \
+                    vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+                disk_spec.device.backing.thinProvisioned = True
+                disk_spec.device.backing.diskMode = 'persistent'
+                disk_spec.device.backing.datastore  = datastore
+                disk_spec.device.backing.fileName  = fullpath
+
+                disk_spec.device.unitNumber = unit_number
+                disk_spec.device.capacityInKB = capacityKB
+                disk_spec.device.controllerKey = controller_key
+                dev_changes.append(disk_spec)
+                spec.deviceChange = dev_changes
+                task = vm.ReconfigVM_Task(spec=spec)
+                status = self.wait_for_vcenter_task(task, vcenter_conect)
+                return status
+            except Exception as exp:
+                exp_msg = "add_disk() : exception {} occurred while adding disk "\
+                          "{} to vm {}".format(exp,
+                                               fullpath,
+                                               vm.config.name)
+                self.rollback_newvm(vapp_uuid, exp_msg)
+        else:
+            msg = "add_disk() : Cannot add disk to VM with disk info {} ".format(disk_info)
+            self.rollback_newvm(vapp_uuid, msg)
+
+
+    def get_vcenter_content(self):
+        """
+         Get the vsphere content object
+        """
+        try:
+            vm_vcenter_info = self.get_vm_vcenter_info()
+        except Exception as exp:
+            self.logger.error("Error occurred while getting vCenter information"\
+                             " for VM : {}".format(exp))
+            raise vimconn.vimconnException(message=exp)
+
+        context = None
+        if hasattr(ssl, '_create_unverified_context'):
+            context = ssl._create_unverified_context()
+
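+        #Connect to vCenter with pyVmomi and register a clean disconnect at interpreter exit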
+        vcenter_conect = SmartConnect(
+                    host=vm_vcenter_info["vm_vcenter_ip"],
+                    user=vm_vcenter_info["vm_vcenter_user"],
+                    pwd=vm_vcenter_info["vm_vcenter_password"],
+                    port=int(vm_vcenter_info["vm_vcenter_port"]),
+                    sslContext=context
+                )
+        atexit.register(Disconnect, vcenter_conect)
+        content = vcenter_conect.RetrieveContent()
+        return vcenter_conect, content
+
+
+    def get_vm_moref_id(self, vapp_uuid):
+        """
+        Get the moref_id of given VM
+        """
+        try:
+            vm_moref_id = None
+            if vapp_uuid:
+                vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+                if vm_details and "vm_vcenter_info" in vm_details:
+                    vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+
+            return vm_moref_id
+
+        except Exception as exp:
+            self.logger.error("Error occurred while getting VM moref ID"\
+                             " for VM : {}".format(exp))
+            return None
+
+
+    def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
+        """
+            Method to get vApp template details
+                Args :
+                    catalogs - list of VDC catalogs
+                    image_id - Catalog ID to find
+                    template_name : template name in catalog
+                Returns:
+                    parsed_response : dict of vApp template details
+        """
+        parsed_response = {}
+
+        vca = self.connect_as_admin()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect_as_admin() failed")
+
+        try:
+            catalog = self.get_catalog_obj(image_id, catalogs)
+            if catalog:
+                template_name = self.get_catalogbyid(image_id, catalogs)
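+                #Find the catalog item whose name matches the template and fetch its vAppTemplate href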
+                catalog_items = filter(lambda catalogItemRef: catalogItemRef.get_name() == template_name, catalog.get_CatalogItems().get_CatalogItem())
+                if len(catalog_items) == 1:
+                    response = Http.get(catalog_items[0].get_href(),
+                                        headers=vca.vcloud_session.get_vcloud_headers(),
+                                        verify=vca.verify,
+                                        logger=vca.logger)
+                    catalogItem = XmlElementTree.fromstring(response.content)
+                    entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                    vapp_template_href = entity.get("href")
+                    #get vapp details and parse moref id
+
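+                    #XML namespaces used to locate the VmVimInfo extension that carries the vCenter MoRef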
+                    namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                                  'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                                  'vmw': 'http://www.vmware.com/schema/ovf',
+                                  'vm': 'http://www.vmware.com/vcloud/v1.5',
+                                  'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                                  'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
+                                  'xmlns':"http://www.vmware.com/vcloud/v1.5"
+                                }
+
+                    if vca.vcloud_session and vca.vcloud_session.organization:
+                        response = Http.get(url=vapp_template_href,
+                                            headers=vca.vcloud_session.get_vcloud_headers(),
+                                            verify=vca.verify,
+                                            logger=vca.logger
+                                            )
+
+                        if response.status_code != requests.codes.ok:
+                            self.logger.debug("REST API call {} failed. Return status code {}".format(
+                                                vapp_template_href, response.status_code))
+
+                        else:
+                            xmlroot_respond = XmlElementTree.fromstring(response.content)
+                            children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                            if children_section is not None:
+                                vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                                if vCloud_extension_section is not None:
+                                    vm_vcenter_info = {}
+                                    vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                                    vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                                    if vmext is not None:
+                                        vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                                    parsed_response["vm_vcenter_info"]= vm_vcenter_info
+
+        except Exception as exp :
+            self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+
+        return parsed_response
+
+
+    def rollback_newvm(self, vapp_uuid, msg, exp_type="Generic"):
+        """
+            Method to delete vApp
+                Args :
+                    vapp_uuid - vApp UUID
+                    msg - Error message to be logged
+                    exp_type : Exception type
+                Returns:
+                    None
+        """
+        if vapp_uuid:
+            status = self.delete_vminstance(vapp_uuid)
+        else:
+            msg = "No vApp ID"
+        self.logger.error(msg)
+        if exp_type == "Generic":
+            raise vimconn.vimconnException(msg)
+        elif exp_type == "NotFound":
+            raise vimconn.vimconnNotFoundException(message=msg)
+