Changes in vimconn_vmware.py:
[osm/RO.git] / vimconn_vmware.py
index 5b3ac3d..26ad23a 100644
@@ -34,6 +34,7 @@ import itertools
 import requests
 
 from xml.etree import ElementTree as XmlElementTree
+from lxml import etree as lxmlElementTree
 
 import yaml
 from pyvcloud import Http
@@ -50,11 +51,13 @@ from pyvcloud.schema.vcd.v1_5.schemas.admin.vCloudEntities import TasksInProgres
 
 import logging
 import json
-import vimconn
 import time
 import uuid
 import httplib
 import hashlib
+import socket
+import struct
+import netaddr
 
 # global variable for vcd connector type
 STANDALONE = 'standalone'
@@ -62,14 +65,23 @@ STANDALONE = 'standalone'
 # key for flavor dicts
 FLAVOR_RAM_KEY = 'ram'
 FLAVOR_VCPUS_KEY = 'vcpus'
-
-# global variable for number of retry
-DELETE_INSTANCE_RETRY = 3
+FLAVOR_DISK_KEY = 'disk'
+DEFAULT_IP_PROFILE = {'gateway_address':"192.168.1.1",
+                      'dhcp_count':50,
+                      'subnet_address':"192.168.1.0/24",
+                      'dhcp_enabled':True,
+                      'dhcp_start_address':"192.168.1.3",
+                      'ip_version':"IPv4",
+                      'dns_address':"192.168.1.2"
+                      }
+# global variable for wait time
+INTERVAL_TIME = 5
+MAX_WAIT_TIME = 1800
 
 VCAVERSION = '5.9'
 
-__author__ = "Mustafa Bayramov"
-__date__ = "$26-Aug-2016 11:09:29$"
+__author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare"
+__date__ = "$12-Jan-2017 11:09:29$"
 __version__ = '0.1'
 
 #     -1: "Could not be created",
@@ -164,16 +176,13 @@ class vimconnector(vimconn.vimconnector):
         if tenant_name is not None:
             orgnameandtenant = tenant_name.split(":")
             if len(orgnameandtenant) == 2:
-                self.tenant_name = orgnameandtenant[0]
-                self.org_name = orgnameandtenant[1]
+                self.tenant_name = orgnameandtenant[1]
+                self.org_name = orgnameandtenant[0]
             else:
                 self.tenant_name = tenant_name
-        elif "orgname" in config:
+        if "orgname" in config:
             self.org_name = config['orgname']
-        else:
-            raise vimconn.vimconnException(message="Please indicate valid organization name. "
-                                                   "Either pass by org config attribute "
-                                                   "or as tenant_name:tenant_id.")
+
         if log_level:
             self.logger.setLevel(getattr(logging, log_level))
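
Note on the hunk above: the colon-separated tenant string is now read as "org:tenant" (organisation first, tenant second), and an "orgname" entry in config overrides whatever was parsed; the hard failure when no organisation is given has been dropped. A minimal standalone sketch of the new resolution order (function and argument names here are illustrative, not part of the connector):

    def resolve_org_and_tenant(tenant_name=None, config=None):
        """Sketch of the org/tenant resolution used by the constructor after this change."""
        config = config or {}
        org_name, resolved_tenant = None, None
        if tenant_name is not None:
            parts = tenant_name.split(":")
            if len(parts) == 2:
                org_name, resolved_tenant = parts[0], parts[1]   # "org:tenant"
            else:
                resolved_tenant = tenant_name
        if "orgname" in config:                                   # explicit config entry wins
            org_name = config["orgname"]
        return org_name, resolved_tenant

    # resolve_org_and_tenant("myorg:mytenant")                  -> ("myorg", "mytenant")
    # resolve_org_and_tenant("mytenant", {"orgname": "myorg"})  -> ("myorg", "mytenant")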
 
@@ -427,19 +436,20 @@ class vimconnector(vimconn.vimconnector):
     def new_network(self, net_name, net_type, ip_profile=None, shared=False):
         """Adds a tenant network to VIM
             net_name is the name
-            net_type can be 'bridge','data'.'ptp'.  TODO: this need to be revised
+            net_type can be 'bridge', 'data' or 'ptp'.
             ip_profile is a dict containing the IP parameters of the network
             shared is a boolean
         Returns the network identifier"""
 
-        self.logger.debug(
-            "new_network tenant {} net_type {} ip_profile {} shared {}".format(net_name, net_type, ip_profile, shared))
+        self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
+                          .format(net_name, net_type, ip_profile, shared))
 
         isshared = 'false'
         if shared:
             isshared = 'true'
 
-        network_uuid = self.create_network(network_name=net_name, isshared=isshared)
+        network_uuid = self.create_network(network_name=net_name, net_type=net_type,
+                                           ip_profile=ip_profile, isshared=isshared)
         if network_uuid is not None:
             return network_uuid
         else:
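
new_network() now forwards net_type and ip_profile to create_network(), so networks can be created with a concrete addressing plan instead of the bridged default. A hedged usage sketch, assuming 'vim' is an already constructed vimconnector and using the same keys as DEFAULT_IP_PROFILE:

    ip_profile = {'gateway_address': "10.10.0.1",
                  'subnet_address': "10.10.0.0/24",
                  'dns_address': "10.10.0.2",
                  'dhcp_enabled': True,
                  'dhcp_start_address': "10.10.0.3",
                  'dhcp_count': 100,
                  'ip_version': "IPv4"}
    net_id = vim.new_network("mgmt-net", "bridge", ip_profile=ip_profile, shared=False)
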
@@ -649,7 +659,7 @@ class vimconnector(vimconn.vimconnector):
                 errormsg = ''
                 vcd_network = self.get_vcd_network(network_uuid=net)
                 if vcd_network is not None and vcd_network:
-                    if vcd_network['status'] == 1:
+                    if vcd_network['status'] == '1':
                         status = 'ACTIVE'
                     else:
                         status = 'DOWN'
@@ -658,7 +668,7 @@ class vimconnector(vimconn.vimconnector):
                     errormsg = 'Network not found.'
 
                 dict_entry[net] = {'status': status, 'error_msg': errormsg,
-                                   'vm_info': yaml.safe_dump(vcd_network)}
+                                   'vim_info': yaml.safe_dump(vcd_network)}
         except:
             self.logger.debug("Error in refresh_nets_status")
             self.logger.debug(traceback.format_exc())
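
Two small corrections in the hunk above: the vCD status field is compared against the string '1' (the REST layer returns it as text, so the old integer comparison never matched), and the per-network entry is reported under the expected 'vim_info' key instead of 'vm_info'. The resulting structure looks roughly like this (values illustrative):

    example_entry = {
        "f81d4fae-7dec-11d0-a765-00a0c91e6bf6": {        # network uuid passed in net_list
            "status": "ACTIVE",                          # 'DOWN' when status != '1', 'DELETED' if missing
            "error_msg": "",
            "vim_info": "name: mgmt-net\nstatus: '1'\n"  # yaml.safe_dump() of the vCD network record
        }
    }
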
@@ -690,15 +700,37 @@ class vimconnector(vimconn.vimconnector):
                             vpci: requested virtual PCI address
                 disk: disk size
                 is_public:
-
-
-
                  #TODO to concrete
         Returns the flavor identifier"""
 
+        # generate a new uuid put to internal dict and return it.
+        self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
+        new_flavor=flavor_data
+        ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
+        cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
+        disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
+
+        extended_flv = flavor_data.get("extended")
+        if extended_flv:
+            numas=extended_flv.get("numas")
+            if numas:
+                for numa in numas:
+                    #overwrite ram and vcpus
+                    ram = numa['memory']*1024
+                    if 'paired-threads' in numa:
+                        cpu = numa['paired-threads']*2
+                    elif 'cores' in numa:
+                        cpu = numa['cores']
+                    elif 'threads' in numa:
+                        cpu = numa['threads']
+
+        new_flavor[FLAVOR_RAM_KEY] = ram
+        new_flavor[FLAVOR_VCPUS_KEY] = cpu
+        new_flavor[FLAVOR_DISK_KEY] = disk
         # generate a new uuid put to internal dict and return it.
         flavor_id = uuid.uuid4()
-        flavorlist[str(flavor_id)] = flavor_data
+        flavorlist[str(flavor_id)] = new_flavor
+        self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
 
         return str(flavor_id)
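
new_flavor() now also records the disk size and lets an "extended"/"numas" section override ram and vcpus: NUMA memory is expressed in GB (hence the x1024) and each paired thread counts as two vCPUs. A standalone sketch of the same arithmetic, assuming the single-NUMA layout the loop above effectively ends up with:

    FLAVOR_RAM_KEY, FLAVOR_VCPUS_KEY, FLAVOR_DISK_KEY = 'ram', 'vcpus', 'disk'

    def expand_flavor(flavor_data):
        """Sketch of the ram/vcpus/disk derivation performed by new_flavor()."""
        ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
        cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
        disk = flavor_data.get(FLAVOR_DISK_KEY, 1)
        for numa in (flavor_data.get("extended") or {}).get("numas", []):
            ram = numa['memory'] * 1024               # GB -> MB
            if 'paired-threads' in numa:
                cpu = numa['paired-threads'] * 2      # one pair = two vCPUs
            elif 'cores' in numa:
                cpu = numa['cores']
            elif 'threads' in numa:
                cpu = numa['threads']
        return {FLAVOR_RAM_KEY: ram, FLAVOR_VCPUS_KEY: cpu, FLAVOR_DISK_KEY: disk}

    # expand_flavor({'disk': 10, 'extended': {'numas': [{'memory': 4, 'paired-threads': 2}]}})
    # -> {'ram': 4096, 'vcpus': 4, 'disk': 10}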
 
@@ -892,6 +924,7 @@ class vimconnector(vimconn.vimconnector):
                             f.close()
                             if progress:
                                 progress_bar.finish()
+                            time.sleep(10)
                     return True
                 else:
                     self.logger.debug("Failed retrieve vApp template for catalog name {} for OVF {}".
@@ -1113,7 +1146,7 @@ class vimconnector(vimconn.vimconnector):
         return None
 
     def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list={},
-                       cloud_config=None):
+                       cloud_config=None, disk_list=None):
         """Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
@@ -1174,6 +1207,8 @@ class vimconnector(vimconn.vimconnector):
         #
         vm_cpus = None
         vm_memory = None
+        vm_disk = None
+
         if flavor_id is not None:
             if flavor_id not in flavorlist:
                 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
@@ -1184,6 +1219,8 @@ class vimconnector(vimconn.vimconnector):
                     flavor = flavorlist[flavor_id]
                     vm_cpus = flavor[FLAVOR_VCPUS_KEY]
                     vm_memory = flavor[FLAVOR_RAM_KEY]
+                    vm_disk = flavor[FLAVOR_DISK_KEY]
+
                 except KeyError:
                     raise vimconn.vimconnException("Corrupted flavor. {}".format(flavor_id))
 
@@ -1194,32 +1231,34 @@ class vimconnector(vimconn.vimconnector):
             power_on = 'true'
 
         # client must provide at least one entry in net_list if not we report error
-        #
+        # If a net of type 'mgmt' is present, configure it as the primary net and use its NIC index as the primary NIC.
+        # Otherwise the first net in net_list is taken as the primary net.
+        primary_net = None
         primary_netname = None
         network_mode = 'bridged'
         if net_list is not None and len(net_list) > 0:
-            primary_net = net_list[0]
+            for net in net_list:
+                if 'use' in net and net['use'] == 'mgmt':
+                    primary_net = net
             if primary_net is None:
-                raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
-            else:
-                try:
-                    primary_net_id = primary_net['net_id']
-                    network_dict = self.get_vcd_network(network_uuid=primary_net_id)
-                    if 'name' in network_dict:
-                        primary_netname = network_dict['name']
-                    self.logger.info("Connecting VM to a network name {} "
-                                     " network id {}".format(primary_netname, primary_net_id))
-                    if 'use' in primary_net:
-                        if primary_net['use'] == 'bridge':
-                            network_mode = 'bridged'
-                except KeyError:
-                    raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
+                primary_net = net_list[0]
+
+            try:
+                primary_net_id = primary_net['net_id']
+                network_dict = self.get_vcd_network(network_uuid=primary_net_id)
+                if 'name' in network_dict:
+                    primary_netname = network_dict['name']
+
+            except KeyError:
+                raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
+        else:
+            raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed to create vApp {}: network list is empty.".format(name))
 
         # use: 'data', 'bridge', 'mgmt'
         # create vApp.  Set vcpu and ram based on flavor id.
         vapptask = vca.create_vapp(self.tenant_name, vmname_andid, templateName,
                                    self.get_catalogbyid(image_id, catalogs),
-                                   network_name=primary_netname,  # can be None if net_list None
+                                   network_name=None,  # None while creating vapp
                                    network_mode=network_mode,
                                    vm_name=vmname_andid,
                                    vm_cpus=vm_cpus,  # can be None if flavor is None
@@ -1232,14 +1271,24 @@ class vimconnector(vimconn.vimconnector):
 
         # we should have now vapp in undeployed state.
         vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+        vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
         if vapp is None:
             raise vimconn.vimconnUnexpectedResponse(
-                "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format(vmname_andid))
+                "new_vminstance(): Failed failed retrieve vApp {} after we deployed".format(
+                                                                            vmname_andid))
+
+        # add vm disk
+        if vm_disk:
+            #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
+            result = self.modify_vm_disk(vapp_uuid, vm_disk)
+            if result :
+                self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
 
-        # add first NIC
+        # add NICs & connect to networks in netlist
         try:
             self.logger.info("Request to connect VM to a network: {}".format(net_list))
             nicIndex = 0
+            primary_nic_index = 0
             for net in net_list:
                 # openmano uses network id in UUID format.
                 # vCloud Director need a name so we do reverse operation from provided UUID we lookup a name
@@ -1253,6 +1302,9 @@ class vimconnector(vimconn.vimconnector):
                 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
                 interface_network_mode = net['use']
 
+                if interface_network_mode == 'mgmt':
+                    primary_nic_index = nicIndex
+
                 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
                                   - DHCP (The IP address is obtained from a DHCP service.)
                                   - MANUAL (The IP address is assigned manually in the IpAddress element.)
@@ -1261,21 +1313,19 @@ class vimconnector(vimconn.vimconnector):
                 if primary_netname is not None:
                     nets = filter(lambda n: n.name == interface_net_name, vca.get_networks(self.tenant_name))
                     if len(nets) == 1:
-                        self.logger.info("Found requested network: {}".format(nets[0].name))
+                        self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].name))
                         task = vapp.connect_to_network(nets[0].name, nets[0].href)
                         if type(task) is GenericTask:
                             vca.block_until_completed(task)
-                        # connect network to VM
-                        # TODO figure out mapping between openmano representation to vCloud director.
-                        # one idea use first nic as management DHCP all remaining in bridge mode
-                        self.logger.info("Connecting VM to a network network {}".format(nets[0].name))
+                        # connect network to VM - with all DHCP by default
+                        self.logger.info("new_vminstance(): Connecting VM to a network {}".format(nets[0].name))
                         task = vapp.connect_vms(nets[0].name,
                                                 connection_index=nicIndex,
-                                                connections_primary_index=nicIndex,
+                                                connections_primary_index=primary_nic_index,
                                                 ip_allocation_mode='DHCP')
                         if type(task) is GenericTask:
                             vca.block_until_completed(task)
-            nicIndex += 1
+                nicIndex += 1
         except KeyError:
             # it might be a case if specific mandatory entry in dict is empty
             self.logger.debug("Key error {}".format(KeyError.message))
@@ -1290,7 +1340,19 @@ class vimconnector(vimconn.vimconnector):
             vca.block_until_completed(deploytask)
 
         # check if vApp deployed and if that the case return vApp UUID otherwise -1
-        vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+        wait_time = 0
+        vapp_uuid = None
+        while wait_time <= MAX_WAIT_TIME:
+            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vmname_andid)
+            if vapp and vapp.me.deployed:
+                vapp_uuid = self.get_vappid(vca.get_vdc(self.tenant_name), vmname_andid)
+                break
+            else:
+                self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
+                time.sleep(INTERVAL_TIME)
+
+            wait_time +=INTERVAL_TIME
+
         if vapp_uuid is not None:
             return vapp_uuid
         else:
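
Deployment is now confirmed by polling instead of a single lookup: the loop retries every INTERVAL_TIME (5 s) up to MAX_WAIT_TIME (1800 s). The same poll-until-done pattern is reused below for power off, undeploy and delete, so a generic sketch of it (check is any caller-supplied callable returning a truthy value on success):

    import time

    INTERVAL_TIME = 5       # seconds between polls
    MAX_WAIT_TIME = 1800    # give up after 30 minutes

    def wait_until(check, interval=INTERVAL_TIME, max_wait=MAX_WAIT_TIME):
        """Poll check() until it returns a truthy value or the time budget is spent."""
        waited = 0
        while waited <= max_wait:
            result = check()
            if result:
                return result
            time.sleep(interval)
            waited += interval
        return None

    # Usage sketch (the lookup callable is illustrative):
    # vapp_uuid = wait_until(lambda: get_deployed_vapp_uuid(vmname_andid))
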
@@ -1384,47 +1446,97 @@ class vimconnector(vimconn.vimconnector):
                 self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
 
             # Delete vApp and wait for status change if task executed and vApp is None.
-            # We successfully delete vApp from vCloud
             vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
-            # poweroff vapp / undeploy and delete
-            power_off_task = vapp.poweroff()
-            if type(power_off_task) is GenericTask:
-                vca.block_until_completed(power_off_task)
-            else:
-                if not power_off_task:
-                    self.logger.debug("delete_vminstance(): Failed power off VM uuid {} ".format(vm__vim_uuid))
-
-            # refresh status
-            if vapp.me.deployed:
-                undeploy_task = vapp.undeploy()
-                if type(undeploy_task) is GenericTask:
-                    retry = 0
-                    while retry <= DELETE_INSTANCE_RETRY:
-                        result = vca.block_until_completed(undeploy_task)
-                        if result:
-                            break
-                        retry += 1
-                else:
-                    return -1
 
-            # delete vapp
-            vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
-            if vapp is not None:
-                delete_task = vapp.delete()
-                retry = 0
-                while retry <= DELETE_INSTANCE_RETRY:
-                    task = vapp.delete()
-                    if type(task) is GenericTask:
-                        vca.block_until_completed(delete_task)
-                    if not delete_task:
+            if vapp:
+                if vapp.me.deployed:
+                    self.logger.info("Powering off vApp {}".format(vapp_name))
+                    #Power off vApp
+                    powered_off = False
+                    wait_time = 0
+                    while wait_time <= MAX_WAIT_TIME:
+                        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+                        if not vapp:
+                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+
+                        power_off_task = vapp.poweroff()
+                        if type(power_off_task) is GenericTask:
+                            result = vca.block_until_completed(power_off_task)
+                            if result:
+                                powered_off = True
+                                break
+                        else:
+                            self.logger.info("Wait for vApp {} to power off".format(vapp_name))
+                            time.sleep(INTERVAL_TIME)
+
+                        wait_time +=INTERVAL_TIME
+                    if not powered_off:
+                        self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
+                    else:
+                        self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
+
+                    #Undeploy vApp
+                    self.logger.info("Undeploy vApp {}".format(vapp_name))
+                    wait_time = 0
+                    undeployed = False
+                    while wait_time <= MAX_WAIT_TIME:
+                        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+                        if not vapp:
+                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+                        undeploy_task = vapp.undeploy(action='powerOff')
+
+                        if type(undeploy_task) is GenericTask:
+                            result = vca.block_until_completed(undeploy_task)
+                            if result:
+                                undeployed = True
+                                break
+                        else:
+                            self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
+                            time.sleep(INTERVAL_TIME)
+
+                        wait_time +=INTERVAL_TIME
+
+                    if not undeployed:
+                        self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid)) 
+
+                # delete vapp
+                self.logger.info("Start deletion of vApp {} ".format(vapp_name))
+                vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+
+                if vapp is not None:
+                    wait_time = 0
+                    result = False
+
+                    while wait_time <= MAX_WAIT_TIME:
+                        vapp = vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name)
+                        if not vapp:
+                            self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                            return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+
+                        delete_task = vapp.delete()
+
+                        if type(delete_task) is GenericTask:
+                            result = vca.block_until_completed(delete_task)
+                            if result:
+                                break
+                        else:
+                            self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
+                            time.sleep(INTERVAL_TIME)
+
+                        wait_time +=INTERVAL_TIME
+
+                    if not result:
                         self.logger.debug("delete_vminstance(): Failed delete uuid {} ".format(vm__vim_uuid))
-                    retry += 1
 
         except:
             self.logger.debug(traceback.format_exc())
             raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
 
         if vca.get_vapp(vca.get_vdc(self.tenant_name), vapp_name) is None:
+            self.logger.info("Deleted vm instance {} sccessfully".format(vm__vim_uuid))
             return vm__vim_uuid
         else:
             raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
@@ -1482,8 +1594,8 @@ class vimconnector(vimconn.vimconnector):
                         for vm_network in vapp_network:
                             if vm_network['name'] == vmname:
                                 interface = {"mac_address": vm_network['mac'],
-                                             "vim_net_id": self.get_network_name_by_id(vm_network['network_name']),
-                                             "vim_interface_id": vm_network['network_name'],
+                                             "vim_net_id": self.get_network_id_by_name(vm_network['network_name']),
+                                             "vim_interface_id": self.get_network_id_by_name(vm_network['network_name']),
                                              'ip_address': vm_network['ip']}
                                 # interface['vim_info'] = yaml.safe_dump(vm_network)
                                 vm_dict["interfaces"].append(interface)
@@ -1643,6 +1755,37 @@ class vimconnector(vimconn.vimconnector):
 
         return None
 
+    def get_network_id_by_name(self, network_name=None):
+        """Method gets vcloud director network uuid based on supplied name.
+
+        Args:
+            network_name: network_name
+        Returns:
+            The return network uuid.
+            network_uuid: network_id
+        """
+
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed.")
+
+        if not network_name:
+            self.logger.debug("get_network_id_by_name() : Network name is empty")
+            return None
+
+        try:
+            org_dict = self.get_org(self.org_uuid)
+            if org_dict and 'networks' in org_dict:
+                org_network_dict = org_dict['networks']
+                for net_uuid,net_name in org_network_dict.iteritems():
+                    if net_name == network_name:
+                        return net_uuid
+
+        except KeyError as exp:
+            self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
+
+        return None
+
     def list_org_action(self):
         """
         Method leverages vCloud director and query for available organization for particular user
@@ -2044,12 +2187,16 @@ class vimconnector(vimconn.vimconnector):
 
         return False
 
-    def create_network(self, network_name=None, parent_network_uuid=None, isshared='true'):
+    def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+                       ip_profile=None, isshared='true'):
         """
         Method create network in vCloud director
 
         Args:
             network_name - is network name to be created.
+            net_type - can be 'bridge','data','ptp','mgmt'.
+            ip_profile is a dict containing the IP parameters of the network
+            isshared - is a boolean
             parent_network_uuid - is parent provider vdc network that will be used for mapping.
             It optional attribute. by default if no parent network indicate the first available will be used.
 
@@ -2059,6 +2206,8 @@ class vimconnector(vimconn.vimconnector):
 
         new_network_name = [network_name, '-', str(uuid.uuid4())]
         content = self.create_network_rest(network_name=''.join(new_network_name),
+                                           ip_profile=ip_profile,
+                                           net_type=net_type,
                                            parent_network_uuid=parent_network_uuid,
                                            isshared=isshared)
         if content is None:
@@ -2075,12 +2224,16 @@ class vimconnector(vimconn.vimconnector):
             self.logger.debug("Failed create network {}".format(network_name))
             return None
 
-    def create_network_rest(self, network_name=None, parent_network_uuid=None, isshared='true'):
+    def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+                            ip_profile=None, isshared='true'):
         """
         Method create network in vCloud director
 
         Args:
             network_name - is network name to be created.
+            net_type - can be 'bridge','data','ptp','mgmt'.
+            ip_profile is a dict containing the IP parameters of the network
+            isshared - is a boolean
             parent_network_uuid - is parent provider vdc network that will be used for mapping.
             It optional attribute. by default if no parent network indicate the first available will be used.
 
@@ -2151,29 +2304,138 @@ class vimconnector(vimconn.vimconnector):
                 except:
                     return None
 
+            #Configure IP profile of the network
+            ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
+
+            gateway_address=ip_profile['gateway_address']
+            dhcp_count=int(ip_profile['dhcp_count'])
+            subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+
+            if ip_profile['dhcp_enabled']==True:
+                dhcp_enabled='true'
+            else:
+                dhcp_enabled='false'
+            dhcp_start_address=ip_profile['dhcp_start_address']
+
+            #derive dhcp_end_address from dhcp_start_address & dhcp_count
+            end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
+            end_ip_int += dhcp_count - 1
+            dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
+
+            ip_version=ip_profile['ip_version']
+            dns_address=ip_profile['dns_address']
+
             # either use client provided UUID or search for a first available
             #  if both are not defined we return none
             if parent_network_uuid is not None:
                 url_list = [vca.host, '/api/admin/network/', parent_network_uuid]
                 add_vdc_rest_url = ''.join(url_list)
 
-            # return response.content
-            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
-                            <Description>Openmano created</Description>
-                                    <Configuration>
-                                        <ParentNetwork href="{1:s}"/>
-                                        <FenceMode>{2:s}</FenceMode>
-                                    </Configuration>
-                                    <IsShared>{3:s}</IsShared>
-                        </OrgVdcNetwork> """.format(escape(network_name), available_networks, "bridged", isshared)
+            if net_type=='ptp':
+                fence_mode="isolated"
+                isshared='false'
+                is_inherited='false'
+                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                                <Description>Openmano created</Description>
+                                        <Configuration>
+                                            <IpScopes>
+                                                <IpScope>
+                                                    <IsInherited>{1:s}</IsInherited>
+                                                    <Gateway>{2:s}</Gateway>
+                                                    <Netmask>{3:s}</Netmask>
+                                                    <Dns1>{4:s}</Dns1>
+                                                    <IsEnabled>{5:s}</IsEnabled>
+                                                    <IpRanges>
+                                                        <IpRange>
+                                                            <StartAddress>{6:s}</StartAddress>
+                                                            <EndAddress>{7:s}</EndAddress>
+                                                        </IpRange>
+                                                    </IpRanges>
+                                                </IpScope>
+                                            </IpScopes>
+                                            <FenceMode>{8:s}</FenceMode>
+                                        </Configuration>
+                                        <IsShared>{9:s}</IsShared>
+                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
+                                                        subnet_address, dns_address, dhcp_enabled,
+                                                        dhcp_start_address, dhcp_end_address, fence_mode, isshared)
+
+            else:
+                fence_mode="bridged"
+                is_inherited='false'
+                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                                <Description>Openmano created</Description>
+                                        <Configuration>
+                                            <IpScopes>
+                                                <IpScope>
+                                                    <IsInherited>{1:s}</IsInherited>
+                                                    <Gateway>{2:s}</Gateway>
+                                                    <Netmask>{3:s}</Netmask>
+                                                    <Dns1>{4:s}</Dns1>
+                                                    <IsEnabled>{5:s}</IsEnabled>
+                                                    <IpRanges>
+                                                        <IpRange>
+                                                            <StartAddress>{6:s}</StartAddress>
+                                                            <EndAddress>{7:s}</EndAddress>
+                                                        </IpRange>
+                                                    </IpRanges>
+                                                </IpScope>
+                                            </IpScopes>
+                                            <ParentNetwork href="{8:s}"/>
+                                            <FenceMode>{9:s}</FenceMode>
+                                        </Configuration>
+                                        <IsShared>{10:s}</IsShared>
+                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
+                                                        subnet_address, dns_address, dhcp_enabled,
+                                                        dhcp_start_address, dhcp_end_address, available_networks,
+                                                        fence_mode, isshared)
 
             headers = vca.vcloud_session.get_vcloud_headers()
             headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
-            response = Http.post(url=add_vdc_rest_url, headers=headers, data=data, verify=vca.verify, logger=vca.logger)
+            try:
+                response = Http.post(url=add_vdc_rest_url,
+                                     headers=headers,
+                                     data=data,
+                                     verify=vca.verify,
+                                     logger=vca.logger)
+
+                if response.status_code != 201:
+                    self.logger.debug("Create Network POST REST API call failed. Return status code {}"
+                                      .format(response.status_code))
+                else:
+                    network = networkType.parseString(response.content, True)
+                    create_nw_task = network.get_Tasks().get_Task()[0]
+
+                    # if we all ok we respond with content after network creation completes
+                    # otherwise by default return None
+                    if create_nw_task is not None:
+                        self.logger.debug("Create Network REST : Waiting for Nw creation complete")
+                        status = vca.block_until_completed(create_nw_task)
+                        if status:
+                            return response.content
+                        else:
+                            self.logger.debug("create_network_rest task failed. Network Create response : {}"
+                                              .format(response.content))
+            except Exception as exp:
+                self.logger.debug("create_network_rest : Exception : {} ".format(exp))
 
-            # if we all ok we respond with content otherwise by default None
-            if response.status_code == 201:
-                return response.content
+        return None
+
+    def convert_cidr_to_netmask(self, cidr_ip=None):
+        """
+        Method converts a CIDR prefix into a dotted-decimal netmask
+        Args:
+            cidr_ip : IP address in CIDR notation
+        Returns:
+            netmask : converted dotted-decimal netmask
+        """
+        if cidr_ip is not None:
+            if '/' in cidr_ip:
+                network, net_bits = cidr_ip.split('/')
+                netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
+            else:
+                netmask = cidr_ip
+            return netmask
         return None
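
create_network_rest() derives the end of the DHCP range from dhcp_start_address and dhcp_count using netaddr, and convert_cidr_to_netmask() turns the CIDR prefix length into a dotted-decimal mask with the usual shifted-ones trick. A worked sketch of both conversions as pure helpers (no vCD calls involved):

    import socket
    import struct
    import netaddr

    def cidr_to_netmask(cidr_ip):
        """'192.168.1.0/24' -> '255.255.255.0'; already-dotted masks pass through unchanged."""
        if '/' not in cidr_ip:
            return cidr_ip
        _, net_bits = cidr_ip.split('/')
        mask_int = (0xffffffff << (32 - int(net_bits))) & 0xffffffff
        return socket.inet_ntoa(struct.pack(">I", mask_int))

    def dhcp_end_address(dhcp_start_address, dhcp_count):
        """Last address of a pool starting at dhcp_start_address that holds dhcp_count leases."""
        end_ip_int = int(netaddr.IPAddress(dhcp_start_address)) + dhcp_count - 1
        return str(netaddr.IPAddress(end_ip_int))

    # cidr_to_netmask("192.168.1.0/24")   -> "255.255.255.0"
    # dhcp_end_address("192.168.1.3", 50) -> "192.168.1.52"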
 
     def get_provider_rest(self, vca=None):
@@ -2408,21 +2670,26 @@ class vimconnector(vimconn.vimconnector):
                 xmlroot_respond = XmlElementTree.fromstring(response.content)
                 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
 
-                namespaces_ovf = {'ovf': 'http://schemas.dmtf.org/ovf/envelope/1'}
-                namespace_vmm = {'vmw': 'http://www.vmware.com/schema/ovf'}
-                namespace_vm = {'vm': 'http://www.vmware.com/vcloud/v1.5'}
+                namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                              'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                              'vmw': 'http://www.vmware.com/schema/ovf',
+                              'vm': 'http://www.vmware.com/vcloud/v1.5',
+                              'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                              "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
+                              "xmlns":"http://www.vmware.com/vcloud/v1.5"
+                             }
 
-                created_section = xmlroot_respond.find('vm:DateCreated', namespace_vm)
+                created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
                 if created_section is not None:
                     parsed_respond['created'] = created_section.text
 
-                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespace_vm)
+                network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
                 if network_section is not None and 'networkName' in network_section.attrib:
                     parsed_respond['networkname'] = network_section.attrib['networkName']
 
                 ipscopes_section = \
                     xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
-                                         namespace_vm)
+                                         namespaces)
                 if ipscopes_section is not None:
                     for ipscope in ipscopes_section:
                         for scope in ipscope:
@@ -2436,14 +2703,15 @@ class vimconnector(vimconn.vimconnector):
                                 parsed_respond[tag_key] = scope.text
 
                 # parse children section for other attrib
-                children_section = xmlroot_respond.find('vm:Children/', namespace_vm)
+                children_section = xmlroot_respond.find('vm:Children/', namespaces)
                 if children_section is not None:
                     parsed_respond['name'] = children_section.attrib['name']
-                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled']
+                    parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
+                     if  "nestedHypervisorEnabled" in children_section.attrib else None
                     parsed_respond['deployed'] = children_section.attrib['deployed']
                     parsed_respond['status'] = children_section.attrib['status']
                     parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
-                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespace_vm)
+                    network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
                     nic_list = []
                     for adapters in network_adapter:
                         adapter_key = adapters.tag.split("}")[1]
@@ -2466,9 +2734,27 @@ class vimconnector(vimconn.vimconnector):
                                 parsed_respond['acquireMksTicket'] = link.attrib
 
                     parsed_respond['interfaces'] = nic_list
-            except:
-                pass
 
+                    virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
+                    vm_virtual_hardware_info = {}
+                    if virtual_hardware_section is not None:
+                        for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
+                            if item.find("rasd:Description",namespaces).text == "Hard disk":
+                                disk_size = item.find("rasd:HostResource" ,namespaces
+                                                ).attrib["{"+namespaces['vm']+"}capacity"]
+
+                                vm_virtual_hardware_info["disk_size"]= disk_size
+                                break
+
+                        for link in virtual_hardware_section:
+                            if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
+                                if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
+                                    vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
+                                    break
+
+                    parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
+            except Exception as exp :
+                self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
         return parsed_respond
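
get_vapp_details_rest() now merges all relevant namespaces into one map and pulls the hard-disk size and the disks edit link out of ovf:VirtualHardwareSection; the capacity sits in a vcloud-namespaced attribute of rasd:HostResource. A reduced sketch of that extraction, assuming xml_text already holds the vApp XML returned by the API:

    from xml.etree import ElementTree as XmlElementTree

    NAMESPACES = {
        'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
        'vm': 'http://www.vmware.com/vcloud/v1.5',
        'rasd': 'http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData',
    }

    def extract_disk_size(xml_text):
        """Return the 'Hard disk' capacity (MB) from a VM's VirtualHardwareSection, or None."""
        root = XmlElementTree.fromstring(xml_text)
        hw_section = root.find('.//ovf:VirtualHardwareSection', NAMESPACES)
        if hw_section is None:
            return None
        for item in hw_section.iterfind('ovf:Item', NAMESPACES):
            description = item.find('rasd:Description', NAMESPACES)
            if description is not None and description.text == 'Hard disk':
                host_resource = item.find('rasd:HostResource', NAMESPACES)
                return host_resource.attrib['{' + NAMESPACES['vm'] + '}capacity']
        return None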
 
     def acuire_console(self, vm_uuid=None):
@@ -2493,3 +2779,110 @@ class vimconnector(vimconn.vimconnector):
                 return response.content
 
         return None
+
+    def modify_vm_disk(self, vapp_uuid, flavor_disk):
+        """
+        Method checks the current VM disk size and enlarges it to match the flavor
+
+        Args:
+            vapp_uuid - is vapp identifier.
+            flavor_disk - disk size as specified in VNFD (flavor)
+
+        Returns:
+            True if the disk was resized or no resize was needed, None on failure
+        """
+        status = None
+        try:
+            #Flavor disk is in GB convert it into MB
+            flavor_disk = int(flavor_disk) * 1024
+            vm_details = self.get_vapp_details_rest(vapp_uuid)
+            if vm_details:
+                vm_name = vm_details["name"]
+                self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
+
+            if vm_details and "vm_virtual_hardware" in vm_details:
+                vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
+                disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+
+                self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
+
+                if flavor_disk > vm_disk:
+                    status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
+                    self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
+                                                         vm_disk,  flavor_disk ))
+                else:
+                    status = True
+                    self.logger.info("No need to modify disk of VM {}".format(vm_name))
+
+            return status
+        except Exception as exp:
+            self.logger.info("Error occurred while modifing disk size {}".format(exp))
+
+
+    def modify_vm_disk_rest(self, disk_href , disk_size):
+        """
+        Method modifies the VM disk size through the vCD REST API
+
+        Args:
+            disk_href - vCD API URL to GET and PUT disk data
+            disk_size - disk size as specified in VNFD (flavor)
+
+        Returns:
+            The task completion status (True on success) or None
+        """
+        vca = self.connect()
+        if not vca:
+            raise vimconn.vimconnConnectionException("self.connect() is failed")
+        if disk_href is None or disk_size is None:
+            return None
+
+        if vca.vcloud_session and vca.vcloud_session.organization:
+            response = Http.get(url=disk_href,
+                                headers=vca.vcloud_session.get_vcloud_headers(),
+                                verify=vca.verify,
+                                logger=vca.logger)
+
+        if response.status_code != requests.codes.ok:
+            self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
+                                                                            response.status_code))
+            return None
+        try:
+            lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+            namespaces = {prefix:uri for prefix,uri in lxmlroot_respond.nsmap.iteritems() if prefix}
+            namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+
+            for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
+                if item.find("rasd:Description",namespaces).text == "Hard disk":
+                    disk_item = item.find("rasd:HostResource" ,namespaces )
+                    if disk_item is not None:
+                        disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
+                        break
+
+            data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
+                                             xml_declaration=True)
+
+            #Send PUT request to modify disk size
+            headers = vca.vcloud_session.get_vcloud_headers()
+            headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+
+            response = Http.put(url=disk_href,
+                                data=data,
+                                headers=headers,
+                                verify=vca.verify, logger=self.logger)
+
+            if response.status_code != 202:
+                self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
+                                                                            response.status_code))
+            else:
+                modify_disk_task = taskType.parseString(response.content, True)
+                if type(modify_disk_task) is GenericTask:
+                    status = vca.block_until_completed(modify_disk_task)
+                    return status
+
+            return None
+
+        except Exception as exp :
+                self.logger.info("Error occurred calling rest api for modifing disk size {}".format(exp))
+                return None
+
+
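
modify_vm_disk() converts the flavor disk from GB to MB, compares it with the size read from the OVF hardware section, and only issues the REST PUT (modify_vm_disk_rest) when the flavor asks for more space than the template already provides. A compact sketch of that decision step, with the REST call hidden behind a caller-supplied callable (do_resize_rest is illustrative):

    def maybe_resize_disk(flavor_disk_gb, current_disk_mb, do_resize_rest):
        """Sketch: resize only when the flavor wants more space than the template offers."""
        wanted_mb = int(flavor_disk_gb) * 1024        # VNFD flavor disk is in GB, vCD reports MB
        if wanted_mb > int(current_disk_mb):
            return do_resize_rest(wanted_mb)          # e.g. modify_vm_disk_rest(disk_edit_href, wanted_mb)
        return True                                   # nothing to do, treat as success

    # maybe_resize_disk(10, 5120, lambda mb: True) -> True (5 GB template grown to 10 GB)
    # maybe_resize_disk(4, 5120, lambda mb: True)  -> True (no PUT issued)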