Merge branch 'py3' features 8029 8030 22/8222/4
authortierno <alfonso.tiernosepulveda@telefonica.com>
Sat, 23 Nov 2019 15:11:15 +0000 (15:11 +0000)
committertierno <alfonso.tiernosepulveda@telefonica.com>
Sat, 23 Nov 2019 18:44:35 +0000 (18:44 +0000)
Change-Id: Ia670d01fc45d63f4051209ef73ca272054895873
Signed-off-by: tierno <alfonso.tiernosepulveda@telefonica.com>
20 files changed:
RO-VIM-aws/osm_rovim_aws/vimconn_aws.py
RO-VIM-aws/requirements.txt
RO-VIM-azure/osm_rovim_azure/vimconn_azure.py
RO-VIM-azure/requirements.txt
RO-VIM-fos/osm_rovim_fos/vimconn_fos.py
RO-VIM-fos/requirements.txt
RO-VIM-opennebula/osm_rovim_opennebula/vimconn_opennebula.py
RO-VIM-opennebula/requirements.txt
RO-VIM-openstack/osm_rovim_openstack/vimconn_openstack.py
RO-VIM-openstack/requirements.txt
RO-VIM-openvim/osm_rovim_openvim/vimconn_openvim.py
RO-VIM-openvim/requirements.txt
RO-VIM-vmware/osm_rovim_vmware/vimconn_vmware.py
RO-VIM-vmware/requirements.txt
RO/osm_ro/database_utils/migrate_mano_db.sh
RO/osm_ro/nfvo.py
RO/osm_ro/vim_thread.py
RO/osm_ro/vimconn.py
RO/test/test_RO.py
test-docker/test-gen-devops.sh

index 0000000,8173662..28dc4e9
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,804 +1,803 @@@
 -    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+ # -*- coding: utf-8 -*-
+ ##
+ # Copyright 2017 xFlow Research Pvt. Ltd
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: saboor.ahmad@xflowresearch.com
+ ##
+ '''
+ AWS-connector implements all the methods to interact with AWS using the BOTO client
+ '''
+ __author__ = "Saboor Ahmad"
+ __date__ = "10-Apr-2017"
+ from osm_ro import vimconn
+ import yaml
+ import logging
+ import netaddr
+ import time
+ import boto
+ import boto.ec2
+ import boto.vpc
+ class vimconnector(vimconn.vimconnector):
+     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
+                  config={}, persistent_info={}):
+         """ Params: uuid - id asigned to this VIM
+                 name - name assigned to this VIM, can be used for logging
+                 tenant_id - ID to be used for tenant
+                 tenant_name - name of tenant to be used VIM tenant to be used
+                 url_admin - optional, url used for administrative tasks
+                 user - credentials of the VIM user
+                 passwd - credentials of the VIM user
+                 log_level - if must use a different log_level than the general one
+                 config - dictionary with misc VIM information
+                     region_name - name of region to deploy the instances
+                     vpc_cidr_block - default CIDR block for VPC
+                     security_groups - default security group to specify this instance
+                 persistent_info - dict where the class can store information that will be available among class
+                     destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+                     empty dict. Useful to store login/tokens information to speed up communication
+         """
+         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                       config, persistent_info)
+         self.persistent_info = persistent_info
+         self.a_creds = {}
+         if user:
+             self.a_creds['aws_access_key_id'] = user
+         else:
+             raise vimconn.vimconnAuthException("Username is not specified")
+         if passwd:
+             self.a_creds['aws_secret_access_key'] = passwd
+         else:
+             raise vimconn.vimconnAuthException("Password is not specified")
+         if 'region_name' in config:
+             self.region = config.get('region_name')
+         else:
+             raise vimconn.vimconnException("AWS region_name is not specified at config")
+         self.vpc_data = {}
+         self.subnet_data = {}
+         self.conn = None
+         self.conn_vpc = None
+         self.account_id = None
+         self.vpc_id = self.get_tenant_list()[0]['id']
+         # we take VPC CIDR block if specified, otherwise we use the default CIDR
+         # block suggested by AWS while creating instance
+         self.vpc_cidr_block = '10.0.0.0/24'
+         if tenant_id:
+             self.vpc_id = tenant_id
+         if 'vpc_cidr_block' in config:
+             self.vpc_cidr_block = config['vpc_cidr_block']
+         self.security_groups = None
+         if 'security_groups' in config:
+             self.security_groups = config['security_groups']
+         self.key_pair = None
+         if 'key_pair' in config:
+             self.key_pair = config['key_pair']
+         self.flavor_info = None
+         if 'flavor_info' in config:
+             flavor_data = config.get('flavor_info')
+             if isinstance(flavor_data, str):
+                 try:
+                     if flavor_data[0] == "@":  # read from a file
+                         with open(flavor_data[1:], 'r') as stream:
+                             self.flavor_info = yaml.load(stream, Loader=yaml.Loader)
+                     else:
+                         self.flavor_info = yaml.load(flavor_data, Loader=yaml.Loader)
+                 except yaml.YAMLError as e:
+                     self.flavor_info = None
+                     raise vimconn.vimconnException("Bad format at file '{}': {}".format(flavor_data[1:], e))
+                 except IOError as e:
+                     raise vimconn.vimconnException("Error reading file '{}': {}".format(flavor_data[1:], e))
+             elif isinstance(flavor_data, dict):
+                 self.flavor_info = flavor_data
+         self.logger = logging.getLogger('openmano.vim.aws')
+         if log_level:
+             self.logger.setLevel(getattr(logging, log_level))
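The flavor_info config entry above is accepted either inline or as an "@<file>" reference. A minimal sketch of a mapping it can load — the instance-type names are hypothetical, while the ram/cpus/disk keys are the ones get_flavor_id_from_data() compares below:

    import yaml

    flavor_text = """
    t2.nano:  {ram: 512,  cpus: 1, disk: 8}
    t2.micro: {ram: 1024, cpus: 1, disk: 8}
    """
    flavor_info = yaml.load(flavor_text, Loader=yaml.Loader)
    assert flavor_info["t2.micro"]["ram"] == 1024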
+     def __setitem__(self, index, value):
+         """Params: index - name of value of set
+                    value - value to set
+         """
+         if index == 'user':
+             self.a_creds['aws_access_key_id'] = value
+         elif index == 'passwd':
+             self.a_creds['aws_secret_access_key'] = value
+         elif index == 'region':
+             self.region = value
+         else:
+             vimconn.vimconnector.__setitem__(self, index, value)
+     def _reload_connection(self):
+         """Returns: sets boto.EC2 and boto.VPC connection to work with AWS services
+         """
+         try:
+             self.conn = boto.ec2.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'],
+                                                    aws_secret_access_key=self.a_creds['aws_secret_access_key'])
+             self.conn_vpc = boto.vpc.connect_to_region(self.region, aws_access_key_id=self.a_creds['aws_access_key_id'],
+                                                        aws_secret_access_key=self.a_creds['aws_secret_access_key'])
+             # client = boto3.client("sts", aws_access_key_id=self.a_creds['aws_access_key_id'], aws_secret_access_key=self.a_creds['aws_secret_access_key'])
+             # self.account_id = client.get_caller_identity()["Account"]
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def format_vimconn_exception(self, e):
+         """Params: an Exception object
+         Returns: Raises the exception 'e' passed in mehtod parameters
+         """
+         self.conn = None
+         self.conn_vpc = None
+         raise vimconn.vimconnConnectionException(type(e).__name__ + ": " + str(e))
+     def get_availability_zones_list(self):
+         """Obtain AvailabilityZones from AWS
+         """
+         try:
+             self._reload_connection()
+             az_list = []
+             for az in self.conn.get_all_zones():
+                 az_list.append(az.name)
+             return az_list
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def get_tenant_list(self, filter_dict={}):
+         """Obtain tenants of VIM
+         filter_dict dictionary that can contain the following keys:
+             name: filter by tenant name
+             id: filter by tenant uuid/id
+             <other VIM specific>
+         Returns the tenant list of dictionaries, or an empty list if no tenant matches all the filters:
+             [{'name': '<name>', 'id': '<id>', ...}, ...]
+         """
+         try:
+             self._reload_connection()
+             vpc_ids = []
+             tfilters = {}
+             if filter_dict != {}:
+                 if 'id' in filter_dict:
+                     vpc_ids.append(filter_dict['id'])
+                     tfilters['name'] = filter_dict['id']
+             tenants = self.conn_vpc.get_all_vpcs(vpc_ids, tfilters)
+             tenant_list = []
+             for tenant in tenants:
+                 tenant_list.append({'id': str(tenant.id), 'name': str(tenant.id), 'status': str(tenant.state),
+                                     'cidr_block': str(tenant.cidr_block)})
+             return tenant_list
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def new_tenant(self, tenant_name, tenant_description):
+         """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
+         "tenant_name": string max lenght 64
+         "tenant_description": string max length 256
+         returns the tenant identifier or raise exception
+         """
+         self.logger.debug("Adding a new VPC")
+         try:
+             self._reload_connection()
+             vpc = self.conn_vpc.create_vpc(self.vpc_cidr_block)
+             self.conn_vpc.modify_vpc_attribute(vpc.id, enable_dns_support=True)
+             self.conn_vpc.modify_vpc_attribute(vpc.id, enable_dns_hostnames=True)
+             gateway = self.conn_vpc.create_internet_gateway()
+             self.conn_vpc.attach_internet_gateway(gateway.id, vpc.id)
+             route_table = self.conn_vpc.create_route_table(vpc.id)
+             self.conn_vpc.create_route(route_table.id, '0.0.0.0/0', gateway.id)
+             self.vpc_data[vpc.id] = {'gateway': gateway.id, 'route_table': route_table.id,
+                                      'subnets': self.subnet_sizes(len(self.get_availability_zones_list()),
+                                                                   self.vpc_cidr_block)}
+             return vpc.id
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def delete_tenant(self, tenant_id):
+         """Delete a tenant from VIM
+         tenant_id: returned VIM tenant_id on "new_tenant"
+         Returns None on success. Raises an exception on failure. If the tenant is not found, raises vimconnNotFoundException
+         """
+         self.logger.debug("Deleting specified VPC")
+         try:
+             self._reload_connection()
+             vpc = self.vpc_data.get(tenant_id)
+             if vpc and 'gateway' in vpc and 'route_table' in vpc:
+                 gateway_id, route_table_id = vpc['gateway'], vpc['route_table']
+                 # remove the default route and detach the gateway before deleting the VPC itself
+                 self.conn_vpc.delete_route(route_table_id, '0.0.0.0/0')
+                 self.conn_vpc.detach_internet_gateway(gateway_id, tenant_id)
+                 self.conn_vpc.delete_vpc(tenant_id)
+             else:
+                 self.conn_vpc.delete_vpc(tenant_id)
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def subnet_sizes(self, availability_zones, cidr):
+         """Calcualtes possible subnets given CIDR value of VPC
+         """
+         if availability_zones != 2 and availability_zones != 3:
+             self.logger.debug("Number of AZs should be 2 or 3")
+             raise vimconn.vimconnNotSupportedException("Number of AZs should be 2 or 3")
+         netmasks = ('255.255.252.0', '255.255.254.0', '255.255.255.0', '255.255.255.128')
+         ip = netaddr.IPNetwork(cidr)
+         mask = ip.netmask
+         if str(mask) not in netmasks:
+             self.logger.debug("Netmask " + str(mask) + " not found")
+             raise vimconn.vimconnNotFoundException("Netmask " + str(mask) + " not found")
+         if availability_zones == 2:
+             for n, netmask in enumerate(netmasks):
+                 if str(mask) == netmask:
+                     subnets = list(ip.subnet(n + 24))
+         else:
+             for n, netmask in enumerate(netmasks):
+                 if str(mask) == netmask:
+                     pub_net = list(ip.subnet(n + 24))
+                     pri_subs = pub_net[1:]
+                     pub_mask = pub_net[0].netmask
+             pub_split = list(ip.subnet(26)) if (str(pub_mask) == '255.255.255.0') else list(ip.subnet(27))
+             pub_subs = pub_split[:3]
+             subnets = pub_subs + pri_subs
+         return [str(s) for s in subnets]
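As a worked example of the split above: for a /24 VPC CIDR and three availability zones the method yields three public /27 subnets plus three private /26 subnets. A standalone sketch with netaddr reproducing the same arithmetic:

    import netaddr

    ip = netaddr.IPNetwork('10.0.0.0/24')   # netmask 255.255.255.0 -> index n == 2
    pub_net = list(ip.subnet(2 + 24))       # four /26 blocks
    pri_subs = pub_net[1:]                  # three private /26 subnets
    # pub_net[0].netmask is 255.255.255.192, so the public side splits into /27
    pub_subs = list(ip.subnet(27))[:3]
    print([str(s) for s in pub_subs + pri_subs])
    # ['10.0.0.0/27', '10.0.0.32/27', '10.0.0.64/27',
    #  '10.0.0.64/26', '10.0.0.128/26', '10.0.0.192/26']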
 -            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
++    def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+         """Adds a tenant network to VIM
+         Params:
+             'net_name': name of the network
+             'net_type': one of:
+                 'bridge': overlay isolated network
+                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+             'ip_profile': is a dict containing the IP parameters of the network (Currently only IPv4 is implemented)
+                 'ip-version': can be one of ["IPv4","IPv6"]
+                 'subnet-address': ip_prefix_schema, that is X.X.X.X/Y
+                 'gateway-address': (Optional) ip_schema, that is X.X.X.X
+                 'dns-address': (Optional) ip_schema,
+                 'dhcp': (Optional) dict containing
+                     'enabled': {"type": "boolean"},
+                     'start-address': ip_schema, first IP to grant
+                     'count': number of IPs to grant.
+             'shared': if this network can be seen/used by other tenants/organizations
+         Returns a tuple with the network identifier and created_items, or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         self.logger.debug("Adding a subnet to VPC")
+         try:
+             created_items = {}
+             self._reload_connection()
+             subnet = None
+             vpc_id = self.vpc_id
+             if self.vpc_data.get(vpc_id, None):
+                 cidr_block = list(set(self.vpc_data[vpc_id]['subnets']) - set(self.get_network_details({'tenant_id': vpc_id}, detail='cidr_block')))[0]
+             else:
+                 vpc = self.get_tenant_list({'id': vpc_id})[0]
+                 subnet_list = self.subnet_sizes(len(self.get_availability_zones_list()), vpc['cidr_block'])
+                 cidr_block = list(set(subnet_list) - set(self.get_network_details({'tenant_id': vpc['id']}, detail='cidr_block')))[0]
+             subnet = self.conn_vpc.create_subnet(vpc_id, cidr_block)
+             return subnet.id, created_items
+         except Exception as e:
+             self.format_vimconn_exception(e)
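An illustrative ip_profile for the docstring above (only IPv4 is supported); note that this AWS connector derives the subnet CIDR from the VPC block itself, so the profile is accepted but not used to pick the address:

    ip_profile = {
        'ip-version': 'IPv4',
        'subnet-address': '10.0.0.64/26',
        'gateway-address': '10.0.0.65',
        'dhcp': {'enabled': True, 'start-address': '10.0.0.66', 'count': 50},
    }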
+     def get_network_details(self, filters, detail):
+         """Get specified details related to a subnet
+         """
+         detail_list = []
+         subnet_list = self.get_network_list(filters)
+         for net in subnet_list:
+             detail_list.append(net[detail])
+         return detail_list
+     def get_network_list(self, filter_dict={}):
+         """Obtain tenant networks of VIM
+         Params:
+             'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
+                 name: string  => returns only networks with this name
+                 id:   string  => returns networks with this VIM id, this imply returns one network at most
+                 shared: boolean => returns only networks that are (or are not) shared
+                 tenant_id: string => returns only networks that belong to this tenant/project
+                 #(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
+                 #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+         Returns the network list of dictionaries. each dictionary contains:
+             'id': (mandatory) VIM network id
+             'name': (mandatory) VIM network name
+             'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         List can be empty if no network matches the filter_dict. Raise an exception only upon VIM connectivity,
+             authorization, or some other unspecified error
+         """
+         self.logger.debug("Getting all subnets from VIM")
+         try:
+             self._reload_connection()
+             tfilters = {}
+             if filter_dict != {}:
+                 if 'tenant_id' in filter_dict:
+                     tfilters['vpcId'] = filter_dict['tenant_id']
+             subnets = self.conn_vpc.get_all_subnets(subnet_ids=filter_dict.get('name', None), filters=tfilters)
+             net_list = []
+             for net in subnets:
+                 net_list.append(
+                     {'id': str(net.id), 'name': str(net.id), 'status': str(net.state), 'vpc_id': str(net.vpc_id),
+                      'cidr_block': str(net.cidr_block), 'type': 'bridge'})
+             return net_list
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def get_network(self, net_id):
+         """Obtain network details from the 'net_id' VIM network
+         Return a dict that contains:
+             'id': (mandatory) VIM network id, that is, net_id
+             'name': (mandatory) VIM network name
+             'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         Raises an exception upon error or when network is not found
+         """
+         self.logger.debug("Getting Subnet from VIM")
+         try:
+             self._reload_connection()
+             subnet = self.conn_vpc.get_all_subnets(net_id)[0]
+             return {'id': str(subnet.id), 'name': str(subnet.id), 'status': str(subnet.state),
+                     'vpc_id': str(subnet.vpc_id), 'cidr_block': str(subnet.cidr_block)}
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def delete_network(self, net_id, created_items=None):
+         """
+         Removes a tenant network from VIM and its associated elements
+         :param net_id: VIM identifier of the network, provided by method new_network
+         :param created_items: dictionary with extra items to be deleted. provided by method new_network
+         Returns the network identifier or raises an exception upon error or when network is not found
+         """
+         self.logger.debug("Deleting subnet from VIM")
+         try:
+             self._reload_connection()
+             self.logger.debug("DELETING NET_ID: " + str(net_id))
+             self.conn_vpc.delete_subnet(net_id)
+             return net_id
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def refresh_nets_status(self, net_list):
+         """Get the status of the networks
+         Params:
+             'net_list': a list with the VIM network id to be get the status
+         Returns a dictionary with:
+             'net_id':         #VIM id of this network
+                 status:     #Mandatory. Text with one of:
+                     #  DELETED (not found at vim)
+                     #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+                     #  OTHER (Vim reported other status not understood)
+                     #  ERROR (VIM indicates an ERROR status)
+                     #  ACTIVE, INACTIVE, DOWN (admin down),
+                     #  BUILD (on building process)
+                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+             'net_id2': ...
+         """
+         self._reload_connection()
+         try:
+             dict_entry = {}
+             for net_id in net_list:
+                 subnet_dict = {}
+                 subnet = None
+                 try:
+                     subnet = self.conn_vpc.get_all_subnets(net_id)[0]
+                     if subnet.state == "pending":
+                         subnet_dict['status'] = "BUILD"
+                     elif subnet.state == "available":
+                         subnet_dict['status'] = 'ACTIVE'
+                     else:
+                         subnet_dict['status'] = 'ERROR'
+                     subnet_dict['error_msg'] = ''
+                 except Exception as e:
+                     subnet_dict['status'] = 'DELETED'
+                     subnet_dict['error_msg'] = 'Network not found'
+                 finally:
+                     try:
+                         subnet_dict['vim_info'] = yaml.safe_dump(subnet, default_flow_style=True, width=256)
+                     except yaml.YAMLError as e:
+                         subnet_dict['vim_info'] = str(subnet)
+                 dict_entry[net_id] = subnet_dict
+             return dict_entry
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def get_flavor(self, flavor_id):
+         """Obtain flavor details from the VIM
+         Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+         Raises an exception upon error or if not found
+         """
+         self.logger.debug("Getting instance type")
+         try:
+             if flavor_id in self.flavor_info:
+                 return self.flavor_info[flavor_id]
+             else:
+                 raise vimconn.vimconnNotFoundException("Cannot find flavor with this flavor ID/Name")
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def get_flavor_id_from_data(self, flavor_dict):
+         """Obtain flavor id that match the flavor description
+         Params:
+             'flavor_dict': dictionary that contains:
+                 'disk': main hard disk in GB
+                 'ram': memory in MB
+                 'vcpus': number of virtual cpus
+                 #todo: complete parameters for EPA
+         Returns the flavor_id or raises a vimconnNotFoundException
+         """
+         self.logger.debug("Getting flavor id from data")
+         try:
+             flavor = None
+             for key, values in self.flavor_info.items():
+                 if (values["ram"], values["cpus"], values["disk"]) == \
+                         (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]):
+                     flavor = (key, values)
+                     break
+                 elif (values["ram"], values["cpus"], values["disk"]) >= \
+                         (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"]):
+                     if not flavor:
+                         flavor = (key, values)
+                     elif (flavor[1]["ram"], flavor[1]["cpus"], flavor[1]["disk"]) >= \
+                             (values["ram"], values["cpus"], values["disk"]):
+                         flavor = (key, values)
+             if flavor:
+                 return flavor[0]
+             raise vimconn.vimconnNotFoundException("Cannot find flavor with this flavor ID/Name")
+         except Exception as e:
+             self.format_vimconn_exception(e)
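A small runnable sketch of the closest-match rule implemented above, against a hypothetical flavor table (the comparison is a tuple comparison on (ram, cpus, disk), as in the method):

    flavor_info = {
        't2.micro': {'ram': 1024, 'cpus': 1, 'disk': 8},
        't2.small': {'ram': 2048, 'cpus': 1, 'disk': 20},
    }
    request = {'ram': 1024, 'vcpus': 1, 'disk': 8}
    best = None
    for key, values in flavor_info.items():
        cand = (values['ram'], values['cpus'], values['disk'])
        want = (request['ram'], request['vcpus'], request['disk'])
        if cand == want:
            best = key          # exact match wins immediately
            break
        if cand >= want and (best is None or cand <= (
                flavor_info[best]['ram'], flavor_info[best]['cpus'], flavor_info[best]['disk'])):
            best = key          # smallest flavor that still fits the request
    print(best)  # 't2.micro'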
+     def new_image(self, image_dict):
+         """ Adds a tenant image to VIM
+         Params: image_dict
+                     name (string) - The name of the AMI. Valid only for EBS-based images.
+                     description (string) - The description of the AMI.
+                     image_location (string) - Full path to your AMI manifest in Amazon S3 storage. Only used for S3-based AMIs.
+                     architecture (string) - The architecture of the AMI. Valid choices are: * i386 * x86_64
+                     kernel_id (string) -  The ID of the kernel with which to launch the instances
+                     root_device_name (string) - The root device name (e.g. /dev/sdh)
+                     block_device_map (boto.ec2.blockdevicemapping.BlockDeviceMapping) - A BlockDeviceMapping data structure describing the EBS volumes associated with the Image.
+                     virtualization_type (string) - The virtualization type of the image. Valid choices are: * paravirtual * hvm
+                     sriov_net_support (string) - Advanced networking support. Valid choices are: * simple
+                     snapshot_id (string) - A snapshot ID for the snapshot to be used as root device for the image. Mutually exclusive with block_device_map, requires root_device_name
+                     delete_root_volume_on_termination (bool) - Whether to delete the root volume of the image after instance termination. Only applies when creating image from snapshot_id. Defaults to False. Note that leaving volumes behind after instance termination is not free
+         Returns: image_id - image ID of the newly created image
+         """
+         try:
+             self._reload_connection()
+             image_location = image_dict.get('image_location', None)
+             if image_location:
+                 image_location = str(self.account_id) + str(image_location)
+             image_id = self.conn.register_image(image_dict.get('name', None), image_dict.get('description', None),
+                                                 image_location, image_dict.get('architecture', None),
+                                                 image_dict.get('kernel_id', None),
+                                                 image_dict.get('root_device_name', None),
+                                                 image_dict.get('block_device_map', None),
+                                                 image_dict.get('virtualization_type', None),
+                                                 image_dict.get('sriov_net_support', None),
+                                                 image_dict.get('snapshot_id', None),
+                                                 image_dict.get('delete_root_volume_on_termination', None))
+             return image_id
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def delete_image(self, image_id):
+         """Deletes a tenant image from VIM
+         Returns the image_id if image is deleted or raises an exception on error"""
+         try:
+             self._reload_connection()
+             self.conn.deregister_image(image_id)
+             return image_id
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def get_image_id_from_path(self, path):
+         '''
+         Params: path - location of the image
+         Returns: image_id - ID of the matching image
+         '''
+         self._reload_connection()
+         try:
+             filters = {}
+             if path:
+                 tokens = path.split('/')
+                 filters['owner_id'] = tokens[0]
+                 filters['name'] = '/'.join(tokens[1:])
+             image = self.conn.get_all_images(filters=filters)[0]
+             return image.id
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def get_image_list(self, filter_dict={}):
+         """Obtain tenant images from VIM
+         Filter_dict can be:
+             name: image name
+             id: image uuid
+             checksum: image checksum
+             location: image path
+         Returns the image list of dictionaries:
+             [{<the fields at Filter_dict plus some VIM specific>}, ...]
+             List can be empty
+         """
+         self.logger.debug("Getting image list from VIM")
+         try:
+             self._reload_connection()
+             image_id = None
+             filters = {}
+             if 'id' in filter_dict:
+                 image_id = filter_dict['id']
+             if 'name' in filter_dict:
+                 filters['name'] = filter_dict['name']
+             if 'location' in filter_dict:
+                 filters['location'] = filter_dict['location']
+             # filters['image_type'] = 'machine'
+             # filter_dict['owner_id'] = self.account_id
+             images = self.conn.get_all_images(image_id, filters=filters)
+             image_list = []
+             for image in images:
+                 image_list.append({'id': str(image.id), 'name': str(image.name), 'status': str(image.state),
+                                    'owner': str(image.owner_id), 'location': str(image.location),
+                                    'is_public': str(image.is_public), 'architecture': str(image.architecture),
+                                    'platform': str(image.platform)})
+             return image_list
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None,
+                        disk_list=None, availability_zone_index=None, availability_zone_list=None):
+         """Create a new VM/instance in AWS
+         Params: name
+                 description
+                 start: (boolean) indicates if the VM must start or be created in paused mode.
+                 image_id - image ID in AWS
+                 flavor_id - instance type ID in AWS
+                 net_list
+                     name
+                     net_id - subnet_id from AWS
+                     vpci - (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                     model: (optional, only makes sense for type==virtual) interface model: virtio, e1000, ...
+                     mac_address: (optional) mac address to assign to this interface
+                     'type': (mandatory) can be one of:
+                         'virtual', in this case always connected to a network of type 'net_type=bridge'
+                         'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a
+                             data/ptp network or it can be created unconnected
+                         'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                         'VFnotShared' (SRIOV without VLAN tag): same as PF for network connectivity. VF where no other
+                             VFs are allocated on the same physical NIC
+                     'bw': (optional) only for PF/VF/VFnotShared. Minimal bandwidth required for the interface in GBPS
+                     'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing or True, it must apply the default VIM behaviour
+                     'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this interface. 'net_list' is modified
+                     'elastic_ip': True/False to define if an elastic_ip is required
+                 'cloud_config': (optional) dictionary with:
+                     'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                     'users': (optional) list of users to be inserted, each item is a dict with:
+                         'name': (mandatory) user name,
+                         'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                     'user-data': (optional) string with a text script to be passed directly to cloud-init
+                     'config-files': (optional) list of files to be transferred. Each item is a dict with:
+                         'dest': (mandatory) string with the destination absolute path
+                         'encoding': (optional, by default text). Can be one of:
+                             'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                         'content': (mandatory) string with the content of the file
+                         'permissions': (optional) string with file permissions, typically octal notation '0644'
+                         'owner': (optional) file owner, string with the format 'owner:group'
+                     'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+                     'security-groups':
+                         'subnet_id'
+                         'security_group_id'
+                 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                     'image_id': (optional) VIM id of an existing image. If not provided an empty disk must be mounted
+                     'size': (mandatory) string with the size of the disk in GB
+         Returns a tuple with the instance identifier and created_items or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         self.logger.debug("Creating a new VM instance")
+         try:
+             self._reload_connection()
+             instance = None
+             _, userdata = self._create_user_data(cloud_config)
+             if not net_list:
+                 reservation = self.conn.run_instances(
+                     image_id,
+                     key_name=self.key_pair,
+                     instance_type=flavor_id,
+                     security_groups=self.security_groups,
+                     user_data=userdata
+                 )
+             else:
+                 for index, subnet in enumerate(net_list):
+                     net_intr = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnet.get('net_id'),
+                                                                                        groups=None,
+                                                                                        associate_public_ip_address=True)
+                     if subnet.get('elastic_ip'):
+                         eip = self.conn.allocate_address()
+                         self.conn.associate_address(allocation_id=eip.allocation_id, network_interface_id=net_intr.id)
+                     if index == 0:
+                         reservation = self.conn.run_instances(
+                             image_id,
+                             key_name=self.key_pair,
+                             instance_type=flavor_id,
+                             security_groups=self.security_groups,
+                             network_interfaces=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr),
+                             user_data=userdata
+                         )
+                     else:
+                         while True:
+                             try:
+                                 self.conn.attach_network_interface(
+                                     network_interface_id=boto.ec2.networkinterface.NetworkInterfaceCollection(net_intr),
+                                     instance_id=instance.id, device_index=0)
+                                 break
+                             except Exception:
+                                 time.sleep(10)
+                     net_list[index]['vim_id'] = reservation.instances[0].interfaces[index].id
+             instance = reservation.instances[0]
+             return instance.id, None
+         except Exception as e:
+             self.format_vimconn_exception(e)
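An illustrative net_list entry as consumed by new_vminstance (only 'net_id' and 'elastic_ip' are read by this connector; 'vim_id' is filled in with the created ENI id on return; the subnet id is made up):

    net_list = [{
        'name': 'eth0',
        'net_id': 'subnet-0a1b2c3d',   # hypothetical AWS subnet id
        'type': 'virtual',
        'elastic_ip': False,
        # 'vim_id' is set by new_vminstance after the instance is created
    }]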
+     def get_vminstance(self, vm_id):
+         """Returns the VM instance information from VIM"""
+         try:
+             self._reload_connection()
+             reservation = self.conn.get_all_instances(vm_id)
+             return reservation[0].instances[0].__dict__
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def delete_vminstance(self, vm_id, created_items=None):
+         """Removes a VM instance from VIM
+         Returns the instance identifier"""
+         try:
+             self._reload_connection()
+             self.logger.debug("DELETING VM_ID: " + str(vm_id))
+             self.conn.terminate_instances(vm_id)
+             return vm_id
+         except Exception as e:
+             self.format_vimconn_exception(e)
+     def refresh_vms_status(self, vm_list):
+         """ Get the status of the virtual machines and their interfaces/ports
+         Params: the list of VM identifiers
+         Returns a dictionary with:
+             vm_id:          #VIM id of this Virtual Machine
+                 status:     #Mandatory. Text with one of:
+                             #  DELETED (not found at vim)
+                             #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                             #  OTHER (Vim reported other status not understood)
+                             #  ERROR (VIM indicates an ERROR status)
+                             #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                             #  BUILD (on building process), ERROR
+                             #  ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address)
+                             #
+                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                 interfaces: list with interface info. Each item a dictionary with:
+                     vim_interface_id -  The ID of the ENI.
+                     vim_net_id - The ID of the VPC subnet.
+                     mac_address - The MAC address of the interface.
+                     ip_address - The IP address of the interface within the subnet.
+         """
+         self.logger.debug("Getting VM instance information from VIM")
+         try:
+             self._reload_connection()
+             reservation = self.conn.get_all_instances(vm_list)[0]
+             instances = {}
+             instance_dict = {}
+             for instance in reservation.instances:
+                 try:
+                     if instance.state == "pending":
+                         instance_dict['status'] = "BUILD"
+                     elif instance.state in ("available", "running", "up"):
+                         instance_dict['status'] = 'ACTIVE'
+                     else:
+                         instance_dict['status'] = 'ERROR'
+                     instance_dict['error_msg'] = ""
+                     instance_dict['interfaces'] = []
+                     for interface in instance.interfaces:
+                         interface_dict = {}  # fresh dict per interface so list entries are not aliased
+                         interface_dict['vim_interface_id'] = interface.id
+                         interface_dict['vim_net_id'] = interface.subnet_id
+                         interface_dict['mac_address'] = interface.mac_address
+                         if hasattr(interface, 'publicIp') and interface.publicIp is not None:
+                             interface_dict['ip_address'] = interface.publicIp + ";" + interface.private_ip_address
+                         else:
+                             interface_dict['ip_address'] = interface.private_ip_address
+                         instance_dict['interfaces'].append(interface_dict)
+                 except Exception as e:
+                     self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
+                     instance_dict['status'] = "DELETED"
+                     instance_dict['error_msg'] = str(e)
+                 finally:
+                     try:
+                         instance_dict['vim_info'] = yaml.safe_dump(instance, default_flow_style=True, width=256)
+                     except yaml.YAMLError as e:
+                         # self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
+                         instance_dict['vim_info'] = str(instance)
+                 instances[instance.id] = instance_dict
+             return instances
+         except Exception as e:
+             self.logger.error("Exception getting vm status: %s", str(e), exc_info=True)
+             self.format_vimconn_exception(e)
+     def action_vminstance(self, vm_id, action_dict, created_items={}):
+         """Send and action over a VM instance from VIM
+         Returns the vm_id if the action was successfully sent to the VIM"""
+         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+         try:
+             self._reload_connection()
+             if "start" in action_dict:
+                 self.conn.start_instances(vm_id)
+             elif "stop" in action_dict or "stop" in action_dict:
+                 self.conn.stop_instances(vm_id)
+             elif "terminate" in action_dict:
+                 self.conn.terminate_instances(vm_id)
+             elif "reboot" in action_dict:
+                 self.conn.reboot_instances(vm_id)
+             return None
+         except Exception as e:
+             self.format_vimconn_exception(e)
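A hedged end-to-end sketch of instantiating this connector; all credential and region values are placeholders, the import path follows the RO-VIM-aws package layout in the file list above, and the call requires valid AWS credentials plus network access (the constructor queries the VPC list):

    from osm_rovim_aws.vimconn_aws import vimconnector

    conn = vimconnector(
        uuid='aws-vim-0001', name='my-aws-vim',
        tenant_id=None, tenant_name='default', url=None,
        user='AKIA...', passwd='<secret>',
        config={'region_name': 'us-east-1', 'vpc_cidr_block': '10.0.0.0/24'},
    )
    print(conn.get_availability_zones_list())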
index 0000000,22cc86c..3cbc851
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,20 +1,20 @@@
 -git+https://osm.etsi.org/gerrit/osm/RO.git@py3#egg=osm-ro&subdirectory=RO
+ ##
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #    http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ # implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ ##
+ PyYAML
+ requests
+ netaddr
+ boto
++git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
index 0000000,0cc143f..7f2b2ea
mode 000000,100755..100755
--- /dev/null
@@@ -1,0 -1,495 +1,1304 @@@
 -__author__='Sergio Gonzalez'
 -__date__ ='$18-apr-2019 23:59:59$'
 -
 -from osm_ro import vimconn
+ # -*- coding: utf-8 -*-
++##
++# Licensed under the Apache License, Version 2.0 (the "License"); you may
++# not use this file except in compliance with the License. You may obtain
++# a copy of the License at
++#
++#         http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++# License for the specific language governing permissions and limitations
++# under the License.
++##
 -from uuid import uuid4
 -
++import base64
++import vimconn
+ import logging
++import netaddr
++import re
+ from os import getenv
 -        # CREDENTIALS 
 -        self.credentials = ServicePrincipalCredentials(
 -            client_id=user,
 -            secret=passwd,
 -            tenant=(tenant_id or tenant_name)
 -        )
+ from azure.common.credentials import ServicePrincipalCredentials
+ from azure.mgmt.resource import ResourceManagementClient
+ from azure.mgmt.network import NetworkManagementClient
+ from azure.mgmt.compute import ComputeManagementClient
++from azure.mgmt.compute.models import DiskCreateOption
++from msrestazure.azure_exceptions import CloudError
++from msrest.exceptions import AuthenticationError
++from requests.exceptions import ConnectionError
++
++__author__ = 'Isabel Lloret, Sergio Gonzalez, Alfonso Tierno'
++__date__ = '$18-apr-2019 23:59:59$'
++
++
++if getenv('OSMRO_PDB_DEBUG'):
++    import sys
++    print(sys.path)
++    import pdb
++    pdb.set_trace()
+ class vimconnector(vimconn.vimconnector):
++    # Translate azure provisioning state to OSM provision state
++    # The first three are transitional states, present while a user-initiated action is in progress
++    # Once the operation completes, the state transitions to Succeeded or Failed
++    # https://docs.microsoft.com/en-us/azure/virtual-machines/windows/states-lifecycle
++    provision_state2osm = {
++        "Creating": "BUILD",
++        "Updating": "BUILD",
++        "Deleting": "INACTIVE",
++        "Succeeded": "ACTIVE",
++        "Failed": "ERROR"
++    }
++
++    # Translate azure power state to OSM status
++    power_state2osm = {
++        "starting": "INACTIVE",
++        "running": "ACTIVE",
++        "stopping": "INACTIVE",
++        "stopped": "INACTIVE",
++        "unknown": "OTHER",
++        "deallocated": "BUILD",
++        "deallocating": "BUILD"
++    }
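A standalone sketch of how these two tables can combine when reporting VM status; the table contents are copied from above, while the precedence rule (provisioning errors first, power state otherwise) is an illustrative assumption:

    PROVISION2OSM = {"Creating": "BUILD", "Updating": "BUILD", "Deleting": "INACTIVE",
                     "Succeeded": "ACTIVE", "Failed": "ERROR"}
    POWER2OSM = {"starting": "INACTIVE", "running": "ACTIVE", "stopping": "INACTIVE",
                 "stopped": "INACTIVE", "unknown": "OTHER",
                 "deallocated": "BUILD", "deallocating": "BUILD"}

    def osm_status(provisioning_state, power_state):
        # provisioning errors take precedence; otherwise report the power state translation
        if provisioning_state != "Succeeded":
            return PROVISION2OSM.get(provisioning_state, "OTHER")
        return POWER2OSM.get(power_state, "OTHER")

    print(osm_status("Succeeded", "running"))   # ACTIVE
    print(osm_status("Failed", "running"))      # ERROR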
++
+     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
+                  config={}, persistent_info={}):
++        """
++        Constructor of VIM. Raise an exception if some needed parameter is missing, but it must not do any connectivity
++        checking against the VIM
++        Using common constructor parameters.
++        In this case: config must include the following parameters:
++        subscription_id: assigned azure subscription identifier
++        region_name: current region for azure network
++        resource_group: used for all azure created resources
++        vnet_name: base vnet for azure, created networks will be subnets from this base network
++        config may also include the following parameter:
++        flavors_pattern: pattern that will be used to select a range of vm sizes, for example
++            "^((?!Standard_B).)*$" will filter out Standard_B range that is cheap but is very overused
++            "^Standard_B" will select a serie B maybe for test environment
++        """
+         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                       config, persistent_info)
++        # Variable that indicates if client must be reloaded or initialized
++        self.reload_client = True
++
++        self.vnet_address_space = None
+         # LOGGER
+         self.logger = logging.getLogger('openmano.vim.azure')
+         if log_level:
+             logging.basicConfig()
+             self.logger.setLevel(getattr(logging, log_level))
 -            self.subscription_id = config.get('subscription_id')
 -            self.logger.debug('Setting subscription '+str(self.subscription_id))
++        self.tenant = (tenant_id or tenant_name)
++
++        # Store config to create azure subscription later
++        self._config = {
++            "user": user,
++            "passwd": passwd,
++            "tenant": tenant_id or tenant_name
++        }
+         # SUBSCRIPTION
+         if 'subscription_id' in config:
 -        Sets connections to work with Azure service APIs
 -        :return:
++            self._config["subscription_id"] = config.get('subscription_id')
++            # self.logger.debug('Setting subscription to: %s', self.config["subscription_id"])
+         else:
+             raise vimconn.vimconnException('Subscription not specified')
++
+         # REGION
+         if 'region_name' in config:
+             self.region = config.get('region_name')
+         else:
+             raise vimconn.vimconnException('Azure region_name is not specified at config')
++
+         # RESOURCE_GROUP
+         if 'resource_group' in config:
+             self.resource_group = config.get('resource_group')
+         else:
+             raise vimconn.vimconnException('Azure resource_group is not specified at config')
++
+         # VNET_NAME
+         if 'vnet_name' in config:
+             self.vnet_name = config["vnet_name"]
+             
+         # public ssh key
+         self.pub_key = config.get('pub_key')
++
++        # flavor pattern regex
++        if 'flavors_pattern' in config:
++            self._config['flavors_pattern'] = config['flavors_pattern']
+             
+     def _reload_connection(self):
+         """
 -        self.logger.debug('Reloading API Connection')
 -        try:
 -            self.conn = ResourceManagementClient(self.credentials, self.subscription_id)
 -            self.conn_compute = ComputeManagementClient(self.credentials, self.subscription_id)
 -            self.conn_vnet = NetworkManagementClient(self.credentials, self.subscription_id)
 -            self._check_or_create_resource_group()
 -            self._check_or_create_vnet()
 -        except Exception as e:
 -            self.format_vimconn_exception(e)            
++        Called before any operation; checks the python azure clients
+         """
 -        return str(resource_id.split('/')[-1])
++        if self.reload_client:
++            self.logger.debug('reloading azure client')
++            try:
++                self.credentials = ServicePrincipalCredentials(
++                    client_id=self._config["user"],
++                    secret=self._config["passwd"],
++                    tenant=self._config["tenant"]
++                )
++                self.conn = ResourceManagementClient(self.credentials, self._config["subscription_id"])
++                self.conn_compute = ComputeManagementClient(self.credentials, self._config["subscription_id"])
++                self.conn_vnet = NetworkManagementClient(self.credentials, self._config["subscription_id"])
++                self._check_or_create_resource_group()
++                self._check_or_create_vnet()
++
++                # Set to client created
++                self.reload_client = False
++            except Exception as e:
++                self._format_vimconn_exception(e)
+     def _get_resource_name_from_resource_id(self, resource_id):
 -        return self.conn.resource_groups.get(resource_group_name).location
 -        
++        """
++        Obtains resource_name from the complete azure identifier: resource_name is always the last item
++        """
++        try:
++            resource = str(resource_id.split('/')[-1])
++            return resource
++        except Exception as e:
++            raise vimconn.vimconnException("Unable to get resource name from resource_id '{}' Error: '{}'".
++                                           format(resource_id, e))
+     def _get_location_from_resource_group(self, resource_group_name):
 -        return str(resource_id.split('/')[4])
++        try:
++            location = self.conn.resource_groups.get(resource_group_name).location
++            return location
++        except Exception as e:
++            raise vimconn.vimconnNotFoundException("Location '{}' not found".format(resource_group_name))
++
+     def _get_resource_group_name_from_resource_id(self, resource_id):
 -        if len(set(self._get_resource_group_name_from_resource_id(net['id']) +
 -                   self._get_resource_name_from_resource_id(net['id']) for net in net_list)) != 1:
 -            raise self.format_vimconn_exception('Azure VMs can only attach to subnets in same VNET')
++
++        try:
++            rg = str(resource_id.split('/')[4])
++            return rg
++        except Exception as e:
++            raise vimconn.vimconnException("Unable to get resource group from invalid resource_id format '{}'".
++                                           format(resource_id))
++
++    def _get_net_name_from_resource_id(self, resource_id):
++
++        try:
++            net_name = str(resource_id.split('/')[8])
++            return net_name
++        except Exception as e:
++            raise vimconn.vimconnException("Unable to get azure net_name from invalid resource_id format '{}'".
++                                           format(resource_id))
+     def _check_subnets_for_vm(self, net_list):
+         # All subnets must belong to the same resource group and vnet
 -    def format_vimconn_exception(self, e):
++        rg_vnet = set(self._get_resource_group_name_from_resource_id(net['net_id']) +
++                      self._get_net_name_from_resource_id(net['net_id']) for net in net_list)
++
++        if len(rg_vnet) != 1:
++            raise self._format_vimconn_exception('Azure VMs can only attach to subnets in same VNET')
 -        Params: an Exception object
 -        :param e:
 -        :return: Raises the proper vimconnException
++    def _format_vimconn_exception(self, e):
+         """
 -        self.conn = None
 -        self.conn_vnet = None
 -        raise vimconn.vimconnConnectionException(type(e).__name__ + ': ' + str(e))        
++        Transforms a generic or azure exception into a vimconnException
+         """
 -        Creates a resource group in indicated region
 -        :return: None
++        if isinstance(e, vimconn.vimconnException):
++            raise
++        elif isinstance(e, AuthenticationError):
++            raise vimconn.vimconnAuthException(type(e).__name__ + ': ' + str(e))
++        elif isinstance(e, ConnectionError):
++            raise vimconn.vimconnConnectionException(type(e).__name__ + ': ' + str(e))
++        else:
++            # In case of generic error recreate client
++            self.reload_client = True
++            raise vimconn.vimconnException(type(e).__name__ + ': ' + str(e))
+     def _check_or_create_resource_group(self):
+         """
 -        self.logger.debug('Creating RG {} in location {}'.format(self.resource_group, self.region))
 -        self.conn.resource_groups.create_or_update(self.resource_group, {'location': self.region})
++        Creates the base resource group if it does not exist
+         """
 -                    'address_prefixes': "10.0.0.0/8"
++        try:
++            rg_exists = self.conn.resource_groups.check_existence(self.resource_group)
++            if not rg_exists:
++                self.logger.debug("create base rgroup: %s", self.resource_group)
++                self.conn.resource_groups.create_or_update(self.resource_group, {'location': self.region})
++        except Exception as e:
++            self._format_vimconn_exception(e)
+     def _check_or_create_vnet(self):
++        """
++        Try to get the existing base vnet; if it does not exist, create it
++        """
++        try:
++            vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name)
++            self.vnet_address_space = vnet.address_space.address_prefixes[0]
++            self.vnet_id = vnet.id
++            return
++        except CloudError as e:
++            if e.error.error and "notfound" in e.error.error.lower():
++                pass
++                # continue and create it
++            else:
++                self._format_vimconn_exception(e)
++
++        # if it does not exist, create it
+         try:
+             vnet_params = {
+                 'location': self.region,
+                 'address_space': {
 -            self.format_vimconn_exception(e)
++                    'address_prefixes': ["10.0.0.0/8"]
+                 },
+             }
++            self.vnet_address_space = "10.0.0.0/8"
++
++            self.logger.debug("create base vnet: %s", self.vnet_name)
+             self.conn_vnet.virtual_networks.create_or_update(self.resource_group, self.vnet_name, vnet_params)
++            vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name)
++            self.vnet_id = vnet.id
+         except Exception as e:
 -    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
++            self._format_vimconn_exception(e)
 -        :param net_type:
++    def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+         """
+         Adds a tenant network to VIM
+         :param net_name: name of the network
 -                'gateway-address': (Optional) ip_schema, that is X.X.X.X
 -                'dns-address': (Optional) ip_schema,
 -                'dhcp': (Optional) dict containing
++        :param net_type: not used for Azure networks
+         :param ip_profile: is a dict containing the IP parameters of the network (Currently only IPv4 is implemented)
+                 'ip-version': can be one of ['IPv4','IPv6']
+                 'subnet-address': ip_prefix_schema, that is X.X.X.X/Y
 -        :param shared:
 -        :param vlan:
++                'gateway-address': (Optional) ip_schema, that is X.X.X.X, not implemented for the Azure connector
++                'dns-address': (Optional) ip_schema, not implemented for the Azure connector
++                'dhcp': (Optional) dict containing the following, not implemented for the Azure connector:
+                     'enabled': {'type': 'boolean'},
+                     'start-address': ip_schema, first IP to grant
+                     'count': number of IPs to grant.
 -
++        :param shared: not supported by the Azure connector
++        :param provider_network_profile: (optional) contains {segmentation-id: vlan, provider-network: vim_network}
+         :return: a tuple with the network identifier and created_items, or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
 -        Adds a tenant network to VIM. It creates a new VNET with a single subnet
 -        :param net_name:
+         return self._new_subnet(net_name, ip_profile)
+     def _new_subnet(self, net_name, ip_profile):
+         """
 -        :return:
++        Adds a tenant network to VIM. It creates a new subnet in the existing base vnet
++        :param net_name: subnet name
+         :param ip_profile:
 -        self.logger.debug('Adding a subnet to VNET '+self.vnet_name)
++                subnet-address: if not provided, a free /24 inside the base vnet address space is allocated;
++                otherwise the subnet is created at the indicated address
++        :return: a tuple with the network identifier and created_items, or raises an exception on error
+         """
 -            # TODO get a non used vnet ip range /24 and allocate automatically
 -            raise vimconn.vimconnException('Azure cannot create VNET with no CIDR')
++        self.logger.debug('create subnet name %s, ip_profile %s', net_name, ip_profile)
+         self._reload_connection()
+         if ip_profile is None:
 -            vnet_params= {
 -                'location': self.region,
 -                'address_space': {
 -                    'address_prefixes': [ip_profile['subnet_address']]
 -                },
 -                'subnets': [
 -                    {
 -                        'name': "{}-{}".format(net_name[:24], uuid4()),
 -                        'address_prefix': ip_profile['subnet_address']
 -                    }
 -                ]
++            # get a non-used /24 ip range and allocate it automatically inside self.vnet_address_space
++            used_subnets = self.get_network_list()
++            for ip_range in netaddr.IPNetwork(self.vnet_address_space).subnet(24):
++                for used_subnet in used_subnets:
++                    subnet_range = netaddr.IPNetwork(used_subnet["cidr_block"])
++                    if subnet_range in ip_range or ip_range in subnet_range:
++                        # this range overlaps an existing subnet ip range; break and look for another
++                        break
++                else:
++                    ip_profile = {"subnet_address": str(ip_range)}
++                    self.logger.debug('dynamically obtained ip_profile: %s', ip_range)
++                    break
++            else:
++                raise vimconn.vimconnException("Cannot find a non-used subnet range in {}".
++                                               format(self.vnet_address_space))
++        else:
++            ip_profile = {"subnet_address": ip_profile['subnet_address']}
+         try:
 -            self.conn_vnet.virtual_networks.create_or_update(self.resource_group, self.vnet_name, vnet_params)
 -            # TODO return a tuple (subnet-ID, None)
++            # subnet_name = "{}-{}".format(net_name[:24], uuid4())
++            subnet_params = {
++                'address_prefix': ip_profile['subnet_address']
+             }
 -            self.format_vimconn_exception(e)
++            # Assign a non-duplicated subnet name
++            subnet_name = self._get_unused_subnet_name(net_name)
++
++            self.logger.debug('creating subnet_name: {}'.format(subnet_name))
++            async_creation = self.conn_vnet.subnets.create_or_update(self.resource_group, self.vnet_name,
++                                                                     subnet_name, subnet_params)
++            async_creation.wait()
++            self.logger.debug('created subnet_name: {}'.format(subnet_name))
++
++            return "{}/subnets/{}".format(self.vnet_id, subnet_name), None
+         except Exception as e:
 -    def _create_nic(self, subnet_id, nic_name, static_ip=None):
++            self._format_vimconn_exception(e)
++
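++    # Sketch of the automatic /24 allocation above (values are illustrative):
++    #   candidates = netaddr.IPNetwork("10.0.0.0/8").subnet(24)
++    # yields 10.0.0.0/24, 10.0.1.0/24, ... in order; the first candidate that
++    # does not overlap any existing subnet cidr_block is taken, so with
++    # 10.0.0.0/24 already in use the chosen range would be 10.0.1.0/24.
++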
++    def _get_unused_subnet_name(self, subnet_name):
++        """
++        Adds a numeric suffix to subnet_name in case the indicated name is already used
++        Checks existing subnets starting with the indicated name and appends the first unused suffix
++        """
++        all_subnets = self.conn_vnet.subnets.list(self.resource_group, self.vnet_name)
++        # Filter to subnets starting with the indicated name
++        subnets = list(filter(lambda subnet: (subnet.name.startswith(subnet_name)), all_subnets))
++        net_names = [str(subnet.name) for subnet in subnets]
 -        
 -        resource_group_name=self._get_resource_group_name_from_resource_id(subnet_id)
 -        location = self._get_location_from_resource_group(resource_group_name)
 -            
 -        if static_ip:
 -            async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(
 -                resource_group_name,
 -                nic_name,
 -                {
 -                    'location': location,
 -                    'ip_configurations': [{
 -                        'name': nic_name + 'ipconfiguration',
 -                        'privateIPAddress': static_ip,
 -                        'privateIPAllocationMethod': 'Static',
 -                        'subnet': {
 -                            'id': subnet_id
 -                        }
 -                    }]
 -                }
 -            )
 -        else:
 -            async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(
 -                resource_group_name,
 -                nic_name,
 -                {
++        # get the name with the first not used suffix
++        name_suffix = 0
++        # name = subnet_name + "-" + str(name_suffix)
++        name = subnet_name  # first subnet created will have no suffix
++        while name in net_names:
++            name_suffix += 1
++            name = subnet_name + "-" + str(name_suffix)
++        return name
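++
++    # Example of the suffixing behaviour (hypothetical names): if subnets "mgmt"
++    # and "mgmt-1" already exist, _get_unused_subnet_name("mgmt") returns
++    # "mgmt-2"; if no existing subnet matches the name, it is returned unchanged.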
++
++    def _create_nic(self, net, nic_name, static_ip=None):
++
++        self.logger.debug('create nic name %s, net %s', nic_name, net)
+         self._reload_connection()
 -                    'ip_configurations': [{
 -                        'name': nic_name + 'ipconfiguration',
 -                        'subnet': {
 -                            'id': subnet_id
 -                        }
 -                    }]
++
++        subnet_id = net['net_id']
++        location = self._get_location_from_resource_group(self.resource_group)
++        try:
++            net_ifz = {'location': location}
++            net_ip_config = {'name': nic_name + '-ipconfiguration', 'subnet': {'id': subnet_id}}
++            if static_ip:
++                net_ip_config['privateIPAddress'] = static_ip
++                net_ip_config['privateIPAllocationMethod'] = 'Static'
++            net_ifz['ip_configurations'] = [net_ip_config]
++            mac_address = net.get('mac_address')
++            if mac_address:
++                net_ifz['mac_address'] = mac_address
++
++            async_nic_creation = self.conn_vnet.network_interfaces.create_or_update(self.resource_group, nic_name,
++                                                                                    net_ifz)
++            async_nic_creation.wait()
++            self.logger.debug('created nic name %s', nic_name)
++
++            public_ip = net.get('floating_ip')
++            if public_ip:
++                public_ip_address_params = {
+                     'location': location,
 -            )
++                    'public_ip_allocation_method': 'Dynamic'
+                 }
 -    def get_image_list(self, filter_dict={}):
++                public_ip_name = nic_name + '-public-ip'
++                public_ip = self.conn_vnet.public_ip_addresses.create_or_update(
++                    self.resource_group,
++                    public_ip_name,
++                    public_ip_address_params
++                )
++                self.logger.debug('created public IP: {}'.format(public_ip.result()))
++
++                # Associate NIC to Public IP
++                nic_data = self.conn_vnet.network_interfaces.get(
++                    self.resource_group,
++                    nic_name)
++
++                nic_data.ip_configurations[0].public_ip_address = public_ip.result()
++
++                self.conn_vnet.network_interfaces.create_or_update(
++                    self.resource_group,
++                    nic_name,
++                    nic_data)
++
++        except Exception as e:
++            self._format_vimconn_exception(e)
+         return async_nic_creation.result()
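++
++    # Sketch of the net dict consumed by _create_nic (keys taken from its usage
++    # above, values purely illustrative):
++    #   net = {'net_id': '<subnet resource id>',    # subnet to attach the NIC to
++    #          'ip_address': '10.0.1.10',           # optional, forces a static IP
++    #          'mac_address': '00:11:22:33:44:55',  # optional
++    #          'floating_ip': True}                 # optional, adds a public IP
++    #   vm_nic = self._create_nic(net, 'myvm-nic-0', net.get('ip_address'))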
 -        The urn contains for marketplace  'publisher:offer:sku:version'
++    def new_flavor(self, flavor_data):
+         """
 -        :param filter_dict:
 -        :return:
++        It is not allowed to create new flavors in Azure; an existing one must always be used
++        """
++        raise vimconn.vimconnAuthException("It is not possible to create new flavors in AZURE")
 -        image_list = []
++    def new_tenant(self, tenant_name, tenant_description):
+         """
 -        if filter_dict.get("name"):
 -            params = filter_dict["name"].split(":")
 -            if len(params) >= 3:
++        It is not allowed to create new tenants in Azure
++        """
++        raise vimconn.vimconnAuthException("It is not possible to create a TENANT in AZURE")
++
++    def new_image(self, image_dict):
++        """
++        It is not allowed to create new images in Azure, must always use an existing one
++        """
++        raise vimconn.vimconnAuthException("It is not possible to create new images in AZURE")
++
++    def get_image_id_from_path(self, path):
++        """Get the image id from image path in the VIM database.
++           Returns the image_id or raises a vimconnNotFoundException
++        """
++        raise vimconn.vimconnAuthException("It is not possible to obtain image from path in AZURE")
++
++    def get_image_list(self, filter_dict={}):
++        """Obtain tenant images from VIM
++        Filter_dict can be:
++            name: image name with the format: publisher:offer:sku:version
++            If only part of the name is provided, e.g. publisher:offer, all available skus and versions
++            for the provided publisher and offer are searched
++            id: image uuid, currently not supported for Azure
++        Returns the image list of dictionaries:
++            [{<the fields at Filter_dict plus some VIM specific>}, ...]
++            List can be empty
++        """
++
++        self.logger.debug("get_image_list filter {}".format(filter_dict))
+         self._reload_connection()
 -                offer = params[1]
 -                sku = params[2]
 -                version = None
 -                if len(params) == 4:
 -                    version = params[3]
 -                images = self.conn_compute.virtual_machine_images.list(self.region, publisher, offer, sku)
 -                for image in images:
 -                    if version:
 -                        image_version = str(image.id).split("/")[-1]
 -                        if image_version != version:
 -                            continue
 -                    image_list.append({
 -                        'id': str(image.id),
 -                        'name': self._get_resource_name_from_resource_id(image.id)
 -                    })
 -                return image_list
 -
 -        images = self.conn_compute.virtual_machine_images.list()
 -
 -        for image in images:
 -            # TODO implement filter_dict
 -            if filter_dict:
 -                if filter_dict.get("id") and str(image.id) != filter_dict["id"]:
 -                    continue
 -                if filter_dict.get("name") and \
 -                        self._get_resource_name_from_resource_id(image.id) != filter_dict["name"]:
 -                    continue
 -                # TODO add checksum
 -            image_list.append({
 -                'id': str(image.id),
 -                'name': self._get_resource_name_from_resource_id(image.id),
 -            })
++        try:
++            image_list = []
++            if filter_dict.get("name"):
++                # name will have the format 'publisher:offer:sku:version'
++                # publisher is required; offer, sku and version are searched if not provided
++                params = filter_dict["name"].split(":")
+                 publisher = params[0]
 -            id: network uuid
 -            shared: boolean
 -            tenant_id: tenant
 -            admin_state_up: boolean
 -            status: 'ACTIVE'
++                if publisher:
++                    # obtain offer list
++                    offer_list = self._get_offer_list(params, publisher)
++                    for offer in offer_list:
++                        # obtain skus
++                        sku_list = self._get_sku_list(params, publisher, offer)
++                        for sku in sku_list:
++                            # if version is defined, get that version directly; otherwise list images
++                            if len(params) == 4 and params[3]:
++                                version = params[3]
++                                image_list.extend(self._get_version_image_list(publisher, offer, sku, version))
++                            else:
++                                image_list.extend(self._get_sku_image_list(publisher, offer, sku))
++                else:
++                    raise vimconn.vimconnAuthException(
++                        "List images in Azure must include name param with at least publisher")
++            else:
++                raise vimconn.vimconnAuthException("List images in Azure must include name param with at"
++                                                   " least publisher")
++
++            return image_list
++        except Exception as e:
++            self._format_vimconn_exception(e)
++
++    def _get_offer_list(self, params, publisher):
++        """
++        Helper method to obtain offer list for defined publisher
++        """
++        if len(params) >= 2 and params[1]:
++            return [params[1]]
++        else:
++            try:
++                # get list of offers from azure
++                result_offers = self.conn_compute.virtual_machine_images.list_offers(self.region, publisher)
++                return [offer.name for offer in result_offers]
++            except CloudError as e:
++                # azure raises CloudError when not found
++                self.logger.info("error listing offers for publisher {}, Error: {}".format(publisher, e))
++                return []
++
++    def _get_sku_list(self, params, publisher, offer):
++        """
++        Helper method to obtain sku list for defined publisher and offer
++        """
++        if len(params) >= 3 and params[2]:
++            return [params[2]]
++        else:
++            try:
++                # get list of skus from azure
++                result_skus = self.conn_compute.virtual_machine_images.list_skus(self.region, publisher, offer)
++                return [sku.name for sku in result_skus]
++            except CloudError as e:
++                # azure raises CloudError when not found
++                self.logger.info("error listing skus for publisher {}, offer {}, Error: {}".format(publisher, offer, e))
++                return []
++
++    def _get_sku_image_list(self, publisher, offer, sku):
++        """
++        Helper method to obtain image list for publisher, offer and sku
++        """
++        image_list = []
++        try:
++            result_images = self.conn_compute.virtual_machine_images.list(self.region, publisher, offer, sku)
++            for result_image in result_images:
++                image_list.append({
++                    'id': str(result_image.id),
++                    'name': ":".join([publisher, offer, sku, result_image.name])
++                })
++        except CloudError as e:
++            self.logger.info(
++                "error listing images for publisher {}, offer {}, sku {}, Error: {}".format(publisher, offer, sku, e))
++            image_list = []
++        return image_list
++
++    def _get_version_image_list(self, publisher, offer, sku, version):
++        image_list = []
++        try:
++            result_image = self.conn_compute.virtual_machine_images.get(self.region, publisher, offer, sku, version)
++            if result_image:
++                image_list.append({
++                    'id': str(result_image.id),
++                    'name': ":".join([publisher, offer, sku, version])
++                })
++        except CloudError as e:
++            # azure gives CloudError when not found
++            self.logger.info("error listing images for publisher {}, offer {}, sku {}, version {} Error: {}".
++                             format(publisher, offer, sku, version, e))
++            image_list = []
+         return image_list
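++
++    # Usage sketch (hypothetical values): a partial name widens the search, e.g.
++    #   get_image_list({"name": "Canonical:UbuntuServer:18.04-LTS"})
++    # lists every available version of that sku, while appending a version such
++    # as ":18.04.201809110" returns only that image as
++    #   [{'id': '<azure image id>', 'name': 'Canonical:UbuntuServer:18.04-LTS:...'}]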
+     def get_network_list(self, filter_dict={}):
+         """Obtain tenant networks of VIM
+         Filter_dict can be:
+             name: network name
 -        self.logger.debug('Getting all subnets from VIM')
++            id: network id
++            shared: boolean, not implemented in Azure
++            tenant_id: tenant, not used in Azure, all networks belong to the same tenant
++            admin_state_up: boolean, not implemented in Azure
++            status: 'ACTIVE', not implemented in Azure
+         Returns the network list of dictionaries
+         """
 -            vnet = self.conn_vnet.virtual_networks.get(self.config["resource_group"], self.vnet_name)
++        # self.logger.debug('getting network list for vim, filter %s', filter_dict)
+         try:
+             self._reload_connection()
 -            
++
++            vnet = self.conn_vnet.virtual_networks.get(self.resource_group, self.vnet_name)
+             subnet_list = []
 -                # TODO implement filter_dict
++
+             for subnet in vnet.subnets:
 -                            self._get_resource_name_from_resource_id(subnet.id) != filter_dict["name"]:
+                 if filter_dict:
+                     if filter_dict.get("id") and str(subnet.id) != filter_dict["id"]:
+                         continue
+                     if filter_dict.get("name") and \
 -                     'name': self._get_resource_name_from_resource_id(subnet.id),
 -                     'status': str(vnet.provisioning_state),  # TODO Does subnet contains status???
 -                     'cidr_block': str(subnet.address_prefix)
 -                    }
 -                )
++                            str(subnet.name) != filter_dict["name"]:
+                         continue
++                name = self._get_resource_name_from_resource_id(subnet.id)
++
+                 subnet_list.append({
+                     'id': str(subnet.id),
 -            self.format_vimconn_exception(e)
++                    'name': name,
++                    'status': self.provision_state2osm[subnet.provisioning_state],
++                    'cidr_block': str(subnet.address_prefix),
++                    'type': 'bridge',
++                    'shared': False
++                })
++
+             return subnet_list
+         except Exception as e:
 -    def new_vminstance(self, vm_name, description, start, image_id, flavor_id, net_list, cloud_config=None,
++            self._format_vimconn_exception(e)
 -        return self._new_vminstance(vm_name, image_id, flavor_id, net_list)
 -        
 -    def _new_vminstance(self, vm_name, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
 -                        availability_zone_index=None, availability_zone_list=None):
 -        #Create NICs
++    def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None,
+                        disk_list=None, availability_zone_index=None, availability_zone_list=None):
 -            subnet_id=net['subnet_id']
++        self.logger.debug("new vm instance name: %s, image_id: %s, flavor_id: %s, net_list: %s, cloud_config: %s, "
++                          "disk_list: %s, availability_zone_index: %s, availability_zone_list: %s",
++                          name, image_id, flavor_id, net_list, cloud_config, disk_list,
++                          availability_zone_index, availability_zone_list)
++
++        self._reload_connection()
++
++        # Validate input data
++        # The virtual machine name must have at most 64 characters and cannot contain the following
++        # characters: (~ ! @ # $ % ^ & * ( ) = + _ [ ] { } \ | ; : ' " , < > / ?.)
++        vm_name = self._check_vm_name(name)
++        # Obtain an unused vm name
++        vm_name = self._get_unused_vm_name(vm_name)
++
++        # At least one network must be provided
++        if not net_list:
++            raise vimconn.vimconnException("At least one net must be provided to create a new VM")
++
++        # image_id encodes several fields (publisher, offer, sku, version) in its path
++        image_reference = self._get_image_reference(image_id)
++
+         self._check_subnets_for_vm(net_list)
+         vm_nics = []
+         for idx, net in enumerate(net_list):
 -            vm_nic = self._create_nic(subnet_id, nic_name)
 -            vm_nics.append({ 'id': str(vm_nic.id)})
++            # note: the subnet id for each nic is taken from net['net_id'] inside _create_nic
+             nic_name = vm_name + '-nic-'+str(idx)
 -                'os_profile': {
 -                    'computer_name': vm_name,  # TODO if vm_name cannot be repeated add uuid4() suffix
 -                    'admin_username': 'sergio',  # TODO is it mandatory???
 -                    'linuxConfiguration': {
 -                        'disablePasswordAuthentication': 'true',
 -                        'ssh': {
 -                          'publicKeys': [
 -                            {
 -                              'path': '/home/sergio/.ssh/authorized_keys',
 -                              'keyData': self.pub_key
 -                            }
 -                          ]
 -                        }
 -                    }                    
 -                    
 -                },
++            vm_nic = self._create_nic(net, nic_name, net.get('ip_address'))
++            vm_nics.append({'id': str(vm_nic.id)})
++            net['vim_id'] = vm_nic.id
+         try:
++
++            # cloud-init configuration
++            # cloud config
++            if cloud_config:
++                config_drive, userdata = self._create_user_data(cloud_config)
++                custom_data = base64.b64encode(userdata.encode('utf-8')).decode('latin-1')
++                key_data = None
++                key_pairs = cloud_config.get("key-pairs")
++                if key_pairs:
++                    key_data = key_pairs[0]
++
++                if cloud_config.get("users"):
++                    user_name = cloud_config.get("users")[0].get("name", "osm")
++                else:
++                    user_name = "osm"  # DEFAULT USER IS OSM
++
++                os_profile = {
++                    'computer_name': vm_name,
++                    'admin_username': user_name,
++                    'linux_configuration': {
++                        "disable_password_authentication": True,
++                        "ssh": {
++                            "public_keys": [{
++                                "path": "/home/{}/.ssh/authorized_keys".format(user_name),
++                                "key_data": key_data
++                            }]
++                        }
++                    },
++                    'custom_data': custom_data
++                }
++            else:
++                os_profile = {
++                    'computer_name': vm_name,
++                    'admin_username': 'osm',
++                    'admin_password': 'Osm4u!',
++                }
++
+             vm_parameters = {
+                 'location': self.region,
 -                    'vm_size':flavor_id
++                'os_profile': os_profile,
+                 'hardware_profile': {
 -                    'image_reference': image_id
 -                },
 -                'network_profile': {
 -                    'network_interfaces': [
 -                        vm_nics[0]
 -                    ]
++                    'vm_size': flavor_id
+                 },
+                 'storage_profile': {
 -                vm_name, 
++                    'image_reference': image_reference
+                 }
+             }
++
++            # Add data disks if they are provided
++            if disk_list:
++                data_disks = []
++                for lun_name, disk in enumerate(disk_list):
++                    self.logger.debug("add disk size: %s, image: %s", disk.get("size"), disk.get("image_id"))
++                    if not disk.get("image_id"):
++                        data_disks.append({
++                            'lun': lun_name,  # LUN taken from the disk list index
++                            'name': vm_name + "_data_disk-" + str(lun_name),
++                            'create_option': DiskCreateOption.empty,
++                            'disk_size_gb': disk.get("size")
++                        })
++                    else:
++                        # self.logger.debug("currently not able to create data disks from image for azure, ignoring")
++                        data_disks.append({
++                            'lun': lun_name,  # LUN taken from the disk list index
++                            'name': vm_name + "_data_disk-" + str(lun_name),
++                            'create_option': 'Attach',
++                            'disk_size_gb': disk.get("size"),
++                            'managed_disk': {
++                                'id': disk.get("image_id")
++                            }
++                        })
++
++                if data_disks:
++                    vm_parameters["storage_profile"]["data_disks"] = data_disks
++
++            # If the machine has several networks one must be marked as primary
++            # As it is not indicated in the interface, the first one is marked as primary
++            if len(vm_nics) > 1:
++                for idx, vm_nic in enumerate(vm_nics):
++                    vm_nic['Primary'] = (idx == 0)
++
++            vm_parameters['network_profile'] = {'network_interfaces': vm_nics}
++
++            self.logger.debug("create vm name: %s", vm_name)
+             creation_result = self.conn_compute.virtual_machines.create_or_update(
+                 self.resource_group, 
 -            run_command_parameters = {
 -                'command_id': 'RunShellScript', # For linux, don't change it
 -                'script': [
 -                'date > /home/sergio/test.txt'
 -                ]
++                vm_name,
+                 vm_parameters
+             )
++            # creation_result.wait()
++            result = creation_result.result()
++            self.logger.debug("created vm name: %s", vm_name)
++
++            if start:
++                self.conn_compute.virtual_machines.start(
++                    self.resource_group,
++                    vm_name)
++            # start_result.wait()
++
++            return result.id, None
+             
 -            poller = self.conn_compute.virtual_machines.run_command(
 -                self.resource_group, 
 -                vm_name, 
 -                run_command_parameters
 -            )
 -            # TODO return a tuple (vm-ID, None)
++            # run_command_parameters = {
++            #     'command_id': 'RunShellScript', # For linux, don't change it
++            #     'script': [
++            #     'date > /tmp/test.txt'
++            #     ]
++            # }
++        except Exception as e:
++            self.logger.debug('Exception creating new vminstance: %s', e, exc_info=True)
++            self._format_vimconn_exception(e)
++
++    def _get_unused_vm_name(self, vm_name):
++        """
++        Checks the vm name; if it is already used, adds a numeric suffix so creation can proceed
++        :return: the first unused vm name
++        """
++        all_vms = self.conn_compute.virtual_machines.list(self.resource_group)
++        # Filter to vms starting with the indicated name
++        vms = list(filter(lambda vm: (vm.name.startswith(vm_name)), all_vms))
++        vm_names = [str(vm.name) for vm in vms]
++
++        # get the name with the first not used suffix
++        name_suffix = 0
++        # name = vm_name + "-" + str(name_suffix)
++        name = vm_name  # first vm created will have no suffix
++        while name in vm_names:
++            name_suffix += 1
++            name = vm_name + "-" + str(name_suffix)
++        return name
++
++    # It is necessary to extract data from image_id to create the VM with this format
++    #        'image_reference': {
++    #           'publisher': vm_reference['publisher'],
++    #           'offer': vm_reference['offer'],
++    #           'sku': vm_reference['sku'],
++    #           'version': vm_reference['version']
++    #        },
++    def _get_image_reference(self, image_id):
++
++        try:
++            # The data input format example:
++            # /Subscriptions/ca3d18ab-d373-4afb-a5d6-7c44f098d16a/Providers/Microsoft.Compute/Locations/westeurope/
++            # Publishers/Canonical/ArtifactTypes/VMImage/
++            # Offers/UbuntuServer/
++            # Skus/18.04-LTS/
++            # Versions/18.04.201809110
++            publisher = str(image_id.split('/')[8])
++            offer = str(image_id.split('/')[12])
++            sku = str(image_id.split('/')[14])
++            version = str(image_id.split('/')[16])
++
++            return {
++                'publisher': publisher,
++                'offer': offer,
++                'sku': sku,
++                'version': version
+             }
 -            self.format_vimconn_exception(e)
+         except Exception as e:
 -        self.logger.debug("Getting flavor id from data")
 -        self._reload_connection()
 -        vm_sizes_list = [vm_size.serialize() for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region)]
++            raise vimconn.vimconnException(
++                "Unable to get image_reference from invalid image_id format: '{}'".format(image_id))
++
++    # Azure VM names cannot contain some special characters
++    def _check_vm_name(self, vm_name):
++        """
++        Checks the vm name; characters that are not allowed are replaced with '-', no error is raised
++        """
++
++        chars_not_allowed_list = "~!@#$%^&*()=+_[]{}|;:<>/?."
++
++        # First: the VM name max length is 64 characters
++        vm_name_aux = vm_name[:64]
++
++        # Second: replace not allowed characters
++        for elem in chars_not_allowed_list:
++            # Check if the character appears in the name
++            if elem in vm_name_aux:
++                # Replace it with '-'
++                vm_name_aux = vm_name_aux.replace(elem, '-')
++
++        return vm_name_aux
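++
++    # Example (illustrative input): _check_vm_name("my_vm#01!") returns
++    # "my-vm-01-", since '_', '#' and '!' are all in the not-allowed list and
++    # each occurrence is replaced by '-'; longer names are first cut to 64 chars.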
+     def get_flavor_id_from_data(self, flavor_dict):
 -        cpus = flavor_dict['vcpus']
 -        memMB = flavor_dict['ram']
 -        filteredSizes = [size for size in vm_sizes_list if size['numberOfCores'] > cpus and size['memoryInMB'] > memMB]
 -        listedFilteredSizes = sorted(filteredSizes, key=lambda k: k['numberOfCores'])
++        self.logger.debug("getting flavor id from data, flavor_dict: %s", flavor_dict)
++        filter_dict = flavor_dict or {}
++        try:
++            self._reload_connection()
++            vm_sizes_list = [vm_size.serialize() for vm_size in
++                             self.conn_compute.virtual_machine_sizes.list(self.region)]
++
++            cpus = filter_dict.get('vcpus') or 0
++            memMB = filter_dict.get('ram') or 0
++
++            # Filter
++            if self._config.get("flavors_pattern"):
++                filtered_sizes = [size for size in vm_sizes_list if size['numberOfCores'] >= cpus and
++                                  size['memoryInMB'] >= memMB and
++                                  re.search(self._config.get("flavors_pattern"), size["name"])]
++            else:
++                filtered_sizes = [size for size in vm_sizes_list if size['numberOfCores'] >= cpus and
++                                  size['memoryInMB'] >= memMB]
 -        return listedFilteredSizes[0]['name']
++            # Sort
++            listedFilteredSizes = sorted(filtered_sizes, key=lambda k: (k['numberOfCores'], k['memoryInMB'],
++                                                                        k['resourceDiskSizeInMB']))
 -        resGroup = self._get_resource_group_name_from_resource_id(net_id)
 -        resName = self._get_resource_name_from_resource_id(net_id)
 -        
++            if listedFilteredSizes:
++                return listedFilteredSizes[0]['name']
++            raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
++
++        except Exception as e:
++            self._format_vimconn_exception(e)
++
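++    # Selection sketch (sizes are illustrative): with flavor_dict
++    #   {'vcpus': 2, 'ram': 2048}
++    # every vm size with at least 2 cores and 2048 MiB is kept, optionally
++    # narrowed by the 'flavors_pattern' regex from the config, and the smallest
++    # size by (cores, memory, disk) is returned.
++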
++    def _get_flavor_id_from_flavor_name(self, flavor_name):
++
++        # self.logger.debug("getting flavor id from flavor name {}".format(flavor_name))
++        try:
++            self._reload_connection()
++            vm_sizes_list = [vm_size.serialize() for vm_size in
++                             self.conn_compute.virtual_machine_sizes.list(self.region)]
++
++            output_flavor = None
++            for size in vm_sizes_list:
++                if size['name'] == flavor_name:
++                    output_flavor = size
++
++            # None is returned if nothing is found
++            return output_flavor
++
++        except Exception as e:
++            self._format_vimconn_exception(e)
+     def check_vim_connectivity(self):
+         try:
+             self._reload_connection()
+             return True
+         except Exception as e:
+             raise vimconn.vimconnException("Connectivity issue with Azure API: {}".format(e))
+     def get_network(self, net_id):
 -        vnet = self.conn_vnet.virtual_networks.get(resGroup, resName)
++
++        # self.logger.debug('get network id: {}'.format(net_id))
++        # res_name = self._get_resource_name_from_resource_id(net_id)
+         self._reload_connection()
 -        return vnet
 -    def delete_network(self, net_id):
 -        resGroup = self._get_resource_group_name_from_resource_id(net_id)
 -        resName = self._get_resource_name_from_resource_id(net_id)
 -        
++        filter_dict = {'name': net_id}
++        network_list = self.get_network_list(filter_dict)
++
++        if not network_list:
++            raise vimconn.vimconnNotFoundException("network '{}' not found".format(net_id))
++        else:
++            return network_list[0]
++
++    def delete_network(self, net_id, created_items=None):
++
++        self.logger.debug('deleting network {} - {}'.format(self.resource_group, net_id))
 -        self.conn_vnet.virtual_networks.delete(resGroup, resName)
+         self._reload_connection()
 -    def delete_vminstance(self, vm_id):
 -        resGroup = self._get_resource_group_name_from_resource_id(net_id)
 -        resName = self._get_resource_name_from_resource_id(net_id)
 -        
++        res_name = self._get_resource_name_from_resource_id(net_id)
++        filter_dict = {'name': res_name}
++        network_list = self.get_network_list(filter_dict)
++        if not network_list:
++            raise vimconn.vimconnNotFoundException("network '{}' not found".format(net_id))
++
++        try:
++            # Subnet API fails (CloudError: Azure Error: ResourceNotFound)
++            # Put the initial virtual_network API
++            async_delete = self.conn_vnet.subnets.delete(self.resource_group, self.vnet_name, res_name)
++            async_delete.wait()
++            return net_id
++
++        except CloudError as e:
++            if e.error.error and "notfound" in e.error.error.lower():
++                raise vimconn.vimconnNotFoundException("network '{}' not found".format(net_id))
++            else:
++                self._format_vimconn_exception(e)
++        except Exception as e:
++            self._format_vimconn_exception(e)
 -        self.conn_compute.virtual_machines.delete(resGroup, resName)
++    def delete_vminstance(self, vm_id, created_items=None):
++        """ Deletes a vm instance from the vim.
++        """
++        self.logger.debug('deleting VM instance {} - {}'.format(self.resource_group, vm_id))
+         self._reload_connection()
 -        resGroup = self._get_resource_group_name_from_resource_id(net_id)
 -        resName = self._get_resource_name_from_resource_id(net_id)
 -        
++
++        try:
++
++            res_name = self._get_resource_name_from_resource_id(vm_id)
++            vm = self.conn_compute.virtual_machines.get(self.resource_group, res_name)
++
++            # Shuts down the virtual machine and releases the compute resources
++            # vm_stop = self.conn_compute.virtual_machines.power_off(self.resource_group, resName)
++            # vm_stop.wait()
++
++            vm_delete = self.conn_compute.virtual_machines.delete(self.resource_group, res_name)
++            vm_delete.wait()
++            self.logger.debug('deleted VM name: %s', res_name)
++
++            # Delete OS Disk
++            os_disk_name = vm.storage_profile.os_disk.name
++            self.logger.debug('delete OS DISK: %s', os_disk_name)
++            self.conn_compute.disks.delete(self.resource_group, os_disk_name)
++            self.logger.debug('deleted OS DISK name: %s', os_disk_name)
++
++            for data_disk in vm.storage_profile.data_disks:
++                self.logger.debug('delete data_disk: %s', data_disk.name)
++                self.conn_compute.disks.delete(self.resource_group, data_disk.name)
++                self.logger.debug('deleted data_disk name: %s', data_disk.name)
++
++            # After deleting the VM it is necessary to delete the NICs; otherwise delete_network
++            # fails because Azure reports that the subnet is still in use
++            network_interfaces = vm.network_profile.network_interfaces
++
++            for network_interface in network_interfaces:
++
++                nic_name = self._get_resource_name_from_resource_id(network_interface.id)
++                nic_data = self.conn_vnet.network_interfaces.get(
++                    self.resource_group,
++                    nic_name)
++
++                public_ip_name = None
++                exist_public_ip = nic_data.ip_configurations[0].public_ip_address
++                if exist_public_ip:
++                    public_ip_id = nic_data.ip_configurations[0].public_ip_address.id
++
++                    # Delete public_ip
++                    public_ip_name = self._get_resource_name_from_resource_id(public_ip_id)
++
++                    # The public ip must be deleted after the nic it is attached to
++
++                self.logger.debug('delete NIC name: %s', nic_name)
++                nic_delete = self.conn_vnet.network_interfaces.delete(self.resource_group, nic_name)
++                nic_delete.wait()
++                self.logger.debug('deleted NIC name: %s', nic_name)
++
++                # Delete the public ip, if there was one
++                if public_ip_name:
++                    self.logger.debug('delete PUBLIC IP - ' + public_ip_name)
++                    self.conn_vnet.public_ip_addresses.delete(self.resource_group, public_ip_name)
++
++        except CloudError as e:
++            if e.error.error and "notfound" in e.error.error.lower():
++                raise vimconn.vimconnNotFoundException("No vm instance found '{}'".format(vm_id))
++            else:
++                self._format_vimconn_exception(e)
++        except Exception as e:
++            self._format_vimconn_exception(e)
++
++    def action_vminstance(self, vm_id, action_dict, created_items={}):
++        """Send and action over a VM instance from VIM
++        Returns the vm_id if the action was successfully sent to the VIM
++        """
++
++        self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
++        try:
++            self._reload_connection()
++            resName = self._get_resource_name_from_resource_id(vm_id)
++            if "start" in action_dict:
++                self.conn_compute.virtual_machines.start(self.resource_group, resName)
++            elif "stop" in action_dict or "shutdown" in action_dict or "shutoff" in action_dict:
++                self.conn_compute.virtual_machines.power_off(self.resource_group, resName)
++            elif "terminate" in action_dict:
++                self.conn_compute.virtual_machines.delete(self.resource_group, resName)
++            elif "reboot" in action_dict:
++                self.conn_compute.virtual_machines.restart(self.resource_group, resName)
++            return None
++        except CloudError as e:
++            if e.error.error and "notfound" in e.error.error.lower():
++                raise vimconn.vimconnNotFoundException("No vm found '{}'".format(vm_id))
++            else:
++                self._format_vimconn_exception(e)
++        except Exception as e:
++            self._format_vimconn_exception(e)
++
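++    # Usage sketch (hypothetical id): the action is selected by key presence, e.g.
++    #   action_vminstance('<vm id>', {'start': None})     # powers the VM on
++    #   action_vminstance('<vm id>', {'shutdown': None})  # powers it off
++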
++    def delete_flavor(self, flavor_id):
++        raise vimconn.vimconnAuthException("It is not possible to delete a FLAVOR in AZURE")
++
++    def delete_tenant(self, tenant_id):
++        raise vimconn.vimconnAuthException("It is not possible to delete a TENANT in AZURE")
++
++    def delete_image(self, image_id):
++        raise vimconn.vimconnAuthException("It is not possible to delete a IMAGE in AZURE")
+     def get_vminstance(self, vm_id):
 -        vm=self.conn_compute.virtual_machines.get(resGroup, resName)
++        """
++        Obtains the vm instance data from vm_id
++        """
++        self.logger.debug("get vm instance: %s", vm_id)
+         self._reload_connection()
 -        for vm_size in self.conn_compute.virtual_machine_sizes.list(self.region):
 -            if vm_size.name == flavor_id :
 -                return vm_size
++        try:
++            resName = self._get_resource_name_from_resource_id(vm_id)
++            vm = self.conn_compute.virtual_machines.get(self.resource_group, resName)
++        except CloudError as e:
++            if e.error.error and "notfound" in e.error.error.lower():
++                raise vimconn.vimconnNotFoundException("No vminstance found '{}'".format(vm_id))
++            else:
++                self._format_vimconn_exception(e)
++        except Exception as e:
++            self._format_vimconn_exception(e)
+         return vm
+     def get_flavor(self, flavor_id):
++        """
++        Obtains the flavor_data from the flavor_id
++        """
+         self._reload_connection()
 -# TODO refresh_nets_status ver estado activo
 -# TODO refresh_vms_status  ver estado activo
 -# TODO get_vminstance_console  for getting console
++        self.logger.debug("get flavor from id: %s", flavor_id)
++        flavor_data = self._get_flavor_id_from_flavor_name(flavor_id)
++        if flavor_data:
++            flavor = {
++                'id': flavor_id,
++                'name': flavor_id,
++                'ram': flavor_data['memoryInMB'],
++                'vcpus': flavor_data['numberOfCores'],
++                'disk': flavor_data['resourceDiskSizeInMB']/1024
++            }
++            return flavor
++        else:
++            raise vimconn.vimconnNotFoundException("flavor '{}' not found".format(flavor_id))
++
++    def get_tenant_list(self, filter_dict={}):
++        """ Obtains the list of tenants
++            For the Azure connector only the Azure tenant will be returned if it is compatible
++            with filter_dict
++        """
++        tenants_azure = [{'name': self.tenant, 'id': self.tenant}]
++        tenant_list = []
++
++        self.logger.debug("get tenant list: %s", filter_dict)
++        for tenant_azure in tenants_azure:
++            if filter_dict:
++                if filter_dict.get("id") and str(tenant_azure.get("id")) != filter_dict["id"]:
++                    continue
++                if filter_dict.get("name") and str(tenant_azure.get("name")) != filter_dict["name"]:
++                    continue
++
++            tenant_list.append(tenant_azure)
++
++        return tenant_list
++    def refresh_nets_status(self, net_list):
++        """Get the status of the networks
++            Params: the list of network identifiers
++            Returns a dictionary with:
++                net_id:  #VIM id of this network
++                status:  #Mandatory. Text with one of:
++                         #  DELETED (not found at vim)
++                         #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
++                         #  OTHER (Vim reported other status not understood)
++                         #  ERROR (VIM indicates an ERROR status)
++                         #  ACTIVE, INACTIVE, DOWN (admin down),
++                         #  BUILD (on building process)
++                         #
++                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
++                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
++
++        """
++
++        out_nets = {}
++        self._reload_connection()
++
++        self.logger.debug("reload nets status net_list: %s", net_list)
++        for net_id in net_list:
++            try:
++                netName = self._get_net_name_from_resource_id(net_id)
++                resName = self._get_resource_name_from_resource_id(net_id)
++
++                net = self.conn_vnet.subnets.get(self.resource_group, netName, resName)
++
++                out_nets[net_id] = {
++                    "status": self.provision_state2osm[net.provisioning_state],
++                    "vim_info": str(net)
++                }
++            except CloudError as e:
++                if e.error.error and "notfound" in e.error.error.lower():
++                    self.logger.info("Not found subnet net_name: %s, subnet_name: %s", netName, resName)
++                    out_nets[net_id] = {
++                        "status": "DELETED",
++                        "error_msg": str(e)
++                    }
++                else:
++                    self.logger.error("CloudError Exception %s when searching subnet", e)
++                    out_nets[net_id] = {
++                        "status": "VIM_ERROR",
++                        "error_msg": str(e)
++                    }
++            except vimconn.vimconnNotFoundException as e:
++                self.logger.error("VimConnNotFoundException %s when searching subnet", e)
++                out_nets[net_id] = {
++                    "status": "DELETED",
++                    "error_msg": str(e)
++                }
++            except Exception as e:
++                self.logger.error("Exception %s when searching subnet", e, exc_info=True)
++                out_nets[net_id] = {
++                    "status": "VIM_ERROR",
++                    "error_msg": str(e)
++                }
++        return out_nets
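++
++    # Example return value (ids and texts are illustrative):
++    #   {'<subnet id 1>': {'status': 'ACTIVE', 'vim_info': '<subnet dump>'},
++    #    '<subnet id 2>': {'status': 'DELETED', 'error_msg': '<not found error>'}}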
++
++    def refresh_vms_status(self, vm_list):
++        """ Get the status of the virtual machines and their interfaces/ports
++        Params: the list of VM identifiers
++        Returns a dictionary with:
++            vm_id:          # VIM id of this Virtual Machine
++                status:     # Mandatory. Text with one of:
++                            #  DELETED (not found at vim)
++                            #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
++                            #  OTHER (Vim reported other status not understood)
++                            #  ERROR (VIM indicates an ERROR status)
++                            #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
++                            #  BUILD (on building process), ERROR
++                            #  ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address
++                            #     (ACTIVE:NoMgmtIP is not returned for Azure)
++                            #
++                error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
++                vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
++                interfaces: list with interface info. Each item a dictionary with:
++                    vim_interface_id -  The ID of the interface
++                    mac_address - The MAC address of the interface.
++                    ip_address - The IP address of the interface within the subnet.
++        """
++
++        out_vms = {}
++        self._reload_connection()
++
++        self.logger.debug("refresh vm status vm_list: %s", vm_list)
++        search_vm_list = vm_list or {}
++
++        for vm_id in search_vm_list:
++            out_vm = {}
++            try:
++                res_name = self._get_resource_name_from_resource_id(vm_id)
++
++                vm = self.conn_compute.virtual_machines.get(self.resource_group, res_name)
++                out_vm['vim_info'] = str(vm)
++                out_vm['status'] = self.provision_state2osm.get(vm.provisioning_state, 'OTHER')
++                if vm.provisioning_state == 'Succeeded':
++                    # check if machine is running or stopped
++                    instance_view = self.conn_compute.virtual_machines.instance_view(self.resource_group,
++                                                                                     res_name)
++                    for status in instance_view.statuses:
++                        splitted_status = status.code.split("/")
++                        if len(splitted_status) == 2 and splitted_status[0] == 'PowerState':
++                            out_vm['status'] = self.power_state2osm.get(splitted_status[1], 'OTHER')
++
++                network_interfaces = vm.network_profile.network_interfaces
++                out_vm['interfaces'] = self._get_vm_interfaces_status(vm_id, network_interfaces)
++
++            except CloudError as e:
++                if e.error.error and "notfound" in e.error.error.lower():
++                    self.logger.debug("Not found vm id: %s", vm_id)
++                    out_vm['status'] = "DELETED"
++                    out_vm['error_msg'] = str(e)
++                    out_vm['vim_info'] = None
++                else:
++                    # maybe connection error or another type of error, return vim error
++                    self.logger.error("Exception %s refreshing vm_status", e)
++                    out_vm['status'] = "VIM_ERROR"
++                    out_vm['error_msg'] = str(e)
++                    out_vm['vim_info'] = None
++            except Exception as e:
++                self.logger.error("Exception %s refreshing vm_status", e, exc_info=True)
++                out_vm['status'] = "VIM_ERROR"
++                out_vm['error_msg'] = str(e)
++                out_vm['vim_info'] = None
++
++            out_vms[vm_id] = out_vm
++
++        return out_vms
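++
++    # Example return value (illustrative): a running VM yields something like
++    #   {'<vm id>': {'status': 'ACTIVE', 'vim_info': '<vm dump>', 'interfaces': [
++    #       {'vim_interface_id': '<nic id>', 'mac_address': '00:0d:3a:aa:bb:cc',
++    #        'ip_address': '51.105.0.1;10.0.1.4'}]}}
++    # where the power state comes from an instance view code like 'PowerState/running'.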
++
++    def _get_vm_interfaces_status(self, vm_id, interfaces):
++        """
++        Gets the interfaces detail for a vm
++        :param interfaces: List of interfaces.
++        :return: Dictionary with list of interfaces including, vim_interface_id, mac_address and ip_address
++        """
++        try:
++            interface_list = []
++            for network_interface in interfaces:
++                interface_dict = {}
++                nic_name = self._get_resource_name_from_resource_id(network_interface.id)
++                interface_dict['vim_interface_id'] = network_interface.id
++
++                nic_data = self.conn_vnet.network_interfaces.get(
++                    self.resource_group,
++                    nic_name)
++
++                ips = []
++                if nic_data.ip_configurations[0].public_ip_address:
++                    self.logger.debug("Obtain public ip address")
++                    public_ip_name = self._get_resource_name_from_resource_id(
++                        nic_data.ip_configurations[0].public_ip_address.id)
++                    public_ip = self.conn_vnet.public_ip_addresses.get(self.resource_group, public_ip_name)
++                    self.logger.debug("Public ip address is: %s", public_ip.ip_address)
++                    ips.append(public_ip.ip_address)
++
++                private_ip = nic_data.ip_configurations[0].private_ip_address
++                ips.append(private_ip)
++
++                interface_dict['mac_address'] = nic_data.mac_address
++                interface_dict['ip_address'] = ";".join(ips)
++                interface_list.append(interface_dict)
++
++            return interface_list
++        except Exception as e:
++            self.logger.error("Exception %s obtaining interface data for vm: %s, error: %s", vm_id, e, exc_info=True)
++            self._format_vimconn_exception(e)
 -    vim_id='azure'
 -    vim_name='azure'
+ if __name__ == "__main__":
+     # Making some basic test
 -            'region_name': getenv("AZURE_REGION_NAME", 'westeurope'),
 -            'resource_group': getenv("AZURE_RESOURCE_GROUP"),
 -            'subscription_id': getenv("AZURE_SUBSCRIPTION_ID"),
 -            'pub_key': getenv("AZURE_PUB_KEY", None),
 -            'vnet_name': getenv("AZURE_VNET_NAME", 'myNetwork'),
++    vim_id = 'azure'
++    vim_name = 'azure'
+     needed_test_params = {
+         "client_id": "AZURE_CLIENT_ID",
+         "secret": "AZURE_SECRET",
+         "tenant": "AZURE_TENANT",
+         "resource_group": "AZURE_RESOURCE_GROUP",
+         "subscription_id": "AZURE_SUBSCRIPTION_ID",
+         "vnet_name": "AZURE_VNET_NAME",
+     }
+     test_params = {}
+     for param, env_var in needed_test_params.items():
+         value = getenv(env_var)
+         if not value:
+             raise Exception("Provide a valid value for env '{}'".format(env_var))
+         test_params[param] = value
+     config = {
 -        #'subnet_name': 'subnet-oam'
++        'region_name': getenv("AZURE_REGION_NAME", 'westeurope'),
++        'resource_group': getenv("AZURE_RESOURCE_GROUP"),
++        'subscription_id': getenv("AZURE_SUBSCRIPTION_ID"),
++        'pub_key': getenv("AZURE_PUB_KEY", None),
++        'vnet_name': getenv("AZURE_VNET_NAME", 'myNetwork'),
+     }
+     virtualMachine = {
+         'name': 'sergio',
+         'description': 'new VM',
+         'status': 'running',
+         'image': {
+             'publisher': 'Canonical',
+             'offer': 'UbuntuServer',
+             'sku': '16.04.0-LTS',
+             'version': 'latest'
+         },
+         'hardware_profile': {
+             'vm_size': 'Standard_DS1_v2'
+         },
+         'networks': [
+             'sergio'
+         ]
+     }
+     vnet_config = {
+         'subnet_address': '10.1.2.0/24',
 -    azure.get_flavor("Standard_A11")
++        # 'subnet_name': 'subnet-oam'
+     }
+     ###########################
+     azure = vimconnector(vim_id, vim_name, tenant_id=test_params["tenant"], tenant_name=None, url=None, url_admin=None,
+                          user=test_params["client_id"], passwd=test_params["secret"], log_level=None, config=config)
+     # azure.get_flavor_id_from_data("here")
+     # subnets=azure.get_network_list()
+     # azure.new_vminstance(virtualMachine['name'], virtualMachine['description'], virtualMachine['status'],
+     #                      virtualMachine['image'], virtualMachine['hardware_profile']['vm_size'], subnets)
++    azure.new_network("mynet", None)
++    net_id = "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/Microsoft."\
++             "Network/virtualNetworks/test"
++    net_id_not_found = "/subscriptions/82f80cc1-876b-4591-9911-1fb5788384fd/resourceGroups/osmRG/providers/"\
++                       "Microsoft.Network/virtualNetworks/testALF"
++    azure.refresh_nets_status([net_id, net_id_not_found])
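++    # Illustrative note (not in the original code): per the vimconn contract,
++    # refresh_nets_status should return something like (values hypothetical):
++    # {net_id: {'status': 'ACTIVE', 'vim_info': '...'},
++    #  net_id_not_found: {'status': 'DELETED'}}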
index 0000000,d5273fd..6cfff52
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,20 +1,20 @@@
 -git+https://osm.etsi.org/gerrit/osm/RO.git@py3#egg=osm-ro&subdirectory=RO
+ ##
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #    http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ # implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ ##
+ PyYAML
+ requests
+ netaddr
+ azure
++git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
index 0000000,90b0e7e..c30c1f1
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,879 +1,878 @@@
 -    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+ # -*- coding: utf-8 -*-
+ ##
+ # Copyright 2019 ADLINK Technology Inc..
+ # This file is part of ETSI OSM
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ #
+ """
+ Eclipse fog05 connector, implements methods to interact with fog05 using REST Client + REST Proxy
+ Manages LXD containers on x86_64 by default; EPA and VF/PF are currently not supported
+ Supported config dict:
+     - arch : cpu architecture for the VIM
+     - hypervisor: virtualization technology supported by the VIM,
+                 can be one of: LXD, KVM, BARE, XEN, DOCKER, MCU
+                 the selected VIM needs to have at least one node with support
+                 for the selected hypervisor
+ """
+ __author__="Gabriele Baldoni"
+ __date__ ="$13-may-2019 10:35:12$"
+ import uuid
+ import socket
+ import struct
+ from . import vimconn
+ import random
+ import yaml
+ from functools import partial
+ from fog05rest import FIMAPI
+ from fog05rest import fimerrors
+ class vimconnector(vimconn.vimconnector):
+     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
+                  config={}, persistent_info={}):
+         """Constructor of VIM
+         Params:
+             'uuid': id assigned to this VIM
+             'name': name assigned to this VIM, can be used for logging
+             'tenant_id', 'tenant_name': (only one of them is mandatory) VIM tenant to be used
+             'url_admin': (optional), url used for administrative tasks
+             'user', 'passwd': credentials of the VIM user
+             'log_level': provide if it should use a different log_level than the general one
+             'config': dictionary with extra VIM information. This contains a consolidate version of general VIM config
+                     at creation and particular VIM config at the attachment
+             'persistent_info': dict where the class can store information that will be available among class
+                     destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+                     empty dict. Useful to store login/tokens information for speed up communication
+         Returns: Raises an exception if some needed parameter is missing, but it must not do any connectivity
+             check against the VIM
+         """
+         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                       config, persistent_info)
+         self.logger.debug('vimconn_fos init with config: {}'.format(config))
+         self.arch = config.get('arch', 'x86_64')
+         self.hv = config.get('hypervisor', 'LXD')
+         self.nodes = config.get('nodes', [])
+         self.fdu_node_map = {}
+         self.fos_api = FIMAPI(locator=self.url)
+     def __get_ip_range(self, first, count):
+         int_first = struct.unpack('!L', socket.inet_aton(first))[0]
+         int_last = int_first + count
+         last = socket.inet_ntoa(struct.pack('!L', int_last))
+         return (first, last)
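+     # Example (hypothetical values, not in the original code): __get_ip_range('10.0.0.2', 10)
+     # unpacks '10.0.0.2' to its 32-bit integer form, adds the count, packs it back,
+     # and returns ('10.0.0.2', '10.0.0.12')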
+     def __name_filter(self, desc, filter_name=None):
+         if filter_name is None:
+             return True
+         return desc.get('name') == filter_name
+     def __id_filter(self, desc, filter_id=None):
+         if filter_id is None:
+             return True
+         return desc.get('uuid') == filter_id
+     def __checksum_filter(self, desc, filter_checksum=None):
+         if filter_checksum is None:
+             return True
+         return desc.get('checksum') == filter_checksum
+     def check_vim_connectivity(self):
+         """Checks VIM can be reached and user credentials are ok.
+         Returns None if success or raised vimconnConnectionException, vimconnAuthException, ...
+         """
+         try:
+             self.fos_api.check()
+             return None
+         except fimerrors.FIMAuthExcetpion as fae:
+             raise vimconn.vimconnAuthException("Unable to authenticate to the VIM. Error {}".format(fae))
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
 -            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
++    def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+         """Adds a tenant network to VIM
+         Params:
+             'net_name': name of the network
+             'net_type': one of:
+                 'bridge': overlay isolated network
+                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+             'ip_profile': is a dict containing the IP parameters of the network
+                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                 'dhcp_enabled': True or False
+                 'dhcp_start_address': ip_schema, first IP to grant
+                 'dhcp_count': number of IPs to grant.
+             'shared': if this network can be seen/used by other tenants/organizations
++            'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
 -        return net_uuid
+         Returns a tuple with the network identifier and created_items, or raises an exception on failure
+         """
+         self.logger.debug('new_network: {}'.format(locals()))
+         if net_type in ['data','ptp']:
+             raise vimconn.vimconnNotImplemented('{} type of network not supported'.format(net_type))
+         net_uuid = '{}'.format(uuid.uuid4())
+         desc = {
+             'uuid':net_uuid,
+             'name':net_name,
+             'net_type':'ELAN',
+             'is_mgmt':False
+             }
+         if ip_profile is not None:
+             ip = {}
+             if ip_profile.get('ip_version') == 'IPv4':
+                 ip_range = self.__get_ip_range(ip_profile.get('dhcp_start_address'), ip_profile.get('dhcp_count'))
+                 dhcp_range = '{},{}'.format(ip_range[0], ip_range[1])
+                 ip.update({'subnet': ip_profile.get('subnet_address')})
+                 ip.update({'dns': ip_profile.get('dns', None)})
+                 ip.update({'dhcp_enable': ip_profile.get('dhcp_enabled', False)})
+                 ip.update({'dhcp_range': dhcp_range})
+                 ip.update({'gateway': ip_profile.get('gateway_address', None)})
+             else:
+                 raise vimconn.vimconnNotImplemented('IPV6 network is not implemented at VIM')
+             desc.update({'ip_configuration':ip})
+         self.logger.debug('VIM new_network args: {} - Generated Eclipse fog05 Descriptor {}'.format(locals(), desc))
+         try:
+             self.fos_api.network.add_network(desc)
+         except fimerrors.FIMAResouceExistingException as free:
+             raise vimconn.vimconnConflictException("Network already exists at VIM. Error {}".format(free))
+         except Exception as e:
+             raise vimconn.vimconnException("Unable to create network {}. Error {}".format(net_name, e))
+             # No way from the current rest service to get the actual error, most likely it will be an already existing error
 -    def delete_network(self, net_id):
++        return net_uuid, {}
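+     # Illustrative sketch (not in the original code): for a hypothetical call
+     # new_network('mgmt', 'bridge', ip_profile={'ip_version': 'IPv4',
+     #     'subnet_address': '10.0.0.0/24', 'dhcp_start_address': '10.0.0.2',
+     #     'dhcp_count': 100}) the generated fog05 descriptor would be roughly:
+     # {'uuid': <random uuid>, 'name': 'mgmt', 'net_type': 'ELAN', 'is_mgmt': False,
+     #  'ip_configuration': {'subnet': '10.0.0.0/24', 'dns': None, 'dhcp_enable': False,
+     #      'dhcp_range': '10.0.0.2,10.0.0.102', 'gateway': None}}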
+     def get_network_list(self, filter_dict={}):
+         """Obtain tenant networks of VIM
+         Params:
+             'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
+                 name: string  => returns only networks with this name
+                 id:   string  => returns networks with this VIM id, this imply returns one network at most
+                 shared: boolean => returns only networks that are (or are not) shared
+                 tenant_id: string => returns only networks that belong to this tenant/project
+                 #(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
+                 #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+         Returns the network list of dictionaries. each dictionary contains:
+             'id': (mandatory) VIM network id
+             'name': (mandatory) VIM network name
+             'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
+             'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         List can be empty if no network matches the filter_dict. Raise an exception only upon VIM connectivity,
+             authorization, or some other unspecific error
+         """
+         self.logger.debug('get_network_list: {}'.format(filter_dict))
+         res = []
+         try:
+             nets = self.fos_api.network.list()
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("Cannot get network list from VIM, connection error. Error {}".format(e))
+         filters = [
+             partial(self.__name_filter, filter_name=filter_dict.get('name')),
+             partial(self.__id_filter,filter_id=filter_dict.get('id'))
+         ]
+         r1 = []
+         for n in nets:
+             match = True
+             for f in filters:
+                 match = match and f(n)
+             if match:
+                 r1.append(n)
+         for n in r1:
+             osm_net = {
+                 'id':n.get('uuid'),
+                 'name':n.get('name'),
+                 'status':'ACTIVE'
+             }
+             res.append(osm_net)
+         return res
+     def get_network(self, net_id):
+         """Obtain network details from the 'net_id' VIM network
+         Return a dict that contains:
+             'id': (mandatory) VIM network id, that is, net_id
+             'name': (mandatory) VIM network name
+             'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         Raises an exception upon error or when network is not found
+         """
+         self.logger.debug('get_network: {}'.format(net_id))
+         res = self.get_network_list(filter_dict={'id':net_id})
+         if len(res) == 0:
+             raise vimconn.vimconnNotFoundException("Network {} not found at VIM".format(net_id))
+         return res[0]
++    def delete_network(self, net_id, created_items=None):
+         """Deletes a tenant network from VIM
+         Returns the network identifier or raises an exception upon error or when network is not found
+         """
+         self.logger.debug('delete_network: {}'.format(net_id))
+         try:
+             self.fos_api.network.remove_network(net_id)
+         except fimerrors.FIMNotFoundException as fnfe:
+             raise vimconn.vimconnNotFoundException("Network {} not found at VIM (already deleted?). Error {}".format(net_id, fnfe))
+         except Exception as e:
+             raise vimconn.vimconnException("Cannot delete network {} from VIM. Error {}".format(net_id, e))
+         return net_id
+     def refresh_nets_status(self, net_list):
+         """Get the status of the networks
+         Params:
+             'net_list': a list with the VIM network id to be get the status
+         Returns a dictionary with:
+             'net_id':         #VIM id of this network
+                 status:     #Mandatory. Text with one of:
+                     #  DELETED (not found at vim)
+                     #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+                     #  OTHER (Vim reported other status not understood)
+                     #  ERROR (VIM indicates an ERROR status)
+                     #  ACTIVE, INACTIVE, DOWN (admin down),
+                     #  BUILD (on building process)
+                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+             'net_id2': ...
+         """
+         self.logger.debug('Refreshing network status with args: {}'.format(locals()))
+         r = {}
+         for n in net_list:
+             try:
+                 osm_n = self.get_network(n)
+                 r.update({
+                     osm_n.get('id'):{'status':osm_n.get('status')}
+                 })
+             except vimconn.vimconnNotFoundException:
+                 r.update({
+                     n:{'status':'VIM_ERROR'}
+                 })
+         return r
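+     # Illustrative sketch (not in the original code): a possible return value,
+     # with hypothetical network ids, would be:
+     # {'net-uuid-1': {'status': 'ACTIVE'}, 'net-uuid-2': {'status': 'VIM_ERROR'}}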
+     def get_flavor(self, flavor_id):
+         """Obtain flavor details from the VIM
+         Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+         Raises an exception upon error or if not found
+         """
+         self.logger.debug('VIM get_flavor with args: {}'.format(locals()))
+         try:
+             r = self.fos_api.flavor.get(flavor_id)
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+         if r is None:
+             raise vimconn.vimconnNotFoundException("Flavor not found at VIM")
+         return {'id':r.get('uuid'), 'name':r.get('name'), 'fos':r}
+     def get_flavor_id_from_data(self, flavor_dict):
+         """Obtain flavor id that match the flavor description
+         Params:
+             'flavor_dict': dictionary that contains:
+                 'disk': main hard disk in GB
+                 'ram': memory in MB
+                 'vcpus': number of virtual cpus
+                 #TODO: complete parameters for EPA
+         Returns the flavor_id or raises a vimconnNotFoundException
+         """
+         self.logger.debug('VIM get_flavor_id_from_data with args : {}'.format(locals()))
+         try:
+             flvs = self.fos_api.flavor.list()
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+         r = [x.get('uuid') for x in flvs
+              if (x.get('cpu_min_count') == flavor_dict.get('vcpus')
+                  and x.get('ram_size_mb') == flavor_dict.get('ram')
+                  and x.get('storage_size_gb') == flavor_dict.get('disk'))]
+         if len(r) == 0:
+             raise vimconn.vimconnNotFoundException("No flavor found")
+         return r[0]
+     def new_flavor(self, flavor_data):
+         """Adds a tenant flavor to VIM
+             flavor_data contains a dictionary with information, keys:
+                 name: flavor name
+                 ram: memory (cloud type) in MBytes
+                 vcpus: cpus (cloud type)
+                 extended: EPA parameters
+                   - numas: #items requested in same NUMA
+                         memory: number of 1G huge pages memory
+                         paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                         interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                           - name: interface name
+                             dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                             bandwidth: X Gbps; requested guarantee bandwidth
+                             vpci: requested virtual PCI address
+                 disk: disk size
+                 is_public:
+                  #TODO to concrete
+         Returns the flavor identifier"""
+         self.logger.debug('VIM new_flavor with args: {}'.format(locals()))
+         flv_id = '{}'.format(uuid.uuid4())
+         desc = {
+             'uuid':flv_id,
+             'name':flavor_data.get('name'),
+             'cpu_arch': self.arch,
+             'cpu_min_count': flavor_data.get('vcpus'),
+             'cpu_min_freq': 0.0,
+             'ram_size_mb':float(flavor_data.get('ram')),
+             'storage_size_gb':float(flavor_data.get('disk'))
+         }
+         try:
+             self.fos_api.flavor.add(desc)
+         except fimerrors.FIMAResouceExistingException as free:
+             raise vimconn.vimconnConflictException("Flavor {} already exist at VIM. Error {}".format(flv_id, free))
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+         return flv_id
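+     # Illustrative sketch (not in the original code): new_flavor(
+     #     {'name': 'small', 'vcpus': 2, 'ram': 2048, 'disk': 10}) would onboard
+     # {'uuid': <random uuid>, 'name': 'small', 'cpu_arch': self.arch,
+     #  'cpu_min_count': 2, 'cpu_min_freq': 0.0, 'ram_size_mb': 2048.0,
+     #  'storage_size_gb': 10.0} and return the generated uuid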
+     def delete_flavor(self, flavor_id):
+         """Deletes a tenant flavor from VIM identify by its id
+         Returns the used id or raise an exception"""
+         try:
+             self.fos_api.flavor.remove(flavor_id)
+         except fimerrors.FIMNotFoundException as fnfe:
+             raise vimconn.vimconnNotFoundException("Flavor {} not found at VIM (already deleted?). Error {}".format(flavor_id, fnfe))
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+         return flavor_id
+     def new_image(self, image_dict):
+         """ Adds a tenant image to VIM. imge_dict is a dictionary with:
+             name: name
+             disk_format: qcow2, vhd, vmdk, raw (by default), ...
+             location: path or URI
+             public: "yes" or "no"
+             metadata: metadata of the image
+         Returns the image id or raises an exception if failed
+         """
+         self.logger.debug('VIM new_image with args: {}'.format(locals()))
+         img_id = '{}'.format(uuid.uuid4())
+         desc = {
+             'name':image_dict.get('name'),
+             'uuid':img_id,
+             'uri':image_dict.get('location')
+         }
+         try:
+             self.fos_api.image.add(desc)
+         except fimerrors.FIMAResouceExistingException as free:
+             raise vimconn.vimconnConflictException("Image {} already exist at VIM. Error {}".format(img_id, free))
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+         return img_id
+     def get_image_id_from_path(self, path):
+         """Get the image id from image path in the VIM database.
+            Returns the image_id or raises a vimconnNotFoundException
+         """
+         self.logger.debug('VIM get_image_id_from_path with args: {}'.format(locals()))
+         try:
+             imgs = self.fos_api.image.list()
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+         res = [x.get('uuid') for x in imgs if x.get('uri')==path]
+         if len(res) == 0:
+             raise vimconn.vimconnNotFoundException("Image with this path was not found")
+         return res[0]
+     def get_image_list(self, filter_dict={}):
+         """Obtain tenant images from VIM
+         Filter_dict can be:
+             name: image name
+             id: image uuid
+             checksum: image checksum
+             location: image path
+         Returns the image list of dictionaries:
+             [{<the fields at Filter_dict plus some VIM specific>}, ...]
+             List can be empty
+         """
+         self.logger.debug('VIM get_image_list args: {}'.format(locals()))
+         r = []
+         try:
+             fimgs = self.fos_api.image.list()
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+         filters = [
+             partial(self.__name_filter, filter_name=filter_dict.get('name')),
+             partial(self.__id_filter,filter_id=filter_dict.get('id')),
+             partial(self.__checksum_filter,filter_checksum=filter_dict.get('checksum'))
+         ]
+         r1 = []
+         for i in fimgs:
+             match = True
+             for f in filters:
+                 match = match and f(i)
+             if match:
+                 r1.append(i)
+         for i in r1:
+             img_info = {
+                 'name':i.get('name'),
+                 'id':i.get('uuid'),
+                 'checksum':i.get('checksum'),
+                 'location':i.get('uri'),
+                 'fos':i
+             }
+             r.append(img_info)
+         return r
+     def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+         availability_zone_index=None, availability_zone_list=None):
+         """Adds a VM instance to VIM
+         Params:
+             'start': (boolean) indicates if VM must start or be created in pause mode.
+             'image_id','flavor_id': image and flavor VIM id to use for the VM
+             'net_list': list of interfaces, each one is a dictionary with:
+                 'name': (optional) name for the interface.
+                 'net_id': VIM network id where this interface must be connected to. Mandatory for type==virtual
+                 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
+                 'mac_address': (optional) mac address to assign to this interface
+                 'ip_address': (optional) IP address to assign to this interface
+                 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type is VF and net_id is not provided,
+                     the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+                 'type': (mandatory) can be one of:
+                     'virtual', in this case always connected to a network of type 'net_type=bridge'
+                      'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp
+                            network or it can be created unconnected
+                      'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                      'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                             are allocated on the same physical NIC
+                 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                                 or True, it must apply the default VIM behaviour
+                 After execution the method will add the key:
+                 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                         interface. 'net_list' is modified
+             'cloud_config': (optional) dictionary with:
+                 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                 'users': (optional) list of users to be inserted, each item is a dict with:
+                     'name': (mandatory) user name,
+                     'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                     or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                 'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                     'dest': (mandatory) string with the destination absolute path
+                     'encoding': (optional, by default text). Can be one of:
+                         'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                     'content' (mandatory): string with the content of the file
+                     'permissions': (optional) string with file permissions, typically octal notation '0644'
+                     'owner': (optional) file owner, string with the format 'owner:group'
+                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+             'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                 'size': (mandatory) string with the size of the disk in GB
+             availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
+             availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                 availability_zone_index is None
+         Returns a tuple with the instance identifier and created_items or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         self.logger.debug('new_vminstance with args: {}'.format(locals()))
+         fdu_uuid = '{}'.format(uuid.uuid4())
+         flv = self.fos_api.flavor.get(flavor_id)
+         img = self.fos_api.image.get(image_id)
+         if flv is None:
+             raise vimconn.vimconnNotFoundException("Flavor {} not found at VIM".format(flavor_id))
+         if img is None:
+             raise vimconn.vimconnNotFoundException("Image {} not found at VIM".format(image_id))
+         created_items = {
+             'fdu_id':'',
+             'node_id':'',
+             'connection_points':[]
+             }
+         fdu_desc = {
+             'name':name,
+             'uuid':fdu_uuid,
+             'computation_requirements':flv,
+             'image':img,
+             'hypervisor':self.hv,
+             'migration_kind':'LIVE',
+             'interfaces':[],
+             'io_ports':[],
+             'connection_points':[],
+             'depends_on':[]
+         }
+         nets = []
+         cps = []
+         intf_id = 0
+         for n in net_list:
+             cp_id = '{}'.format(uuid.uuid4())
+             n.update({'vim_id':cp_id})
+             pair_id = n.get('net_id')
+             cp_d = {
+                 'uuid':cp_id,
+                 'pair_id':pair_id
+             }
+             intf_d = {
+                 'name':n.get('name','eth{}'.format(intf_id)),
+                 'is_mgmt':False,
+                 'if_type':'INTERNAL',
+                 'virtual_interface':{
+                     'intf_type':n.get('model','VIRTIO'),
+                     'vpci':n.get('vpci','0:0:0'),
+                     'bandwidth':int(n.get('bw', 100))
+                 }
+             }
+             if n.get('mac_address', None) is not None:
+                 intf_d['mac_address'] = n['mac_address']
+             created_items['connection_points'].append(cp_id)
+             fdu_desc['connection_points'].append(cp_d)
+             fdu_desc['interfaces'].append(intf_d)
+             intf_id = intf_id + 1
+         if cloud_config is not None:
+             configuration = {
+                     'conf_type':'CLOUD_INIT'
+                 }
+             if cloud_config.get('user-data') is not None:
+                 configuration.update({'script':cloud_config.get('user-data')})
+             if cloud_config.get('key-pairs') is not None:
+                 configuration.update({'ssh_keys':cloud_config.get('key-pairs')})
+             if 'script' in configuration:
+                 fdu_desc.update({'configuration':configuration})
+         ### NODE Selection ###
+         # Infrastructure info
+         #   nodes dict with
+         #        uuid -> node uuid
+         #        computational capabilities -> cpu, ram, and disk available
+         #        hypervisors -> list of available hypervisors (eg. KVM, LXD, BARE)
+         #
+         #
+         # UPDATING AVAILABLE INFRASTRUCTURE
+         if len(self.nodes) == 0:
+             nodes_id = self.fos_api.node.list()
+         else:
+             nodes_id = self.nodes
+         nodes = []
+         for n in nodes_id:
+             n_info = self.fos_api.node.info(n)
+             if n_info is None:
+                 continue
+             n_plugs = []
+             for p in self.fos_api.node.plugins(n):
+                 n_plugs.append(self.fos_api.plugin.info(n,p))
+             n_cpu_number =  len(n_info.get('cpu'))
+             n_cpu_arch = n_info.get('cpu')[0].get('arch')
+             n_cpu_freq = n_info.get('cpu')[0].get('frequency')
+             n_ram = n_info.get('ram').get('size')
+             n_disk_size = sorted(filter(lambda x: 'sda' in x['local_address'], n_info.get('disks')),
+                                  key=lambda k: k['dimension'])[-1].get('dimension')
+             hvs = []
+             for p in n_plugs:
+                 if p.get('type') == 'runtime':
+                     hvs.append(p.get('name'))
+             ni = {
+                 'uuid':n,
+                 'computational_capabilities':{
+                     'cpu_count':n_cpu_number,
+                     'cpu_arch':n_cpu_arch,
+                     'cpu_freq':n_cpu_freq,
+                     'ram_size':n_ram,
+                     'disk_size':n_disk_size
+                 },
+                 'hypervisors':hvs
+             }
+             nodes.append(ni)
+         # NODE SELECTION
+         compatible_nodes = []
+         for n in nodes:
+             if fdu_desc.get('hypervisor') in n.get('hypervisors'):
+                 n_comp = n.get('computational_capabilities')
+                 f_comp = fdu_desc.get('computation_requirements')
+                 if f_comp.get('cpu_arch') == n_comp.get('cpu_arch'):
+                     if (f_comp.get('cpu_min_count') <= n_comp.get('cpu_count')
+                             and f_comp.get('ram_size_mb') <= n_comp.get('ram_size')):
+                         if f_comp.get('storage_size_gb') <= n_comp.get('disk_size'):
+                             compatible_nodes.append(n)
+         if len(compatible_nodes) == 0:
+             raise vimconn.vimconnConflictException("No available nodes at VIM")
+         selected_node = random.choice(compatible_nodes)
+         created_items.update({'fdu_id':fdu_uuid, 'node_id': selected_node.get('uuid')})
+         self.logger.debug('FOS Node {} FDU Descriptor: {}'.format(selected_node.get('uuid'), fdu_desc))
+         try:
+             self.fos_api.fdu.onboard(fdu_desc)
+             instanceid = self.fos_api.fdu.instantiate(fdu_uuid, selected_node.get('uuid'))
+             created_items.update({'instance_id':instanceid})
+             self.fdu_node_map.update({instanceid: selected_node.get('uuid')})
+             self.logger.debug('new_vminstance return: {}'.format((fdu_uuid, created_items)))
+             return (instanceid, created_items)
+         except fimerrors.FIMAResouceExistingException as free:
+             raise vimconn.vimconnConflictException("VM already exists at VIM. Error {}".format(free))
+         except Exception as e:
+             raise vimconn.vimconnException("Error while instantiating VM {}. Error {}".format(name, e))
+     def get_vminstance(self,vm_id):
+         """Returns the VM instance information from VIM"""
+         self.logger.debug('VIM get_vminstance with args: {}'.format(locals()))
+         try:
+             intsinfo = self.fos_api.fdu.instance_info(vm_id)
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
+         if intsinfo is None:
+             raise vimconn.vimconnNotFoundException('VM with id {} not found!'.format(vm_id))
+         return intsinfo
+     def delete_vminstance(self, vm_id, created_items=None):
+         """
+         Removes a VM instance from VIM and its associated elements
+         :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+         :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
+             action_vminstance
+         :return: None or the same vm_id. Raises an exception on fail
+         """
+         self.logger.debug('FOS delete_vminstance with args: {}'.format(locals()))
+         fduid = created_items.get('fdu_id') if created_items else None
+         try:
+             self.fos_api.fdu.terminate(vm_id)
+             self.fos_api.fdu.offload(fduid)
+         except Exception as e:
+             raise vimconn.vimconnException("Error on deletting VM with id {}. Error {}".format(vm_id,e))
+         return vm_id
+     def refresh_vms_status(self, vm_list):
+         """Get the status of the virtual machines and their interfaces/ports
+            Params: the list of VM identifiers
+            Returns a dictionary with:
+                 vm_id:          #VIM id of this Virtual Machine
+                     status:     #Mandatory. Text with one of:
+                                 #  DELETED (not found at vim)
+                                 #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                 #  OTHER (Vim reported other status not understood)
+                                 #  ERROR (VIM indicates an ERROR status)
+                                 #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                                 #  BUILD (on building process), ERROR
+                                 #  ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+                                 #
+                     error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                     interfaces: list with interface info. Each item a dictionary with:
+                         vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                         mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                         vim_net_id:       #network id where this interface is connected, if provided at creation
+                         vim_interface_id: #interface/port VIM id
+                         ip_address:       #null, or text with IPv4, IPv6 address
+                         compute_node:     #identification of compute node where PF,VF interface is allocated
+                         pci:              #PCI address of the NIC that hosts the PF,VF
+                         vlan:             #physical VLAN used for VF
+         """
+         self.logger.debug('FOS refresh_vms_status with args: {}'.format(locals()))
+         fos2osm_status = {
+             'DEFINE':'OTHER',
+             'CONFIGURE':'INACTIVE',
+             'RUN':'ACTIVE',
+             'PAUSE':'PAUSED',
+             'ERROR':'ERROR'
+         }
+         r = {}
+         for vm in vm_list:
+             self.logger.debug('FOS refresh_vms_status for {}'.format(vm))
+             info = {}
+             nid = self.fdu_node_map.get(vm)
+             if nid is None:
+                 r.update({vm:{
+                     'status':'VIM_ERROR',
+                     'error_msg':'No compute node associated with VM'
+                 }})
+                 continue
+             try:
+                 vm_info = self.fos_api.fdu.instance_info(vm)
+             except Exception:
+                 r.update({vm:{
+                     'status':'VIM_ERROR',
+                     'error_msg':'unable to connect to VIM'
+                 }})
+                 continue
+             if vm_info is None:
+                 r.update({vm:{'status':'DELETED'}})
+                 continue
+             desc = self.fos_api.fdu.info(vm_info['fdu_uuid'])
+             osm_status = fos2osm_status.get(vm_info.get('status'))
+             self.logger.debug('FOS status info {}'.format(vm_info))
+             self.logger.debug('FOS status is {} <-> OSM Status {}'.format(vm_info.get('status'), osm_status))
+             info.update({'status':osm_status})
+             if vm_info.get('status') == 'ERROR':
+                 info.update({'error_msg':vm_info.get('error_code')})
+             info.update({'vim_info':yaml.safe_dump(vm_info)})
+             faces = []
+             i = 0
+             for intf_name in vm_info.get('hypervisor_info').get('network',[]):
+                 intf_info = vm_info.get('hypervisor_info').get('network').get(intf_name)
+                 face = {}
+                 face['compute_node'] = nid
+                 face['vim_info'] = yaml.safe_dump(intf_info)
+                 face['mac_address'] = intf_info.get('hwaddr')
+                 addrs = []
+                 for a in intf_info.get('addresses'):
+                     addrs.append(a.get('address'))
+                 if len(addrs) > 0:
+                     face['ip_address'] = ','.join(addrs)
+                 else:
+                     face['ip_address'] = ''
+                 face['pci'] = '0:0:0.0'
+                 # getting net id by CP
+                 try:
+                     cp_info = vm_info.get('connection_points')[i]
+                 except IndexError:
+                     cp_info = None
+                 if cp_info is not None:
+                     cp_id = cp_info['cp_uuid']
+                     cps_d = desc['connection_points']
+                     matches = [x for x in cps_d if x['uuid'] == cp_id]
+                     if len(matches) > 0:
+                         cpd = matches[0]
+                         face['vim_net_id'] = cpd.get('pair_id','')
+                     else:
+                         face['vim_net_id'] = ''
+                     face['vim_interface_id'] = cp_id
+                     # cp_info.get('uuid')
+                 else:
+                     face['vim_net_id'] = ''
+                     face['vim_interface_id'] = intf_name
+                 faces.append(face)
+                 i += 1
+             info.update({'interfaces':faces})
+             r.update({vm:info})
+             self.logger.debug('FOS refresh_vms_status res for {} is {}'.format(vm, info))
+         self.logger.debug('FOS refresh_vms_status res is {}'.format(r))
+         return r
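+     # Illustrative sketch (not in the original code): a minimal return value for
+     # one running FDU with a single interface (all ids hypothetical) would be:
+     # {'fdu-instance-uuid': {'status': 'ACTIVE', 'vim_info': '...',
+     #     'interfaces': [{'vim_interface_id': 'cp-uuid', 'vim_net_id': 'net-uuid',
+     #         'mac_address': '52:54:00:aa:bb:cc', 'ip_address': '10.0.0.4',
+     #         'compute_node': 'node-uuid', 'pci': '0:0:0.0', 'vim_info': '...'}]}}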
+     def action_vminstance(self, vm_id, action_dict, created_items={}):
+         """
+         Send an action to a VM instance. Returns created_items if the action was successfully sent to the VIM.
+         :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+         :param action_dict: dictionary with the action to perform
+         :param created_items: provided by method new_vminstance is a dictionary with key-values that will be passed to
+             the method delete_vminstance. Can be used to store created ports, volumes, etc. Format is vimconnector
+             dependent, but do not use nested dictionaries and a value of None should be the same as not present. This
+             method can modify this value
+         :return: None, or a console dict
+         """
+         self.logger.debug('VIM action_vminstance with args: {}'.format(locals()))
+         nid = self.fdu_node_map.get(vm_id)
+         if nid is None:
+             raise vimconn.vimconnNotFoundException('No node for this VM')
+         try:
+             fdu_info = self.fos_api.fdu.instance_info(vm_id)
+             if "start" in action_dict:
+                 if fdu_info.get('status') == 'CONFIGURE':
+                     self.fos_api.fdu.start(vm_id)
+                 elif fdu_info.get('status') == 'PAUSE':
+                     self.fos_api.fdu.resume(vm_id)
+                 else:
+                     raise vimconn.vimconnConflictException("Cannot start from this state")
+             elif "pause" in action_dict:
+                 if fdu_info.get('status') == 'RUN':
+                     self.fos_api.fdu.pause(vm_id)
+                 else:
+                     raise vimconn.vimconnConflictException("Cannot pause from this state")
+             elif "resume" in action_dict:
+                 if fdu_info.get('status') == 'PAUSE':
+                     self.fos_api.fdu.resume(vm_id)
+                 else:
+                     raise vimconn.vimconnConflictException("Cannot resume from this state")
+             elif "shutoff" in action_dict or "shutdown" or "forceOff" in action_dict:
+                 if fdu_info.get('status') == 'RUN':
+                     self.fos_api.fdu.stop(vm_id)
+                 else:
+                     raise vimconn.vimconnConflictException("Cannot shutoff from this state")
+             elif "terminate" in action_dict:
+                 if fdu_info.get('status') == 'RUN':
+                     self.fos_api.fdu.stop(vm_id)
+                     self.fos_api.fdu.clean(vm_id)
+                     self.fos_api.fdu.undefine(vm_id)
+                     # self.fos_api.fdu.offload(vm_id)
+                 elif fdu_info.get('status') == 'CONFIGURE':
+                     self.fos_api.fdu.clean(vm_id)
+                     self.fos_api.fdu.undefine(vm_id)
+                     # self.fos_api.fdu.offload(vm_id)
+                 elif fdu_info.get('status') == 'PAUSE':
+                     self.fos_api.fdu.resume(vm_id)
+                     self.fos_api.fdu.stop(vm_id)
+                     self.fos_api.fdu.clean(vm_id)
+                     self.fos_api.fdu.undefine(vm_id)
+                     # self.fos_api.fdu.offload(vm_id)
+                 else:
+                     raise vimconn.vimconnConflictException("Cannot terminate from this state")
+             elif "rebuild" in action_dict:
+                 raise vimconnNotImplemented("Rebuild not implememnted")
+             elif "reboot" in action_dict:
+                 if fdu_info.get('status') == 'RUN':
+                     self.fos_api.fdu.stop(vm_id)
+                     self.fos_api.fdu.start(vm_id)
+                 else:
+                     raise vimconn.vimconnConflictException("Cannot reboot from this state")
+         except Exception as e:
+             raise vimconn.vimconnConnectionException("VIM not reachable. Error {}".format(e))
index 0000000,6702506..0164a30
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,20 +1,20 @@@
 -git+https://osm.etsi.org/gerrit/osm/RO.git@py3#egg=osm-ro&subdirectory=RO
+ ##
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #    http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ # implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ ##
+ PyYAML
+ requests
+ netaddr
+ fog05rest>=0.0.4
++git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
index 0000000,440b1cb..d788dcb
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,684 +1,687 @@@
 -    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):  # , **vim_specific):
+ # -*- coding: utf-8 -*-
+ ##
+ # Copyright 2017  Telefonica Digital Spain S.L.U.
+ # This file is part of ETSI OSM
+ #  All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: patent-office@telefonica.com
+ ##
+ """
+ vimconnector implements all the methods to interact with OpenNebula using the XML-RPC API.
+ """
+ __author__ = "Jose Maria Carmona Perez,Juan Antonio Hernando Labajo, Emilio Abraham Garrido Garcia,Alberto Florez " \
+              "Pages, Andres Pozo Munoz, Santiago Perez Marin, Onlife Networks Telefonica I+D Product Innovation "
+ __date__ = "$13-dec-2017 11:09:29$"
+ from osm_ro import vimconn
+ import requests
+ import logging
+ import oca
+ import untangle
+ import math
+ import random
+ import pyone
+ class vimconnector(vimconn.vimconnector):
+     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
+                  log_level="DEBUG", config={}, persistent_info={}):
+         """Constructor of VIM
+         Params:
+             'uuid': id assigned to this VIM
+             'name': name assigned to this VIM, can be used for logging
+             'tenant_id', 'tenant_name': (only one of them is mandatory) VIM tenant to be used
+             'url_admin': (optional), url used for administrative tasks
+             'user', 'passwd': credentials of the VIM user
+             'log_level': provide if it should use a different log_level than the general one
+             'config': dictionary with extra VIM information. This contains a consolidate version of general VIM config
+                     at creation and particular VIM config at the attachment
+             'persistent_info': dict where the class can store information that will be available among class
+                     destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+                     empty dict. Useful to store login/tokens information for speed up communication
+         Returns: Raises an exception if some needed parameter is missing, but it must not do any connectivity
+             check against the VIM
+         """
+         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                       config)
+     def _new_one_connection(self):
+         return pyone.OneServer(self.url, session=self.user + ':' + self.passwd)
+     def new_tenant(self, tenant_name, tenant_description):
+         """Adds a new tenant to VIM with this name and description, returns the tenant identifier"""
+         try:
+             client = oca.Client(self.user + ':' + self.passwd, self.url)
+             group_list = oca.GroupPool(client)
+             user_list = oca.UserPool(client)
+             group_list.info()
+             user_list.info()
+             create_primarygroup = 1
+             # create group-tenant
+             for group in group_list:
+                 if str(group.name) == str(tenant_name):
+                     create_primarygroup = 0
+                     break
+             if create_primarygroup == 1:
+                 oca.Group.allocate(client, tenant_name)
+             group_list.info()
+             # set to primary_group the tenant_group and oneadmin to secondary_group
+             for group in group_list:
+                 if str(group.name) == str(tenant_name):
+                     for user in user_list:
+                         if str(user.name) == str(self.user):
+                             if user.name == "oneadmin":
+                                 return str(0)
+                             else:
+                                 self._add_secondarygroup(user.id, group.id)
+                                 user.chgrp(group.id)
+                                 return str(group.id)
+         except Exception as e:
+             self.logger.error("Create new tenant error: " + str(e))
+             raise vimconn.vimconnException(e)
+     def delete_tenant(self, tenant_id):
+         """Delete a tenant from VIM. Returns the old tenant identifier"""
+         try:
+             client = oca.Client(self.user + ':' + self.passwd, self.url)
+             group_list = oca.GroupPool(client)
+             user_list = oca.UserPool(client)
+             group_list.info()
+             user_list.info()
+             for group in group_list:
+                 if str(group.id) == str(tenant_id):
+                     for user in user_list:
+                         if str(user.name) == str(self.user):
+                             self._delete_secondarygroup(user.id, group.id)
+                             group.delete(client)
+                     return None
+             raise vimconn.vimconnNotFoundException("Group {} not found".format(tenant_id))
+         except Exception as e:
+             self.logger.error("Delete tenant " + str(tenant_id) + " error: " + str(e))
+             raise vimconn.vimconnException(e)
+     def _add_secondarygroup(self, id_user, id_group):
+         # add the group as a secondary group of the user
+         params = '<?xml version="1.0"?> \
+                    <methodCall>\
+                    <methodName>one.user.addgroup</methodName>\
+                    <params>\
+                    <param>\
+                    <value><string>{}:{}</string></value>\
+                    </param>\
+                    <param>\
+                    <value><int>{}</int></value>\
+                    </param>\
+                    <param>\
+                    <value><int>{}</int></value>\
+                    </param>\
+                    </params>\
+                    </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
+         requests.post(self.url, params)
+     def _delete_secondarygroup(self, id_user, id_group):
+         params = '<?xml version="1.0"?> \
+                    <methodCall>\
+                    <methodName>one.user.delgroup</methodName>\
+                    <params>\
+                    <param>\
+                    <value><string>{}:{}</string></value>\
+                    </param>\
+                    <param>\
+                    <value><int>{}</int></value>\
+                    </param>\
+                    <param>\
+                    <value><int>{}</int></value>\
+                    </param>\
+                    </params>\
+                    </methodCall>'.format(self.user, self.passwd, (str(id_user)), (str(id_group)))
+         requests.post(self.url, params)
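+     # Illustrative sketch (not in the original code): for hypothetical values
+     # id_user=5 and id_group=105, _add_secondarygroup posts a raw XML-RPC call
+     # equivalent to one.user.addgroup("<user>:<passwd>", 5, 105), i.e. the
+     # session string first, then the user id and the group id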
 -            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
++    def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):  # , **vim_specific):
+         """Adds a tenant network to VIM
+         Params:
+             'net_name': name of the network
+             'net_type': one of:
+                 'bridge': overlay isolated network
+                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+             'ip_profile': is a dict containing the IP parameters of the network
+                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                 'dhcp_enabled': True or False
+                 'dhcp_start_address': ip_schema, first IP to grant
+                 'dhcp_count': number of IPs to grant.
+             'shared': if this network can be seen/used by other tenants/organizations
++            'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
+         Returns a tuple with the network identifier and created_items, or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         # oca library method cannot be used in this case (problem with cluster parameters)
+         try:
++            vlan = None
++            if provider_network_profile:
++                vlan = provider_network_profile.get("segmentation-id")
+             created_items = {}
+             one = self._new_one_connection()
+             size = "254"
+             if ip_profile is None:
+                 subnet_rand = random.randint(0, 255)
+                 ip_start = "192.168.{}.1".format(subnet_rand)
+             else:
+                 index = ip_profile["subnet_address"].find("/")
+                 ip_start = ip_profile["subnet_address"][:index]
+                 if "dhcp_count" in ip_profile and ip_profile["dhcp_count"] is not None:
+                     size = str(ip_profile["dhcp_count"])
+                 elif "dhcp_count" not in ip_profile and ip_profile["ip_version"] == "IPv4":
+                     prefix = int(ip_profile["subnet_address"][index + 1:])
+                     size = int(math.pow(2, 32 - prefix))
+                 if "dhcp_start_address" in ip_profile and ip_profile["dhcp_start_address"] is not None:
+                     ip_start = str(ip_profile["dhcp_start_address"])
+                 if ip_profile["ip_version"] == "IPv6":
+                     ip_prefix_type = "GLOBAL_PREFIX"
+             if vlan is not None:
+                 vlan_id = vlan
+             else:
+                 vlan_id = str(random.randint(100, 4095))
+             #if "internal" in net_name:
+             # OpenNebula not support two networks with same name
+             random_net_name = str(random.randint(1, 1000000))
+             net_name = net_name + random_net_name
+             net_id = one.vn.allocate({
+                         'NAME': net_name,
+                         'VN_MAD': '802.1Q',
+                         'PHYDEV': self.config["network"]["phydev"],
+                         'VLAN_ID': vlan_id
+                     }, self.config["cluster"]["id"])
+             arpool = {'AR_POOL': {
+                         'AR': {
+                             'TYPE': 'IP4',
+                             'IP': ip_start,
+                             'SIZE': size
+                         }
+                     }
+             }
+             one.vn.add_ar(net_id, arpool)
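+             # the AR (address range) registered above makes OpenNebula lease
+             # 'size' IPv4 addresses starting at ip_start on the new network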
+             return net_id, created_items
+         except Exception as e:
+             self.logger.error("Create new network error: " + str(e))
+             raise vimconn.vimconnException(e)
+     def get_network_list(self, filter_dict={}):
+         """Obtain tenant networks of VIM
+         Params:
+             'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
+                 name: string  => returns only networks with this name
+                 id:   string  => returns networks with this VIM id, this imply returns one network at most
+                 shared: boolean => returns only networks that are (or are not) shared
+                 tenant_id: string => returns only networks that belong to this tenant/project
+                 #(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
+                 #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+         Returns the network list of dictionaries. each dictionary contains:
+             'id': (mandatory) VIM network id
+             'name': (mandatory) VIM network name
+             'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
+             'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         List can be empty if no network matches the filter_dict. Raise an exception only upon VIM connectivity,
+             authorization, or some other unspecific error
+         """
+         try:
+             one = self._new_one_connection()
+             net_pool = one.vnpool.info(-2, -1, -1).VNET
+             response = []
+             if "name" in filter_dict:
+                 network_name_filter = filter_dict["name"]
+             else:
+                 network_name_filter = None
+             if "id" in filter_dict:
+                 network_id_filter = filter_dict["id"]
+             else:
+                 network_id_filter = None
+             for network in net_pool:
+                 if network.NAME == network_name_filter or str(network.ID) == str(network_id_filter):
+                     net_dict = {"name": network.NAME, "id": str(network.ID), "status": "ACTIVE"}
+                     response.append(net_dict)
+             return response
+         except Exception as e:
+             self.logger.error("Get network list error: " + str(e))
+             raise vimconn.vimconnException(e)
+     def get_network(self, net_id):
+         """Obtain network details from the 'net_id' VIM network
+         Return a dict that contains:
+             'id': (mandatory) VIM network id, that is, net_id
+             'name': (mandatory) VIM network name
+             'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         Raises an exception upon error or when network is not found
+         """
+         try:
+             one = self._new_one_connection()
+             net_pool = one.vnpool.info(-2, -1, -1).VNET
+             net = {}
+             for network in net_pool:
+                 if str(network.ID) == str(net_id):
+                     net['id'] = network.ID
+                     net['name'] = network.NAME
+                     net['status'] = "ACTIVE"
+                     break
+             if net:
+                 return net
+             else:
+                 raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
+         except Exception as e:
+             self.logger.error("Get network " + str(net_id) + " error): " + str(e))
+             raise vimconn.vimconnException(e)
+     def delete_network(self, net_id, created_items=None):
+         """
+         Removes a tenant network from VIM and its associated elements
+         :param net_id: VIM identifier of the network, provided by method new_network
+         :param created_items: dictionary with extra items to be deleted. provided by method new_network
+         Returns the network identifier or raises an exception upon error or when network is not found
+         """
+         try:
+             one = self._new_one_connection()
+             one.vn.delete(int(net_id))
+             return net_id
+         except Exception as e:
+             self.logger.error("Delete network " + str(net_id) + "error: network not found" + str(e))
+             raise vimconn.vimconnException(e)
+     def refresh_nets_status(self, net_list):
+         """Get the status of the networks
+         Params:
+             'net_list': a list with the VIM network id to be get the status
+         Returns a dictionary with:
+             'net_id':         #VIM id of this network
+                 status:     #Mandatory. Text with one of:
+                     #  DELETED (not found at vim)
+                     #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+                     #  OTHER (Vim reported other status not understood)
+                     #  ERROR (VIM indicates an ERROR status)
+                     #  ACTIVE, INACTIVE, DOWN (admin down),
+                     #  BUILD (on building process)
+                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+             'net_id2': ...
+         """
+         net_dict = {}
+         try:
+             for net_id in net_list:
+                 net = {}
+                 try:
+                     net_vim = self.get_network(net_id)
+                     net["status"] = net_vim["status"]
+                     net["vim_info"] = None
+                 except vimconn.vimconnNotFoundException as e:
+                     self.logger.error("Exception getting net status: {}".format(str(e)))
+                     net['status'] = "DELETED"
+                     net['error_msg'] = str(e)
+                 except vimconn.vimconnException as e:
+                     self.logger.error(e)
+                     net["status"] = "VIM_ERROR"
+                     net["error_msg"] = str(e)
+                 net_dict[net_id] = net
+             return net_dict
+         except vimconn.vimconnException as e:
+             self.logger.error(e)
+             for k in net_dict:
+                 net_dict[k]["status"] = "VIM_ERROR"
+                 net_dict[k]["error_msg"] = str(e)
+             return net_dict
+     def get_flavor(self, flavor_id):  # this is correct
+         """Obtain flavor details from the VIM
+         Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+         Raises an exception upon error or if not found
+         """
+         try:
+             one = self._new_one_connection()
+             template = one.template.info(int(flavor_id))
+             if template is not None:
+                 return {'id': template.ID, 'name': template.NAME}
+             raise vimconn.vimconnNotFoundException("Flavor {} not found".format(flavor_id))
+         except Exception as e:
+             self.logger.error("get flavor " + str(flavor_id) + " error: " + str(e))
+             raise vimconn.vimconnException(e)
+     def new_flavor(self, flavor_data):
+         """Adds a tenant flavor to VIM
+             flavor_data contains a dictionary with information, keys:
+                 name: flavor name
+                 ram: memory (cloud type) in MBytes
+                 vcpus: cpus (cloud type)
+                 extended: EPA parameters
+                   - numas: #items requested in same NUMA
+                         memory: number of 1G huge pages memory
+                         paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                         interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                           - name: interface name
+                             dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                             bandwidth: X Gbps; requested guarantee bandwidth
+                             vpci: requested virtual PCI address
+                 disk: disk size
+                 is_public:
+                  #TODO to be determined
+         Returns the flavor identifier"""
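+         # OpenNebula expects DISK/SIZE in MB while flavor_data["disk"] is given in GB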
+         disk_size = str(int(flavor_data["disk"])*1024)
+         try:
+             one = self._new_one_connection()
+             template_id = one.template.allocate({
+                 'TEMPLATE': {
+                     'NAME': flavor_data["name"],
+                     'CPU': flavor_data["vcpus"],
+                     'VCPU': flavor_data["vcpus"],
+                     'MEMORY': flavor_data["ram"],
+                     'DISK': {
+                         'SIZE': disk_size
+                     },
+                     'CONTEXT': {
+                         'NETWORK': "YES",
+                         'SSH_PUBLIC_KEY': '$USER[SSH_PUBLIC_KEY]'
+                     },
+                     'GRAPHICS': {
+                         'LISTEN': '0.0.0.0',
+                         'TYPE': 'VNC'
+                     },
+                     'CLUSTER_ID': self.config["cluster"]["id"]
+                 }
+             })
+             return template_id
+         except Exception as e:
+             self.logger.error("Create new flavor error: " + str(e))
+             raise vimconn.vimconnException(e)
+     def delete_flavor(self, flavor_id):
+         """ Deletes a tenant flavor from VIM
+             Returns the old flavor_id
+         """
+         try:
+             one = self._new_one_connection()
+             one.template.delete(int(flavor_id), False)
+             return flavor_id
+         except Exception as e:
+             self.logger.error("Error deleting flavor " + str(flavor_id) + ". Flavor not found")
+             raise vimconn.vimconnException(e)
+     def get_image_list(self, filter_dict={}):
+         """Obtain tenant images from VIM
+         Filter_dict can be:
+             name: image name
+             id: image uuid
+             checksum: image checksum
+             location: image path
+         Returns the image list of dictionaries:
+             [{<the fields at Filter_dict plus some VIM specific>}, ...]
+             List can be empty
+         """
+         try:
+             one = self._new_one_connection()
+             image_pool = one.imagepool.info(-2, -1, -1).IMAGE
+             images = []
+             if "name" in filter_dict:
+                 image_name_filter = filter_dict["name"]
+             else:
+                 image_name_filter = None
+             if "id" in filter_dict:
+                 image_id_filter = filter_dict["id"]
+             else:
+                 image_id_filter = None
+             for image in image_pool:
+                 if str(image_name_filter) == str(image.NAME) or str(image.ID) == str(image_id_filter):
+                     images_dict = {"name": image.NAME, "id": str(image.ID)}
+                     images.append(images_dict)
+             return images
+         except Exception as e:
+             self.logger.error("Get image list error: " + str(e))
+             raise vimconn.vimconnException(e)
+     def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+                        availability_zone_index=None, availability_zone_list=None):
+         """Adds a VM instance to VIM
+             Params:
+                 'start': (boolean) indicates if VM must start or created in pause mode.
+                 'image_id','flavor_id': image and flavor VIM id to use for the VM
+                 'net_list': list of interfaces, each one is a dictionary with:
+                     'name': (optional) name for the interface.
+                     'net_id': VIM network id where this interface must be connected to. Mandatory for type==virtual
+                     'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                     'model': (optional, only makes sense for type==virtual) interface model: virtio, e1000, ...
+                     'mac_address': (optional) mac address to assign to this interface
+                     'ip_address': (optional) IP address to assign to this interface
+                     #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
+                         the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+                     'type': (mandatory) can be one of:
+                         'virtual', in this case always connected to a network of type 'net_type=bridge'
+                         'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
+                             can be created unconnected
+                         'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                         'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                                 are allocated on the same physical NIC
+                     'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                     'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                                     or True, it must apply the default VIM behaviour
+                     After execution the method will add the key:
+                     'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                             interface. 'net_list' is modified
+                 'cloud_config': (optional) dictionary with:
+                     'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                     'users': (optional) list of users to be inserted, each item is a dict with:
+                         'name': (mandatory) user name,
+                         'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                     'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                         or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                     'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                         'dest': (mandatory) string with the destination absolute path
+                         'encoding': (optional, by default text). Can be one of:
+                             'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                         'content' (mandatory): string with the content of the file
+                         'permissions': (optional) string with file permissions, typically octal notation '0644'
+                         'owner': (optional) file owner, string with the format 'owner:group'
+                     'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+                 'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                     'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                     'size': (mandatory) string with the size of the disk in GB
+                 availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
+                 availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                     availability_zone_index is None
+             Returns a tuple with the instance identifier and created_items or raises an exception on error
+                 created_items can be None or a dictionary where this method can include key-values that will be passed to
+                 the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+                 Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+                 as not present.
+             """
+         self.logger.debug(
+             "new_vminstance input: image='{}' flavor='{}' nics='{}'".format(image_id, flavor_id, str(net_list)))
+         try:
+             one = self._new_one_connection()
+             template_vim = one.template.info(int(flavor_id), True)
+             disk_size = str(template_vim.TEMPLATE["DISK"]["SIZE"])
+             template_updated = ""
+             for net in net_list:
+                 net_in_vim = one.vn.info(int(net["net_id"]))
+                 net["vim_id"] = str(net_in_vim.ID)
+                 network = 'NIC = [NETWORK = "{}",NETWORK_UNAME = "{}" ]'.format(
+                     net_in_vim.NAME, net_in_vim.UNAME)
+                 template_updated += network
+             template_updated += "DISK = [ IMAGE_ID = {},\n  SIZE = {}]".format(image_id, disk_size)
+             if isinstance(cloud_config, dict):
+                 if cloud_config.get("key-pairs"):
+                     context = 'CONTEXT = [\n  NETWORK = "YES",\n  SSH_PUBLIC_KEY = "'
+                     for key in cloud_config["key-pairs"]:
+                         context += key + '\n'
+                     # if False:
+                     #     context += '"\n  USERNAME = '
+                     context += '"]'
+                     template_updated += context
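+             # one.template.instantiate(template_id, name, hold, extra_template)
+             # merges the NIC/DISK/CONTEXT attributes built above into the flavor
+             # template at instantiation time (per the pyone bindings)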
+             vm_instance_id = one.template.instantiate(int(flavor_id), name, False, template_updated)
+             self.logger.info(
+                 "Instanciating in OpenNebula a new VM name:{} id:{}".format(name, flavor_id))
+             return str(vm_instance_id), None
+         except pyone.OneNoExistsException as e:
+             self.logger.error("Network with id " + str(e) + " not found: " + str(e))
+             raise vimconn.vimconnNotFoundException(e)
+         except Exception as e:
+             self.logger.error("Create new vm instance error: " + str(e))
+             raise vimconn.vimconnException(e)
+     def get_vminstance(self, vm_id):
+         """Returns the VM instance information from VIM"""
+         try:
+             one = self._new_one_connection()
+             vm = one.vm.info(int(vm_id))
+             return vm
+         except Exception as e:
+             self.logger.error("Getting vm instance error: " + str(e) + ": VM Instance not found")
+             raise vimconn.vimconnException(e)
+     def delete_vminstance(self, vm_id, created_items=None):
+         """
+         Removes a VM instance from VIM and its associated elements
+         :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+         :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
+             action_vminstance
+         :return: None or the same vm_id. Raises an exception on fail
+         """
+         try:
+             one = self._new_one_connection()
+             one.vm.recover(int(vm_id), 3)  # recover operation 3 = delete
+             while True:
+                 try:
+                     vm = one.vm.info(int(vm_id))
+                 except pyone.OneNoExistsException:
+                     # the VM is gone from the pool: deletion has completed
+                     return vm_id
+                 if vm.LCM_STATE == 0:
+                     return vm_id
+         except pyone.OneNoExistsException:
+             self.logger.info("The vm " + str(vm_id) + " does not exist or is already deleted")
+             raise vimconn.vimconnNotFoundException("The vm {} does not exist or is already deleted".format(vm_id))
+         except Exception as e:
+             self.logger.error("Delete vm instance " + str(vm_id) + " error: " + str(e))
+             raise vimconn.vimconnException(e)
+     def refresh_vms_status(self, vm_list):
+         """Get the status of the virtual machines and their interfaces/ports
+            Params: the list of VM identifiers
+            Returns a dictionary with:
+                 vm_id:          #VIM id of this Virtual Machine
+                     status:     #Mandatory. Text with one of:
+                                 #  DELETED (not found at vim)
+                                 #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                 #  OTHER (Vim reported other status not understood)
+                                 #  ERROR (VIM indicates an ERROR status)
+                                 #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                                 #  BUILD (on building process), ERROR
+                                 #  ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+                                 #
+                     error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                     interfaces: list with interface info. Each item a dictionary with:
+                         vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                         mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                         vim_net_id:       #network id where this interface is connected, if provided at creation
+                         vim_interface_id: #interface/port VIM id
+                         ip_address:       #null, or text with IPv4, IPv6 address
+                         compute_node:     #identification of compute node where PF,VF interface is allocated
+                         pci:              #PCI address of the NIC that hosts the PF,VF
+                         vlan:             #physical VLAN used for VF
+         """
+         vm_dict = {}
+         try:
+             for vm_id in vm_list:
+                 vm = {}
+                 try:
+                     vm_element = self.get_vminstance(vm_id)
+                 except vimconn.vimconnException:
+                     # get_vminstance raises when the VM cannot be retrieved from the VIM
+                     self.logger.info("The vm " + str(vm_id) + " does not exist.")
+                     vm['status'] = "DELETED"
+                     vm['error_msg'] = ("The vm " + str(vm_id) + " does not exist.")
+                     vm_dict[vm_id] = vm
+                     continue
+                 vm["vim_info"] = None
+                 vm_status = vm_element.LCM_STATE
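+                 # LCM_STATE numbering per OpenNebula: 3 = RUNNING, 36 = BOOT_FAILURE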
+                 if vm_status == 3:
+                     vm['status'] = "ACTIVE"
+                 elif vm_status == 36:
+                     vm['status'] = "ERROR"
+                     vm['error_msg'] = "VM failure"
+                 else:
+                     vm['status'] = "BUILD"
+                 if vm_element is not None:
+                     interfaces = self._get_networks_vm(vm_element)
+                     vm["interfaces"] = interfaces
+                 vm_dict[vm_id] = vm
+             return vm_dict
+         except Exception as e:
+             self.logger.error(e)
+             for k in vm_dict:
+                 vm_dict[k]["status"] = "VIM_ERROR"
+                 vm_dict[k]["error_msg"] = str(e)
+             return vm_dict
+     def _get_networks_vm(self, vm_element):
+         interfaces = []
+         try:
+             nics = vm_element.TEMPLATE["NIC"]
+             if not isinstance(nics, list):
+                 # a single NIC comes as a dict; normalize to a list
+                 nics = [nics]
+             for net in nics:
+                 interface = {'vim_info': None, "mac_address": str(net["MAC"]), "vim_net_id": str(net["NETWORK_ID"]),
+                              "vim_interface_id": str(net["NETWORK_ID"])}
+                 # maybe it should be 2 different keys for ip_address if an interface has ipv4 and ipv6
+                 if u'IP' in net:
+                     interface["ip_address"] = str(net["IP"])
+                 if u'IP6_GLOBAL' in net:
+                     interface["ip_address"] = str(net["IP6_GLOBAL"])
+                 interfaces.append(interface)
+             return interfaces
+         except Exception as e:
+             self.logger.error("Error getting vm interface_information of vm_id: " + str(vm_element.ID))
index 0000000,ba6b655..71b09d8
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,23 +1,23 @@@
 -git+https://osm.etsi.org/gerrit/osm/RO.git@py3#egg=osm-ro&subdirectory=RO
+ ##
+ # Copyright 2017  Telefonica Digital Spain S.L.U.
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #    http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ # implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ ##
+ PyYAML
+ requests
+ netaddr
+ untangle
+ pyone
+ git+https://github.com/python-oca/python-oca#egg=oca
++git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
index 0000000,1c2b072..15ef713
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,2223 +1,2227 @@@
 -    def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
+ # -*- coding: utf-8 -*-
+ ##
+ # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ ##
+ '''
+ osconnector implements all the methods to interact with openstack using the python-neutronclient.
+ For the VNF forwarding graph, The OpenStack VIM connector calls the
+ networking-sfc Neutron extension methods, whose resources are mapped
+ to the VIM connector's SFC resources as follows:
+ - Classification (OSM) -> Flow Classifier (Neutron)
+ - Service Function Instance (OSM) -> Port Pair (Neutron)
+ - Service Function (OSM) -> Port Pair Group (Neutron)
+ - Service Function Path (OSM) -> Port Chain (Neutron)
+ '''
+ __author__ = "Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research, Igor D.C., Eduardo Sousa"
+ __date__  = "$22-sep-2017 23:59:59$"
+ from osm_ro import vimconn
+ # import json
+ import logging
+ import netaddr
+ import time
+ import yaml
+ import random
+ import re
+ import copy
+ from pprint import pformat
+ from novaclient import client as nClient, exceptions as nvExceptions
+ from keystoneauth1.identity import v2, v3
+ from keystoneauth1 import session
+ import keystoneclient.exceptions as ksExceptions
+ import keystoneclient.v3.client as ksClient_v3
+ import keystoneclient.v2_0.client as ksClient_v2
+ from glanceclient import client as glClient
+ import glanceclient.exc as gl1Exceptions
+ from  cinderclient import client as cClient
+ from http.client  import HTTPException   # TODO py3 check that this base exception matches python2 httplib.HTTPException
+ from neutronclient.neutron import client as neClient
+ from neutronclient.common import exceptions as neExceptions
+ from requests.exceptions import ConnectionError
+ """contain the openstack virtual machine status to openmano status"""
+ vmStatus2manoFormat={'ACTIVE':'ACTIVE',
+                      'PAUSED':'PAUSED',
+                      'SUSPENDED': 'SUSPENDED',
+                      'SHUTOFF':'INACTIVE',
+                      'BUILD':'BUILD',
+                      'ERROR':'ERROR','DELETED':'DELETED'
+                      }
+ netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED'
+                      }
+ supportedClassificationTypes = ['legacy_flow_classifier']
+ #global var to have a timeout creating and deleting volumes
+ volume_timeout = 600
+ server_timeout = 600
+ class SafeDumper(yaml.SafeDumper):
+     def represent_data(self, data):
+         # Openstack APIs use custom subclasses of dict and YAML safe dumper
+         # is designed to not handle that (reference issue 142 of pyyaml)
+         if isinstance(data, dict) and data.__class__ != dict:
+             # A simple solution is to convert those items back to dicts
+             data = dict(data.items())
+         return super(SafeDumper, self).represent_data(data)
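+ # e.g. (illustrative) yaml.dump(server.to_dict(), Dumper=SafeDumper) will
+ # serialize Openstack's dict subclasses as plain YAML mappings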
+ class vimconnector(vimconn.vimconnector):
+     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
+                  log_level=None, config={}, persistent_info={}):
+         '''using common constructor parameters. In this case
+         'url' is the keystone authorization url,
+         'url_admin' is not used
+         '''
+         api_version = config.get('APIversion')
+         if api_version and api_version not in ('v3.3', 'v2.0', '2', '3'):
+             raise vimconn.vimconnException("Invalid value '{}' for config:APIversion. "
+                                            "Allowed values are 'v3.3', 'v2.0', '2' or '3'".format(api_version))
+         vim_type = config.get('vim_type')
+         if vim_type and vim_type not in ('vio', 'VIO'):
+             raise vimconn.vimconnException("Invalid value '{}' for config:vim_type."
+                             "Allowed values are 'vio' or 'VIO'".format(vim_type))
+         if config.get('dataplane_net_vlan_range') is not None:
+             #validate vlan ranges provided by user
+             self._validate_vlan_ranges(config.get('dataplane_net_vlan_range'), 'dataplane_net_vlan_range')
+         if config.get('multisegment_vlan_range') is not None:
+             #validate vlan ranges provided by user
+             self._validate_vlan_ranges(config.get('multisegment_vlan_range'), 'multisegment_vlan_range')
+         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level,
+                                       config)
+         if self.config.get("insecure") and self.config.get("ca_cert"):
+             raise vimconn.vimconnException("options insecure and ca_cert are mutually exclusive")
+         self.verify = True
+         if self.config.get("insecure"):
+             self.verify = False
+         if self.config.get("ca_cert"):
+             self.verify = self.config.get("ca_cert")
+         if not url:
+             raise TypeError('url param can not be NoneType')
+         self.persistent_info = persistent_info
+         self.availability_zone = persistent_info.get('availability_zone', None)
+         self.session = persistent_info.get('session', {'reload_client': True})
+         self.my_tenant_id = self.session.get('my_tenant_id')
+         self.nova = self.session.get('nova')
+         self.neutron = self.session.get('neutron')
+         self.cinder = self.session.get('cinder')
+         self.glance = self.session.get('glance')
+         # self.glancev1 = self.session.get('glancev1')
+         self.keystone = self.session.get('keystone')
+         self.api_version3 = self.session.get('api_version3')
+         self.vim_type = self.config.get("vim_type")
+         if self.vim_type:
+             self.vim_type = self.vim_type.upper()
+         if self.config.get("use_internal_endpoint"):
+             self.endpoint_type = "internalURL"
+         else:
+             self.endpoint_type = None
+         self.logger = logging.getLogger('openmano.vim.openstack')
+         # allow security_groups to be a list or a single string
+         if isinstance(self.config.get('security_groups'), str):
+             self.config['security_groups'] = [self.config['security_groups']]
+         self.security_groups_id = None
+         ####### VIO Specific Changes #########
+         if self.vim_type == "VIO":
+             self.logger = logging.getLogger('openmano.vim.vio')
+         if log_level:
+             self.logger.setLevel( getattr(logging, log_level))
+     def __getitem__(self, index):
+         """Get individuals parameters.
+         Throw KeyError"""
+         if index == 'project_domain_id':
+             return self.config.get("project_domain_id")
+         elif index == 'user_domain_id':
+             return self.config.get("user_domain_id")
+         else:
+             return vimconn.vimconnector.__getitem__(self, index)
+     def __setitem__(self, index, value):
+         """Set individuals parameters and it is marked as dirty so to force connection reload.
+         Throw KeyError"""
+         if index == 'project_domain_id':
+             self.config["project_domain_id"] = value
+         elif index == 'user_domain_id':
+                 self.config["user_domain_id"] = value
+         else:
+             vimconn.vimconnector.__setitem__(self, index, value)
+         self.session['reload_client'] = True
+     def serialize(self, value):
+         """Serialization of python basic types.
+         In the case value is not serializable a message will be logged and a
+         simple representation of the data that cannot be converted back to
+         python is returned.
+         """
+         if isinstance(value, str):
+             return value
+         try:
+             return yaml.dump(value, Dumper=SafeDumper,
+                              default_flow_style=True, width=256)
+         except yaml.representer.RepresenterError:
+             self.logger.debug('The following entity cannot be serialized in YAML:\n\n%s\n\n', pformat(value),
+                               exc_info=True)
+             return str(value)
+     def _reload_connection(self):
+         '''Called before any operation, it checks if credentials have changed
+         Throw keystoneclient.apiclient.exceptions.AuthorizationFailure
+         '''
+         #TODO control the timing and possible token timeout, but it seems that the python client does this task for us :-)
+         if self.session['reload_client']:
+             if self.config.get('APIversion'):
+                 self.api_version3 = self.config['APIversion'] == 'v3.3' or self.config['APIversion'] == '3'
+             else:  # get from ending auth_url that end with v3 or with v2.0
+                 self.api_version3 =  self.url.endswith("/v3") or self.url.endswith("/v3/")
+             self.session['api_version3'] = self.api_version3
+             if self.api_version3:
+                 if self.config.get('project_domain_id') or self.config.get('project_domain_name'):
+                     project_domain_id_default = None
+                 else:
+                     project_domain_id_default = 'default'
+                 if self.config.get('user_domain_id') or self.config.get('user_domain_name'):
+                     user_domain_id_default = None
+                 else:
+                     user_domain_id_default = 'default'
+                 auth = v3.Password(auth_url=self.url,
+                                    username=self.user,
+                                    password=self.passwd,
+                                    project_name=self.tenant_name,
+                                    project_id=self.tenant_id,
+                                    project_domain_id=self.config.get('project_domain_id', project_domain_id_default),
+                                    user_domain_id=self.config.get('user_domain_id', user_domain_id_default),
+                                    project_domain_name=self.config.get('project_domain_name'),
+                                    user_domain_name=self.config.get('user_domain_name'))
+             else:
+                 auth = v2.Password(auth_url=self.url,
+                                    username=self.user,
+                                    password=self.passwd,
+                                    tenant_name=self.tenant_name,
+                                    tenant_id=self.tenant_id)
+             sess = session.Session(auth=auth, verify=self.verify)
+             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+             region_name = self.config.get('region_name')
+             if self.api_version3:
+                 self.keystone = ksClient_v3.Client(session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+             else:
+                 self.keystone = ksClient_v2.Client(session=sess, endpoint_type=self.endpoint_type)
+             self.session['keystone'] = self.keystone
+             # In order to enable microversion functionality an explicit microversion must be specified in 'config'.
+             # This implementation approach is due to the warning message in
+             # https://developer.openstack.org/api-guide/compute/microversions.html
+             # where it is stated that microversion backwards compatibility is not guaranteed and clients should
+             # always require a specific microversion.
+             # To be able to use 'device role tagging' functionality define 'microversion: 2.32' in datacenter config
+             version = self.config.get("microversion")
+             if not version:
+                 version = "2.1"
+             # added region_name to keystone, nova, neutron and cinder to support distributed cloud for Wind River Titanium cloud and StarlingX
+             self.nova = self.session['nova'] = nClient.Client(str(version), session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+             self.neutron = self.session['neutron'] = neClient.Client('2.0', session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+             self.cinder = self.session['cinder'] = cClient.Client(2, session=sess, endpoint_type=self.endpoint_type, region_name=region_name)
+             try:
+                 self.my_tenant_id = self.session['my_tenant_id'] = sess.get_project_id()
+             except Exception as e:
+                 self.logger.error("Cannot get project_id from session", exc_info=True)
+             if self.endpoint_type == "internalURL":
+                 glance_service_id = self.keystone.services.list(name="glance")[0].id
+                 glance_endpoint = self.keystone.endpoints.list(glance_service_id, interface="internal")[0].url
+             else:
+                 glance_endpoint = None
+             self.glance = self.session['glance'] = glClient.Client(2, session=sess, endpoint=glance_endpoint)
+             # using version 1 of glance client in new_image()
+             # self.glancev1 = self.session['glancev1'] = glClient.Client('1', session=sess,
+             #                                                            endpoint=glance_endpoint)
+             self.session['reload_client'] = False
+             self.persistent_info['session'] = self.session
+             # add availability zone info inside self.persistent_info
+             self._set_availablity_zones()
+             self.persistent_info['availability_zone'] = self.availability_zone
+             self.security_groups_id = None  # force to get again security_groups_ids next time they are needed
+     def __net_os2mano(self, net_list_dict):
+         '''Transform the net openstack format to mano format
+         net_list_dict can be a list of dict or a single dict'''
+         if isinstance(net_list_dict, dict):
+             net_list_ = (net_list_dict,)
+         elif isinstance(net_list_dict, list):
+             net_list_ = net_list_dict
+         else:
+             raise TypeError("param net_list_dict must be a list or a dictionary")
+         for net in net_list_:
+             if net.get('provider:network_type') == "vlan":
+                 net['type']='data'
+             else:
+                 net['type']='bridge'
+     def __classification_os2mano(self, class_list_dict):
+         """Transform the openstack format (Flow Classifier) to mano format
+         (Classification) class_list_dict can be a list of dict or a single dict
+         """
+         if isinstance(class_list_dict, dict):
+             class_list_ = [class_list_dict]
+         elif isinstance(class_list_dict, list):
+             class_list_ = class_list_dict
+         else:
+             raise TypeError(
+                 "param class_list_dict must be a list or a dictionary")
+         for classification in class_list_:
+             id = classification.pop('id')
+             name = classification.pop('name')
+             description = classification.pop('description')
+             project_id = classification.pop('project_id')
+             tenant_id = classification.pop('tenant_id')
+             original_classification = copy.deepcopy(classification)
+             classification.clear()
+             classification['ctype'] = 'legacy_flow_classifier'
+             classification['definition'] = original_classification
+             classification['id'] = id
+             classification['name'] = name
+             classification['description'] = description
+             classification['project_id'] = project_id
+             classification['tenant_id'] = tenant_id
+     def __sfi_os2mano(self, sfi_list_dict):
+         """Transform the openstack format (Port Pair) to mano format (SFI)
+         sfi_list_dict can be a list of dict or a single dict
+         """
+         if isinstance(sfi_list_dict, dict):
+             sfi_list_ = [sfi_list_dict]
+         elif isinstance(sfi_list_dict, list):
+             sfi_list_ = sfi_list_dict
+         else:
+             raise TypeError(
+                 "param sfi_list_dict must be a list or a dictionary")
+         for sfi in sfi_list_:
+             sfi['ingress_ports'] = []
+             sfi['egress_ports'] = []
+             if sfi.get('ingress'):
+                 sfi['ingress_ports'].append(sfi['ingress'])
+             if sfi.get('egress'):
+                 sfi['egress_ports'].append(sfi['egress'])
+             del sfi['ingress']
+             del sfi['egress']
+             params = sfi.get('service_function_parameters')
+             sfc_encap = False
+             if params:
+                 correlation = params.get('correlation')
+                 if correlation:
+                     sfc_encap = True
+             sfi['sfc_encap'] = sfc_encap
+             del sfi['service_function_parameters']
+     def __sf_os2mano(self, sf_list_dict):
+         """Transform the openstack format (Port Pair Group) to mano format (SF)
+         sf_list_dict can be a list of dict or a single dict
+         """
+         if isinstance(sf_list_dict, dict):
+             sf_list_ = [sf_list_dict]
+         elif isinstance(sf_list_dict, list):
+             sf_list_ = sf_list_dict
+         else:
+             raise TypeError(
+                 "param sf_list_dict must be a list or a dictionary")
+         for sf in sf_list_:
+             del sf['port_pair_group_parameters']
+             sf['sfis'] = sf['port_pairs']
+             del sf['port_pairs']
+     def __sfp_os2mano(self, sfp_list_dict):
+         """Transform the openstack format (Port Chain) to mano format (SFP)
+         sfp_list_dict can be a list of dict or a single dict
+         """
+         if isinstance(sfp_list_dict, dict):
+             sfp_list_ = [sfp_list_dict]
+         elif isinstance(sfp_list_dict, list):
+             sfp_list_ = sfp_list_dict
+         else:
+             raise TypeError(
+                 "param sfp_list_dict must be a list or a dictionary")
+         for sfp in sfp_list_:
+             params = sfp.pop('chain_parameters')
+             sfc_encap = False
+             if params:
+                 correlation = params.get('correlation')
+                 if correlation:
+                     sfc_encap = True
+             sfp['sfc_encap'] = sfc_encap
+             sfp['spi'] = sfp.pop('chain_id')
+             sfp['classifications'] = sfp.pop('flow_classifiers')
+             sfp['service_functions'] = sfp.pop('port_pair_groups')
+     # placeholder for now; read TODO note below
+     def _validate_classification(self, type, definition):
+         # only legacy_flow_classifier Type is supported at this point
+         return True
+         # TODO(igordcard): this method should be an abstract method of an
+         # abstract Classification class to be implemented by the specific
+         # Types. Also, abstract vimconnector should call the validation
+         # method before the implemented VIM connectors are called.
+     def _format_exception(self, exception):
+         '''Transform a keystone, nova or neutron exception into a vimconn exception'''
+         message_error = str(exception)
+         if isinstance(exception, (neExceptions.NetworkNotFoundClient, nvExceptions.NotFound, ksExceptions.NotFound,
+                                   gl1Exceptions.HTTPNotFound)):
+             raise vimconn.vimconnNotFoundException(type(exception).__name__ + ": " + message_error)
+         elif isinstance(exception, (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError,
+                                ConnectionError, ksExceptions.ConnectionError, neExceptions.ConnectionFailed)):
+             raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
+         elif isinstance(exception,  (KeyError, nvExceptions.BadRequest, ksExceptions.BadRequest)):
+             raise vimconn.vimconnException(type(exception).__name__ + ": " + message_error)
+         elif isinstance(exception, (nvExceptions.ClientException, ksExceptions.ClientException,
+                                     neExceptions.NeutronException)):
+             raise vimconn.vimconnUnexpectedResponse(type(exception).__name__ + ": " + message_error)
+         elif isinstance(exception, nvExceptions.Conflict):
+             raise vimconn.vimconnConflictException(type(exception).__name__ + ": " + message_error)
+         elif isinstance(exception, vimconn.vimconnException):
+             raise exception
+         else:  # ()
+             self.logger.error("General Exception " + message_error, exc_info=True)
+             raise vimconn.vimconnConnectionException(type(exception).__name__ + ": " + message_error)
+     def _get_ids_from_name(self):
+         """
+          Obtain ids from name of tenant and security_groups. Store at self.security_groups_id
+         :return: None
+         """
+         # get tenant_id if only tenant_name is supplied
+         self._reload_connection()
+         if not self.my_tenant_id:
+             raise vimconn.vimconnConnectionException("Error getting tenant information from name={} id={}".
+                                                      format(self.tenant_name, self.tenant_id))
+         if self.config.get('security_groups') and not self.security_groups_id:
+             # convert from name to id
+             neutron_sg_list = self.neutron.list_security_groups(tenant_id=self.my_tenant_id)["security_groups"]
+             self.security_groups_id = []
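+             # the loop below relies on Python's for/else: the else branch runs
+             # only when no neutron security group matched 'sg'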
+             for sg in self.config.get('security_groups'):
+                 for neutron_sg in neutron_sg_list:
+                     if sg in (neutron_sg["id"], neutron_sg["name"]):
+                         self.security_groups_id.append(neutron_sg["id"])
+                         break
+                 else:
+                     self.security_groups_id = None
+                     raise vimconn.vimconnConnectionException("Not found security group {} for this tenant".format(sg))
+     def check_vim_connectivity(self):
+         # just get network list to check connectivity and credentials
+         self.get_network_list(filter_dict={})
+     def get_tenant_list(self, filter_dict={}):
+         '''Obtain tenants of VIM
+         filter_dict can contain the following keys:
+             name: filter by tenant name
+             id: filter by tenant uuid/id
+             <other VIM specific>
+         Returns the tenant list of dictionaries: [{'name':'<name>', 'id':'<id>', ...}, ...]
+         '''
+         self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
+         try:
+             self._reload_connection()
+             if self.api_version3:
+                 project_class_list = self.keystone.projects.list(name=filter_dict.get("name"))
+             else:
+                 project_class_list = self.keystone.tenants.findall(**filter_dict)
+             project_list=[]
+             for project in project_class_list:
+                 if filter_dict.get('id') and filter_dict["id"] != project.id:
+                     continue
+                 project_list.append(project.to_dict())
+             return project_list
+         except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError) as e:
+             self._format_exception(e)
+     def new_tenant(self, tenant_name, tenant_description):
+         '''Adds a new tenant to openstack VIM. Returns the tenant identifier'''
+         self.logger.debug("Adding a new tenant name: %s", tenant_name)
+         try:
+             self._reload_connection()
+             if self.api_version3:
+                 project = self.keystone.projects.create(tenant_name, self.config.get("project_domain_id", "default"),
+                                                         description=tenant_description, is_domain=False)
+             else:
+                 project = self.keystone.tenants.create(tenant_name, tenant_description)
+             return project.id
+         except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.BadRequest, ConnectionError)  as e:
+             self._format_exception(e)
+     def delete_tenant(self, tenant_id):
+         '''Delete a tenant from openstack VIM. Returns the old tenant identifier'''
+         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
+         try:
+             self._reload_connection()
+             if self.api_version3:
+                 self.keystone.projects.delete(tenant_id)
+             else:
+                 self.keystone.tenants.delete(tenant_id)
+             return tenant_id
+         except (ksExceptions.ConnectionError, ksExceptions.ClientException, ksExceptions.NotFound, ConnectionError)  as e:
+             self._format_exception(e)
 -            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
++    def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+         """Adds a tenant network to VIM
+         Params:
+             'net_name': name of the network
+             'net_type': one of:
+                 'bridge': overlay isolated network
+                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+             'ip_profile': is a dict containing the IP parameters of the network
+                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X.X.X.X]
+                 'dhcp_enabled': True or False
+                 'dhcp_start_address': ip_schema, first IP to grant
+                 'dhcp_count': number of IPs to grant.
+             'shared': if this network can be seen/use by other tenants/organization
++            'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
+         Returns a tuple with the network identifier and created_items, or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type)
+         # self.logger.debug(">>>>>>>>>>>>>>>>>> IP profile %s", str(ip_profile))
++
+         try:
++            vlan = None
++            if provider_network_profile:
++                vlan = provider_network_profile.get("segmentation-id")
+             new_net = None
+             created_items = {}
+             self._reload_connection()
+             network_dict = {'name': net_name, 'admin_state_up': True}
+             if net_type=="data" or net_type=="ptp":
+                 if self.config.get('dataplane_physical_net') is None:
+                     raise vimconn.vimconnConflictException("You must provide a 'dataplane_physical_net' at config value before creating sriov network")
+                 if not self.config.get('multisegment_support'):
+                     network_dict["provider:physical_network"] = self.config[
+                         'dataplane_physical_net']  # "physnet_sriov" #TODO physical
+                     network_dict["provider:network_type"] = "vlan"
+                     if vlan is not None:
+                         network_dict["provider:segmentation_id"] = vlan
+                 else:
+                     ###### Multi-segment case ######
+                     segment_list = []
+                     segment1_dict = {}
+                     segment1_dict["provider:physical_network"] = ''
+                     segment1_dict["provider:network_type"]     = 'vxlan'
+                     segment_list.append(segment1_dict)
+                     segment2_dict = {}
+                     segment2_dict["provider:physical_network"] = self.config['dataplane_physical_net']
+                     segment2_dict["provider:network_type"]     = "vlan"
+                     if self.config.get('multisegment_vlan_range'):
+                         vlanID = self._generate_multisegment_vlanID()
+                         segment2_dict["provider:segmentation_id"] = vlanID
+                     # else
+                     #     raise vimconn.vimconnConflictException(
+                     #         "You must provide 'multisegment_vlan_range' at config dict before creating a multisegment network")
+                     segment_list.append(segment2_dict)
+                     network_dict["segments"] = segment_list
+                 ####### VIO Specific Changes #########
+                 if self.vim_type == "VIO":
+                     if vlan is not None:
+                         network_dict["provider:segmentation_id"] = vlan
+                     else:
+                         if self.config.get('dataplane_net_vlan_range') is None:
+                             raise vimconn.vimconnConflictException("You must provide "\
+                                 "'dataplane_net_vlan_range' in format [start_ID - end_ID]"\
+                                 "at config value before creating sriov network with vlan tag")
+                         network_dict["provider:segmentation_id"] = self._generate_vlanID()
+             network_dict["shared"] = shared
+             if self.config.get("disable_network_port_security"):
+                 network_dict["port_security_enabled"] = False
+             new_net = self.neutron.create_network({'network':network_dict})
+             # print new_net
+             # create subnetwork, even if there is no profile
+             if not ip_profile:
+                 ip_profile = {}
+             if not ip_profile.get('subnet_address'):
+                 #Fake subnet is required
+                 subnet_rand = random.randint(0, 255)
+                 ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
+             if 'ip_version' not in ip_profile:
+                 ip_profile['ip_version'] = "IPv4"
+             subnet = {"name": net_name+"-subnet",
+                     "network_id": new_net["network"]["id"],
+                     "ip_version": 4 if ip_profile['ip_version']=="IPv4" else 6,
+                     "cidr": ip_profile['subnet_address']
+                     }
+             # Gateway should be set to None if not needed. Otherwise openstack assigns one by default
+             if ip_profile.get('gateway_address'):
+                 subnet['gateway_ip'] = ip_profile['gateway_address']
+             else:
+                 subnet['gateway_ip'] = None
+             if ip_profile.get('dns_address'):
+                 subnet['dns_nameservers'] = ip_profile['dns_address'].split(";")
+             if 'dhcp_enabled' in ip_profile:
+                 subnet['enable_dhcp'] = False if \
+                     ip_profile['dhcp_enabled']=="false" or ip_profile['dhcp_enabled']==False else True
+             if ip_profile.get('dhcp_start_address'):
+                 subnet['allocation_pools'] = []
+                 subnet['allocation_pools'].append(dict())
+                 subnet['allocation_pools'][0]['start'] = ip_profile['dhcp_start_address']
+             if ip_profile.get('dhcp_count'):
+                 #parts = ip_profile['dhcp_start_address'].split('.')
+                 #ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
+                 ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address']))
+                 ip_int += ip_profile['dhcp_count'] - 1
+                 ip_str = str(netaddr.IPAddress(ip_int))
+                 subnet['allocation_pools'][0]['end'] = ip_str
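+                 # e.g. dhcp_start_address="10.0.0.10" with dhcp_count=50 yields an allocation pool of
+                 # {"start": "10.0.0.10", "end": "10.0.0.59"} (start + count - 1)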
+             #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
+             self.neutron.create_subnet({"subnet": subnet} )
+             if net_type == "data" and self.config.get('multisegment_support'):
+                 if self.config.get('l2gw_support'):
+                     l2gw_list = self.neutron.list_l2_gateways().get("l2_gateways", ())
+                     for l2gw in l2gw_list:
+                         l2gw_conn = {}
+                         l2gw_conn["l2_gateway_id"] = l2gw["id"]
+                         l2gw_conn["network_id"] = new_net["network"]["id"]
+                         l2gw_conn["segmentation_id"] = str(vlanID)
+                         new_l2gw_conn = self.neutron.create_l2_gateway_connection({"l2_gateway_connection": l2gw_conn})
+                         created_items["l2gwconn:" + str(new_l2gw_conn["l2_gateway_connection"]["id"])] = True
+             return new_net["network"]["id"], created_items
+         except Exception as e:
+             #delete l2gw connections (if any) before deleting the network
+             for k, v in created_items.items():
+                 if not v:  # skip already deleted
+                     continue
+                 try:
+                     k_item, _, k_id = k.partition(":")
+                     if k_item == "l2gwconn":
+                         self.neutron.delete_l2_gateway_connection(k_id)
+                 except Exception as e2:
+                     self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e2).__name__, e2))
+             if new_net:
+                 self.neutron.delete_network(new_net['network']['id'])
+             self._format_exception(e)
+     def get_network_list(self, filter_dict={}):
+         '''Obtain tenant networks of VIM
+         Filter_dict can be:
+             name: network name
+             id: network uuid
+             shared: boolean
+             tenant_id: tenant
+             admin_state_up: boolean
+             status: 'ACTIVE'
+         Returns the network list of dictionaries
+         '''
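+         # Illustrative usage (hypothetical values):
+         #     nets = conn.get_network_list({"name": "mgmt-net", "admin_state_up": True})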
+         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
+         try:
+             self._reload_connection()
+             filter_dict_os = filter_dict.copy()
+             if self.api_version3 and "tenant_id" in filter_dict_os:
+                 filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')  # TODO check
+             net_dict = self.neutron.list_networks(**filter_dict_os)
+             net_list = net_dict["networks"]
+             self.__net_os2mano(net_list)
+             return net_list
+         except (neExceptions.ConnectionFailed, ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+             self._format_exception(e)
+     def get_network(self, net_id):
+         '''Obtain details of network from VIM
+         Returns the network information from a network id'''
+         self.logger.debug(" Getting tenant network %s from VIM", net_id)
+         filter_dict={"id": net_id}
+         net_list = self.get_network_list(filter_dict)
+         if len(net_list)==0:
+             raise vimconn.vimconnNotFoundException("Network '{}' not found".format(net_id))
+         elif len(net_list)>1:
+             raise vimconn.vimconnConflictException("Found more than one network with this criteria")
+         net = net_list[0]
+         subnets=[]
+         for subnet_id in net.get("subnets", () ):
+             try:
+                 subnet = self.neutron.show_subnet(subnet_id)
+             except Exception as e:
+                 self.logger.error("osconnector.get_network(): Error getting subnet %s %s" % (net_id, str(e)))
+                 subnet = {"id": subnet_id, "fault": str(e)}
+             subnets.append(subnet)
+         net["subnets"] = subnets
+         net["encapsulation"] = net.get('provider:network_type')
+         net["encapsulation_type"] = net.get('provider:network_type')
+         net["segmentation_id"] = net.get('provider:segmentation_id')
+         net["encapsulation_id"] = net.get('provider:segmentation_id')
+         return net
+     def delete_network(self, net_id, created_items=None):
+         """
+         Removes a tenant network from VIM and its associated elements
+         :param net_id: VIM identifier of the network, provided by method new_network
+         :param created_items: dictionary with extra items to be deleted. provided by method new_network
+         Returns the network identifier or raises an exception upon error or when network is not found
+         """
+         self.logger.debug("Deleting network '%s' from VIM", net_id)
+         if created_items is None:
+             created_items = {}
+         try:
+             self._reload_connection()
+             #delete l2gw connections (if any) before deleting the network
+             for k, v in created_items.items():
+                 if not v:  # skip already deleted
+                     continue
+                 try:
+                     k_item, _, k_id = k.partition(":")
+                     if k_item == "l2gwconn":
+                         self.neutron.delete_l2_gateway_connection(k_id)
+                 except Exception as e:
+                     self.logger.error("Error deleting l2 gateway connection: {}: {}".format(type(e).__name__, e))
+             #delete VM ports attached to this networks before the network
+             ports = self.neutron.list_ports(network_id=net_id)
+             for p in ports['ports']:
+                 try:
+                     self.neutron.delete_port(p["id"])
+                 except Exception as e:
+                     self.logger.error("Error deleting port %s: %s", p["id"], str(e))
+             self.neutron.delete_network(net_id)
+             return net_id
+         except (neExceptions.ConnectionFailed, neExceptions.NetworkNotFoundClient, neExceptions.NeutronException,
+                 ksExceptions.ClientException, neExceptions.NeutronException, ConnectionError) as e:
+             self._format_exception(e)
+     def refresh_nets_status(self, net_list):
+         '''Get the status of the networks
+            Params: the list of network identifiers
+            Returns a dictionary with:
+                 net_id:         #VIM id of this network
+                     status:     #Mandatory. Text with one of:
+                                 #  DELETED (not found at vim)
+                                 #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                 #  OTHER (Vim reported other status not understood)
+                                 #  ERROR (VIM indicates an ERROR status)
+                                 #  ACTIVE, INACTIVE, DOWN (admin down),
+                                 #  BUILD (on building process)
+                                 #
+                     error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+         '''
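+         # Illustrative return value (hypothetical uuid):
+         #     {"<net-uuid>": {"status": "ACTIVE", "vim_info": "<yaml dump of the VIM network>"}}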
+         net_dict={}
+         for net_id in net_list:
+             net = {}
+             try:
+                 net_vim = self.get_network(net_id)
+                 if net_vim['status'] in netStatus2manoFormat:
+                     net["status"] = netStatus2manoFormat[ net_vim['status'] ]
+                 else:
+                     net["status"] = "OTHER"
+                     net["error_msg"] = "VIM status reported " + net_vim['status']
+                 if net['status'] == "ACTIVE" and not net_vim['admin_state_up']:
+                     net['status'] = 'DOWN'
+                 net['vim_info'] = self.serialize(net_vim)
+                 if net_vim.get('fault'):  #TODO
+                     net['error_msg'] = str(net_vim['fault'])
+             except vimconn.vimconnNotFoundException as e:
+                 self.logger.error("Exception getting net status: %s", str(e))
+                 net['status'] = "DELETED"
+                 net['error_msg'] = str(e)
+             except vimconn.vimconnException as e:
+                 self.logger.error("Exception getting net status: %s", str(e))
+                 net['status'] = "VIM_ERROR"
+                 net['error_msg'] = str(e)
+             net_dict[net_id] = net
+         return net_dict
+     def get_flavor(self, flavor_id):
+         '''Obtain flavor details from the VIM. Returns the flavor dict details'''
+         self.logger.debug("Getting flavor '%s'", flavor_id)
+         try:
+             self._reload_connection()
+             flavor = self.nova.flavors.find(id=flavor_id)
+             #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+             return flavor.to_dict()
+         except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
+             self._format_exception(e)
+     def get_flavor_id_from_data(self, flavor_dict):
+         """Obtain flavor id that match the flavor description
+            Returns the flavor_id or raises a vimconnNotFoundException
+            flavor_dict: contains the required ram, vcpus, disk
+            If 'use_existing_flavors' is set to True at config, the closest flavor that provides same or more ram, vcpus
+                 and disk is returned. Otherwise a flavor with exactly the same ram, vcpus and disk is returned or a
+                 vimconnNotFoundException is raised
+         """
+         exact_match = not self.config.get('use_existing_flavors')
+         try:
+             self._reload_connection()
+             flavor_candidate_id = None
+             flavor_candidate_data = (10000, 10000, 10000)
+             flavor_target = (flavor_dict["ram"], flavor_dict["vcpus"], flavor_dict["disk"])
+             # numa=None
+             extended = flavor_dict.get("extended", {})
+             if extended:
+                 #TODO
+                 raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemented")
+                 # if len(numas) > 1:
+                 #     raise vimconn.vimconnNotFoundException("Cannot find any flavor with more than one numa")
+                 # numa=numas[0]
+                 # numas = extended.get("numas")
+             for flavor in self.nova.flavors.list():
+                 epa = flavor.get_keys()
+                 if epa:
+                     continue
+                     # TODO
+                 flavor_data = (flavor.ram, flavor.vcpus, flavor.disk)
+                 if flavor_data == flavor_target:
+                     return flavor.id
+                 elif not exact_match and flavor_target < flavor_data < flavor_candidate_data:
+                     flavor_candidate_id = flavor.id
+                     flavor_candidate_data = flavor_data
+             if not exact_match and flavor_candidate_id:
+                 return flavor_candidate_id
+             raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
+         except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
+             self._format_exception(e)
+     def process_resource_quota(self, quota, prefix, extra_specs):
+         """
+         :param prefix:
+         :param extra_specs: 
+         :return:
+         """
+         if 'limit' in quota:
+             extra_specs["quota:" + prefix + "_limit"] = quota['limit']
+         if 'reserve' in quota:
+             extra_specs["quota:" + prefix + "_reservation"] = quota['reserve']
+         if 'shares' in quota:
+             extra_specs["quota:" + prefix + "_shares_level"] = "custom"
+             extra_specs["quota:" + prefix + "_shares_share"] = quota['shares']
+     def new_flavor(self, flavor_data, change_name_if_used=True):
+         '''Adds a tenant flavor to openstack VIM
+         if change_name_if_used is True, it will change the name in case of conflict, because name repetition is not supported
+         Returns the flavor identifier
+         '''
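+         # Illustrative flavor_data (hypothetical values):
+         #     {"name": "vnf-flavor", "ram": 4096, "vcpus": 2, "disk": 10,
+         #      "extended": {"numas": [{"paired-threads": 2, "memory": 4}]}}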
+         self.logger.debug("Adding flavor '%s'", str(flavor_data))
+         retry=0
+         max_retries=3
+         name_suffix = 0
+         try:
+             name=flavor_data['name']
+             while retry<max_retries:
+                 retry+=1
+                 try:
+                     self._reload_connection()
+                     if change_name_if_used:
+                         #get used names
+                         fl_names=[]
+                         fl=self.nova.flavors.list()
+                         for f in fl:
+                             fl_names.append(f.name)
+                         while name in fl_names:
+                             name_suffix += 1
+                             name = flavor_data['name']+"-" + str(name_suffix)
+                     ram = flavor_data.get('ram',64)
+                     vcpus = flavor_data.get('vcpus',1)
+                     extra_specs={}
+                     extended = flavor_data.get("extended")
+                     if extended:
+                         numas=extended.get("numas")
+                         if numas:
+                             numa_nodes = len(numas)
+                             if numa_nodes > 1:
+                                 return -1, "Can not add flavor with more than one numa"
+                             extra_specs["hw:numa_nodes"] = str(numa_nodes)
+                             extra_specs["hw:mem_page_size"] = "large"
+                             extra_specs["hw:cpu_policy"] = "dedicated"
+                             extra_specs["hw:numa_mempolicy"] = "strict"
+                             if self.vim_type == "VIO":
+                                 extra_specs["vmware:extra_config"] = '{"numa.nodeAffinity":"0"}'
+                                 extra_specs["vmware:latency_sensitivity_level"] = "high"
+                             for numa in numas:
+                                 #overwrite ram and vcpus
+                                 #check if key 'memory' is present in numa else use ram value at flavor
+                                 if 'memory' in numa:
+                                     ram = numa['memory']*1024
+                                 #See for reference: https://specs.openstack.org/openstack/nova-specs/specs/mitaka/implemented/virt-driver-cpu-thread-pinning.html
+                                 extra_specs["hw:cpu_sockets"] = 1
+                                 if 'paired-threads' in numa:
+                                     vcpus = numa['paired-threads']*2
+                                     #cpu_thread_policy "require" implies that the compute node must have an STM architecture
+                                     extra_specs["hw:cpu_thread_policy"] = "require"
+                                     extra_specs["hw:cpu_policy"] = "dedicated"
+                                 elif 'cores' in numa:
+                                     vcpus = numa['cores']
+                                     # cpu_thread_policy "prefer" implies that the host must not have an SMT architecture, or a non-SMT architecture will be emulated
+                                     extra_specs["hw:cpu_thread_policy"] = "isolate"
+                                     extra_specs["hw:cpu_policy"] = "dedicated"
+                                 elif 'threads' in numa:
+                                     vcpus = numa['threads']
+                                     # cpu_thread_policy "prefer" implies that the host may or may not have an SMT architecture
+                                     extra_specs["hw:cpu_thread_policy"] = "prefer"
+                                     extra_specs["hw:cpu_policy"] = "dedicated"
+                                 # for interface in numa.get("interfaces",() ):
+                                 #     if interface["dedicated"]=="yes":
+                                 #         raise vimconn.vimconnException("Passthrough interfaces are not supported for the openstack connector", http_code=vimconn.HTTP_Service_Unavailable)
+                                 #     #TODO, add the key 'pci_passthrough:alias"="<label at config>:<number ifaces>"' when a way to connect it is available
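+                                 # worked example (non-VIO case): numa={"paired-threads": 2, "memory": 4} yields
+                                 # ram=4096, vcpus=4 and extra_specs={"hw:numa_nodes": "1",
+                                 #     "hw:mem_page_size": "large", "hw:numa_mempolicy": "strict",
+                                 #     "hw:cpu_sockets": 1, "hw:cpu_policy": "dedicated",
+                                 #     "hw:cpu_thread_policy": "require"}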
+                         elif extended.get("cpu-quota"):
+                             self.process_resource_quota(extended.get("cpu-quota"), "cpu", extra_specs)
+                         if extended.get("mem-quota"):
+                             self.process_resource_quota(extended.get("mem-quota"), "memory", extra_specs)
+                         if extended.get("vif-quota"):
+                             self.process_resource_quota(extended.get("vif-quota"), "vif", extra_specs)
+                         if extended.get("disk-io-quota"):
+                             self.process_resource_quota(extended.get("disk-io-quota"), "disk_io", extra_specs)
+                     #create flavor
+                     new_flavor=self.nova.flavors.create(name,
+                                     ram,
+                                     vcpus,
+                                     flavor_data.get('disk',0),
+                                     is_public=flavor_data.get('is_public', True)
+                                 )
+                     #add metadata
+                     if extra_specs:
+                         new_flavor.set_keys(extra_specs)
+                     return new_flavor.id
+                 except nvExceptions.Conflict as e:
+                     if change_name_if_used and retry < max_retries:
+                         continue
+                     self._format_exception(e)
+         #except nvExceptions.BadRequest as e:
+         except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError, KeyError) as e:
+             self._format_exception(e)
+     def delete_flavor(self,flavor_id):
+         '''Deletes a tenant flavor from openstack VIM. Returns the old flavor_id
+         '''
+         try:
+             self._reload_connection()
+             self.nova.flavors.delete(flavor_id)
+             return flavor_id
+         #except nvExceptions.BadRequest as e:
+         except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
+             self._format_exception(e)
+     def new_image(self,image_dict):
+         '''
+         Adds a tenant image to VIM. image_dict is a dictionary with:
+             name: name
+             disk_format: qcow2, vhd, vmdk, raw (by default), ...
+             location: path or URI
+             public: "yes" or "no"
+             metadata: metadata of the image
+         Returns the image_id
+         '''
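+         # Illustrative image_dict (hypothetical path and metadata):
+         #     image_id = conn.new_image({"name": "cirros-0.4", "location": "/tmp/cirros-0.4.qcow2",
+         #                                "metadata": {"os_type": "linux"}})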
+         retry=0
+         max_retries=3
+         while retry<max_retries:
+             retry+=1
+             try:
+                 self._reload_connection()
+                 #determine format  http://docs.openstack.org/developer/glance/formats.html
+                 if "disk_format" in image_dict:
+                     disk_format=image_dict["disk_format"]
+                 else: #autodiscover based on extension
+                     if image_dict['location'].endswith(".qcow2"):
+                         disk_format="qcow2"
+                     elif image_dict['location'].endswith(".vhd"):
+                         disk_format="vhd"
+                     elif image_dict['location'].endswith(".vmdk"):
+                         disk_format="vmdk"
+                     elif image_dict['location'].endswith(".vdi"):
+                         disk_format="vdi"
+                     elif image_dict['location'].endswith(".iso"):
+                         disk_format="iso"
+                     elif image_dict['location'].endswith(".aki"):
+                         disk_format="aki"
+                     elif image_dict['location'].endswith(".ari"):
+                         disk_format="ari"
+                     elif image_dict['location'].endswith(".ami"):
+                         disk_format="ami"
+                     else:
+                         disk_format="raw"
+                 self.logger.debug("new_image: '%s' loading from '%s'", image_dict['name'], image_dict['location'])
+                 if self.vim_type == "VIO":
+                     container_format = "bare"
+                     if 'container_format' in image_dict:
+                         container_format = image_dict['container_format']
+                     new_image = self.glance.images.create(name=image_dict['name'], container_format=container_format,
+                                                           disk_format=disk_format)
+                 else:
+                     new_image = self.glance.images.create(name=image_dict['name'])
+                 if image_dict['location'].startswith("http"):
+                     # TODO there is no method for direct download. It must be downloaded locally with requests
+                     raise vimconn.vimconnNotImplemented("Cannot create image from URL")
+                 else: #local path
+                     with open(image_dict['location']) as fimage:
+                         self.glance.images.upload(new_image.id, fimage)
+                         #new_image = self.glancev1.images.create(name=image_dict['name'], is_public=image_dict.get('public',"yes")=="yes",
+                         #    container_format="bare", data=fimage, disk_format=disk_format)
+                 metadata_to_load = image_dict.get('metadata', {})
+                 # TODO 'location' is a reserved word in current openstack versions. Fixed for VIO; please check for openstack
+                 if self.vim_type == "VIO":
+                     metadata_to_load['upload_location'] = image_dict['location']
+                 else:
+                     metadata_to_load['location'] = image_dict['location']
+                 self.glance.images.update(new_image.id, **metadata_to_load)
+                 return new_image.id
+             except (nvExceptions.Conflict, ksExceptions.ClientException, nvExceptions.ClientException) as e:
+                 self._format_exception(e)
+             except (HTTPException, gl1Exceptions.HTTPException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+                 if retry < max_retries:
+                     continue
+                 self._format_exception(e)
+             except IOError as e:  #can not open the file
+                 raise vimconn.vimconnConnectionException(type(e).__name__ + ": " + str(e)+ " for " + image_dict['location'],
+                                                          http_code=vimconn.HTTP_Bad_Request)
+     def delete_image(self, image_id):
+         '''Deletes a tenant image from openstack VIM. Returns the old id
+         '''
+         try:
+             self._reload_connection()
+             self.glance.images.delete(image_id)
+             return image_id
+         except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, gl1Exceptions.HTTPNotFound, ConnectionError) as e: #TODO remove
+             self._format_exception(e)
+     def get_image_id_from_path(self, path):
+         '''Get the image id from image path in the VIM database. Returns the image_id'''
+         try:
+             self._reload_connection()
+             images = self.glance.images.list()
+             for image in images:
+                 if image.metadata.get("location")==path:
+                     return image.id
+             raise vimconn.vimconnNotFoundException("image with location '{}' not found".format( path))
+         except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+             self._format_exception(e)
+     def get_image_list(self, filter_dict={}):
+         '''Obtain tenant images from VIM
+         Filter_dict can be:
+             id: image id
+             name: image name
+             checksum: image checksum
+         Returns the image list of dictionaries:
+             [{<the fields at Filter_dict plus some VIM specific>}, ...]
+             List can be empty
+         '''
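+         # e.g. conn.get_image_list({"name": "cirros-0.4"}) -> [{"name": "cirros-0.4", "id": "<uuid>", ...}]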
+         self.logger.debug("Getting image list from VIM filter: '%s'", str(filter_dict))
+         try:
+             self._reload_connection()
+             # Filter only by the supported fields (name, id, checksum); other keys are ignored.
+             image_list = self.glance.images.list()
+             filtered_list = []
+             for image in image_list:
+                 try:
+                     if filter_dict.get("name") and image["name"] != filter_dict["name"]:
+                         continue
+                     if filter_dict.get("id") and image["id"] != filter_dict["id"]:
+                         continue
+                     if filter_dict.get("checksum") and image["checksum"] != filter_dict["checksum"]:
+                         continue
+                     filtered_list.append(image.copy())
+                 except gl1Exceptions.HTTPNotFound:
+                     pass
+             return filtered_list
+         except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
+             self._format_exception(e)
+     def __wait_for_vm(self, vm_id, status):
+         """wait until vm is in the desired status and return True.
+         If the VM gets in ERROR status, return false.
+         If the timeout is reached generate an exception"""
+         elapsed_time = 0
+         while elapsed_time < server_timeout:
+             vm_status = self.nova.servers.get(vm_id).status
+             if vm_status == status:
+                 return True
+             if vm_status == 'ERROR':
+                 return False
+             time.sleep(5)
+             elapsed_time += 5
+         # if we exceeded the timeout rollback
+         if elapsed_time >= server_timeout:
+             raise vimconn.vimconnException('Timeout waiting for instance ' + vm_id + ' to get ' + status,
+                                            http_code=vimconn.HTTP_Request_Timeout)
+     def _get_openstack_availablity_zones(self):
+         """
+         Get the availability zones available from openstack
+         :return: list of zone names (excluding 'internal'), or None on error
+         """
+         try:
+             openstack_availability_zone = self.nova.availability_zones.list()
+             openstack_availability_zone = [str(zone.zoneName) for zone in openstack_availability_zone
+                                            if zone.zoneName != 'internal']
+             return openstack_availability_zone
+         except Exception:
+             return None
+     def _set_availablity_zones(self):
+         """
+         Set vim availability zone
+         :return:
+         """
+         if 'availability_zone' in self.config:
+             vim_availability_zones = self.config.get('availability_zone')
+             if isinstance(vim_availability_zones, str):
+                 self.availability_zone = [vim_availability_zones]
+             elif isinstance(vim_availability_zones, list):
+                 self.availability_zone = vim_availability_zones
+         else:
+             self.availability_zone = self._get_openstack_availablity_zones()
+     def _get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+         """
+         Return the availability zone to be used by the created VM.
+         :return: The VIM availability zone to be used or None
+         """
+         if availability_zone_index is None:
+             if not self.config.get('availability_zone'):
+                 return None
+             elif isinstance(self.config.get('availability_zone'), str):
+                 return self.config['availability_zone']
+             else:
+                 # TODO consider using a different parameter at config for default AV and AV list match
+                 return self.config['availability_zone'][0]
+         vim_availability_zones = self.availability_zone
+         # check if the VIM offers enough availability zones to cover those described in the VNFD
+         if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+             # check if all the names of NFV AV match VIM AV names
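+             # e.g. VNFD zones ["zone-A", "zone-B"] vs VIM zones ["nova-1", "nova-2"]: names do not match,
+             # so the VNFD index is mapped onto the VIM list (match_by_index); if every VNFD name exists
+             # at the VIM, the VNFD name at availability_zone_index is used directly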
+             match_by_index = False
+             for av in availability_zone_list:
+                 if av not in vim_availability_zones:
+                     match_by_index = True
+                     break
+             if match_by_index:
+                 return vim_availability_zones[availability_zone_index]
+             else:
+                 return availability_zone_list[availability_zone_index]
+         else:
+             raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
+     def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+                        availability_zone_index=None, availability_zone_list=None):
+         """Adds a VM instance to VIM
+         Params:
+             start: indicates if VM must start or boot in pause mode. Ignored
+             image_id,flavor_id: image and flavor uuid
+             net_list: list of interfaces, each one is a dictionary with:
+                 name:
+                 net_id: network uuid to connect
+                 vpci: virtual PCI to assign, ignored because openstack lacks it #TODO
+                 model: interface model, ignored #TODO
+                 mac_address: used for SR-IOV ifaces #TODO for other types
+                 use: 'data', 'bridge',  'mgmt'
+                 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
+                 vim_id: filled/added by this function
+                 floating_ip: True/False (or it can be None)
+             'cloud_config': (optional) dictionary with:
+             'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+             'users': (optional) list of users to be inserted, each item is a dict with:
+                 'name': (mandatory) user name,
+                 'key-pairs': (optional) list of strings with the public key to be inserted to the user
+             'user-data': (optional) string is a text script to be passed directly to cloud-init
+             'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                 'dest': (mandatory) string with the destination absolute path
+                 'encoding': (optional, by default text). Can be one of:
+                     'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                 'content' (mandatory): string with the content of the file
+                 'permissions': (optional) string with file permissions, typically octal notation '0644'
+                 'owner': (optional) file owner, string with the format 'owner:group'
+             'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+             'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                 'size': (mandatory) string with the size of the disk in GB
+                 'vim_id' (optional) should use this existing volume id
+             availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
+             availability_zone_list: list of availability zones given by user in the VNFD descriptor. Ignored if
+                 availability_zone_index is None
+                 #TODO ip, security groups
+         Returns a tuple with the instance identifier and created_items or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
+         try:
+             server = None
+             created_items = {}
+             # metadata = {}
+             net_list_vim = []
+             external_network = []   # list of external networks to be connected to instance, later on used to create floating_ip
+             no_secured_ports = []   # List of port ids with port-security disabled
+             self._reload_connection()
+             # metadata_vpci = {}   # For a specific neutron plugin
+             block_device_mapping = None
+             for net in net_list:
+                 if not net.get("net_id"):   # skip non connected iface
+                     continue
+                 port_dict = {
+                     "network_id": net["net_id"],
+                     "name": net.get("name"),
+                     "admin_state_up": True
+                 }
+                 if self.config.get("security_groups") and net.get("port_security") is not False and \
+                         not self.config.get("no_port_security_extension"):
+                     if not self.security_groups_id:
+                         self._get_ids_from_name()
+                     port_dict["security_groups"] = self.security_groups_id
+                 if net["type"]=="virtual":
+                     pass
+                     # if "vpci" in net:
+                     #     metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
+                 elif net["type"] == "VF" or net["type"] == "SR-IOV":  # for VF
+                     # if "vpci" in net:
+                     #     if "VF" not in metadata_vpci:
+                     #         metadata_vpci["VF"]=[]
+                     #     metadata_vpci["VF"].append([ net["vpci"], "" ])
+                     port_dict["binding:vnic_type"]="direct"
+                     # VIO specific Changes
+                     if self.vim_type == "VIO":
+                         # Need to create port with port_security_enabled = False and no-security-groups
+                         port_dict["port_security_enabled"]=False
+                         port_dict["provider_security_groups"]=[]
+                         port_dict["security_groups"]=[]
+                 else:   # For PT PCI-PASSTHROUGH
+                     # VIO specific Changes
+                     # Current VIO release does not support port with type 'direct-physical'
+                     # So no need to create virtual port in case of PCI-device.
+                     # Will update port_dict code when support gets added in next VIO release
+                     if self.vim_type == "VIO":
+                         raise vimconn.vimconnNotSupportedException(
+                             "Current VIO release does not support full passthrough (PT)")
+                     # if "vpci" in net:
+                     #     if "PF" not in metadata_vpci:
+                     #         metadata_vpci["PF"]=[]
+                     #     metadata_vpci["PF"].append([ net["vpci"], "" ])
+                     port_dict["binding:vnic_type"]="direct-physical"
+                 if not port_dict["name"]:
+                     port_dict["name"]=name
+                 if net.get("mac_address"):
+                     port_dict["mac_address"]=net["mac_address"]
+                 if net.get("ip_address"):
+                     port_dict["fixed_ips"] = [{'ip_address': net["ip_address"]}]
+                     # TODO add 'subnet_id': <subnet_id>
+                 new_port = self.neutron.create_port({"port": port_dict })
+                 created_items["port:" + str(new_port["port"]["id"])] = True
+                 net["mac_adress"] = new_port["port"]["mac_address"]
+                 net["vim_id"] = new_port["port"]["id"]
+                 # if a network without subnetwork is used, it will return an empty list
+                 fixed_ips = new_port["port"].get("fixed_ips")
+                 if fixed_ips:
+                     net["ip"] = fixed_ips[0].get("ip_address")
+                 else:
+                     net["ip"] = None
+                 port = {"port-id": new_port["port"]["id"]}
+                 if float(self.nova.api_version.get_string()) >= 2.32:
+                     port["tag"] = new_port["port"]["name"]
+                 net_list_vim.append(port)
+                 if net.get('floating_ip', False):
+                     net['exit_on_floating_ip_error'] = True
+                     external_network.append(net)
+                 elif net['use'] == 'mgmt' and self.config.get('use_floating_ip'):
+                     net['exit_on_floating_ip_error'] = False
+                     external_network.append(net)
+                     net['floating_ip'] = self.config.get('use_floating_ip')
+                 # If port security is disabled when the port has not yet been attached to the VM, then all vm traffic is dropped.
+                 # As a workaround we wait until the VM is active and then disable the port-security
+                 if net.get("port_security") == False and not self.config.get("no_port_security_extension"):
+                     no_secured_ports.append(new_port["port"]["id"])
+             # if metadata_vpci:
+             #     metadata = {"pci_assignement": json.dumps(metadata_vpci)}
+             #     if len(metadata["pci_assignement"]) >255:
+             #         #limit the metadata size
+             #         #metadata["pci_assignement"] = metadata["pci_assignement"][0:255]
+             #         self.logger.warn("Metadata deleted since it exceeds the expected length (255) ")
+             #         metadata = {}
+             self.logger.debug("name '%s' image_id '%s'flavor_id '%s' net_list_vim '%s' description '%s'",
+                               name, image_id, flavor_id, str(net_list_vim), description)
+             # cloud config
+             config_drive, userdata = self._create_user_data(cloud_config)
+             # Create additional volumes in case these are present in disk_list
+             base_disk_index = ord('b')
+             if disk_list:
+                 block_device_mapping = {}
+                 for disk in disk_list:
+                     if disk.get('vim_id'):
+                         block_device_mapping['_vd' + chr(base_disk_index)] = disk['vim_id']
+                     else:
+                         if 'image_id' in disk:
+                             volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                                 chr(base_disk_index), imageRef=disk['image_id'])
+                         else:
+                             volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                                                 chr(base_disk_index))
+                         created_items["volume:" + str(volume.id)] = True
+                         block_device_mapping['_vd' + chr(base_disk_index)] = volume.id
+                     base_disk_index += 1
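+                 # e.g. disk_list=[{"size": "10"}, {"vim_id": "<volume-uuid>"}] (hypothetical) yields
+                 # block_device_mapping={"_vdb": "<new-volume-id>", "_vdc": "<volume-uuid>"} and marks
+                 # "volume:<new-volume-id>" in created_items for rollback/deletion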
+                 # Wait until created volumes are with status available
+                 elapsed_time = 0
+                 while elapsed_time < volume_timeout:
+                     for created_item in created_items:
+                         v, _, volume_id = created_item.partition(":")
+                         if v == 'volume':
+                             if self.cinder.volumes.get(volume_id).status != 'available':
+                                 break
+                     else:  # all ready: break from while
+                         break
+                     time.sleep(5)
+                     elapsed_time += 5
+                 # If we exceeded the timeout rollback
+                 if elapsed_time >= volume_timeout:
+                     raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
+                                                    http_code=vimconn.HTTP_Request_Timeout)
+             # get availability Zone
+             vm_av_zone = self._get_vm_availability_zone(availability_zone_index, availability_zone_list)
+             self.logger.debug("nova.servers.create({}, {}, {}, nics={}, security_groups={}, "
+                               "availability_zone={}, key_name={}, userdata={}, config_drive={}, "
+                               "block_device_mapping={})".format(name, image_id, flavor_id, net_list_vim,
+                                                                 self.config.get("security_groups"), vm_av_zone,
+                                                                 self.config.get('keypair'), userdata, config_drive,
+                                                                 block_device_mapping))
+             server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim,
+                                               security_groups=self.config.get("security_groups"),
+                                               # TODO remove security_groups in future versions. Already at neutron port
+                                               availability_zone=vm_av_zone,
+                                               key_name=self.config.get('keypair'),
+                                               userdata=userdata,
+                                               config_drive=config_drive,
+                                               block_device_mapping=block_device_mapping
+                                               )  # , description=description)
+             vm_start_time = time.time()
+             # Previously mentioned workaround to wait until the VM is active and then disable the port-security
+             if no_secured_ports:
+                 self.__wait_for_vm(server.id, 'ACTIVE')
+             for port_id in no_secured_ports:
+                 try:
+                     self.neutron.update_port(port_id,
+                                              {"port": {"port_security_enabled": False, "security_groups": None}})
+                 except Exception as e:
+                     raise vimconn.vimconnException("It was not possible to disable port security for port {}".format(
+                         port_id))
+             # print "DONE :-)", server
+             # pool_id = None
+             if external_network:
+                 floating_ips = self.neutron.list_floatingips().get("floatingips", ())
+             for floating_network in external_network:
+                 try:
+                     assigned = False
+                     while not assigned:
+                         if floating_ips:
+                             ip = floating_ips.pop(0)
+                             if ip.get("port_id", False) or ip.get('tenant_id') != server.tenant_id:
+                                 continue
+                             if isinstance(floating_network['floating_ip'], str):
+                                 if ip.get("floating_network_id") != floating_network['floating_ip']:
+                                     continue
+                             free_floating_ip = ip["id"]
+                         else:
+                             if isinstance(floating_network['floating_ip'], str) and \
+                                 floating_network['floating_ip'].lower() != "true":
+                                 pool_id = floating_network['floating_ip']
+                             else:
+                                 # Find the external network
+                                 external_nets = list()
+                                 for net in self.neutron.list_networks()['networks']:
+                                     if net['router:external']:
+                                         external_nets.append(net)
+                                 if len(external_nets) == 0:
+                                     raise vimconn.vimconnException("Cannot create floating_ip automatically since no external "
+                                                                    "network is present",
+                                                                     http_code=vimconn.HTTP_Conflict)
+                                 if len(external_nets) > 1:
+                                     raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple "
+                                                                    "external networks are present",
+                                                                    http_code=vimconn.HTTP_Conflict)
+                                 pool_id = external_nets[0].get('id')
+                             param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}}
+                             try:
+                                 # self.logger.debug("Creating floating IP")
+                                 new_floating_ip = self.neutron.create_floatingip(param)
+                                 free_floating_ip = new_floating_ip['floatingip']['id']
+                             except Exception as e:
+                                 raise vimconn.vimconnException(type(e).__name__ + ": Cannot create new floating_ip " +
+                                                                str(e), http_code=vimconn.HTTP_Conflict)
+                         while not assigned:
+                             try:
+                                 # the vim_id key contains the neutron.port_id
+                                 self.neutron.update_floatingip(free_floating_ip,
+                                                                {"floatingip": {"port_id": floating_network["vim_id"]}})
+                                 # Using nova is deprecated on nova client 10.0
+                                 assigned = True
+                             except Exception as e:
+                                 # openstack needs some time after VM creation to assign an IP, so retry if it fails
+                                 vm_status = self.nova.servers.get(server.id).status
+                                 if vm_status != 'ACTIVE' and vm_status != 'ERROR':
+                                     if time.time() - vm_start_time < server_timeout:
+                                         time.sleep(5)
+                                         continue
+                                 raise vimconn.vimconnException(
+                                     "Cannot create floating_ip: {} {}".format(type(e).__name__, e),
+                                     http_code=vimconn.HTTP_Conflict)
+                 except Exception as e:
+                     if not floating_network['exit_on_floating_ip_error']:
+                         self.logger.warning("Cannot create floating_ip. %s", str(e))
+                         continue
+                     raise
+             return server.id, created_items
+ #        except nvExceptions.NotFound as e:
+ #            error_value=-vimconn.HTTP_Not_Found
+ #            error_text= "vm instance %s not found" % vm_id
+ #        except TypeError as e:
+ #            raise vimconn.vimconnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
+         except Exception as e:
+             server_id = None
+             if server:
+                 server_id = server.id
+             try:
+                 self.delete_vminstance(server_id, created_items)
+             except Exception as e2:
+                 self.logger.error("new_vminstance rollback fail {}".format(e2))
+             self._format_exception(e)
+     def get_vminstance(self,vm_id):
+         '''Returns the VM instance information from VIM'''
+         #self.logger.debug("Getting VM from VIM")
+         try:
+             self._reload_connection()
+             server = self.nova.servers.find(id=vm_id)
+             #TODO parse input and translate to VIM format (openmano_schemas.new_vminstance_response_schema)
+             return server.to_dict()
+         except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
+             self._format_exception(e)
+     def get_vminstance_console(self,vm_id, console_type="vnc"):
+         '''
+         Get a console for the virtual machine
+         Params:
+             vm_id: uuid of the VM
+             console_type, can be:
+                 "novnc" (by default), "xvpvnc" for VNC types,
+                 "rdp-html5" for RDP types, "spice-html5" for SPICE types
+         Returns dict with the console parameters:
+                 protocol: ssh, ftp, http, https, ...
+                 server:   usually ip address
+                 port:     the http, ssh, ... port
+                 suffix:   extra text, e.g. the http path and query string
+         '''
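+         # Illustrative return value per the docstring above (hypothetical values):
+         #     {"protocol": "http", "server": "10.0.0.1", "port": "6080", "suffix": "vnc_auto.html?token=..."}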
+         self.logger.debug("Getting VM CONSOLE from VIM")
+         try:
+             self._reload_connection()
+             server = self.nova.servers.find(id=vm_id)
+             if console_type is None or console_type == "novnc":
+                 console_dict = server.get_vnc_console("novnc")
+             elif console_type == "xvpvnc":
+                 console_dict = server.get_vnc_console(console_type)
+             elif console_type == "rdp-html5":
+                 console_dict = server.get_rdp_console(console_type)
+             elif console_type == "spice-html5":
+                 console_dict = server.get_spice_console(console_type)
+             else:
+                 raise vimconn.vimconnException("console type '{}' not allowed".format(console_type), http_code=vimconn.HTTP_Bad_Request)
+             console_dict1 = console_dict.get("console")
+             if console_dict1:
+                 console_url = console_dict1.get("url")
+                 if console_url:
+                     #parse console_url
+                     protocol_index = console_url.find("//")
+                     suffix_index = console_url[protocol_index+2:].find("/") + protocol_index+2
+                     port_index = console_url[protocol_index+2:suffix_index].find(":") + protocol_index+2
+                     if protocol_index < 0 or port_index<0 or suffix_index<0:
+                         raise vimconn.vimconnUnexpectedResponse("Unexpected response from VIM")
+                     console_dict={"protocol": console_url[0:protocol_index],
+                                   "server":   console_url[protocol_index+2:port_index],
+                                   "port":     console_url[port_index:suffix_index],
+                                   "suffix":   console_url[suffix_index+1:]
+                                   }
+                     return console_dict
+             raise vimconn.vimconnUnexpectedResponse("Unexpected response from VIM")
+         except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.BadRequest, ConnectionError) as e:
+             self._format_exception(e)
+     def delete_vminstance(self, vm_id, created_items=None):
+         '''Removes a VM instance from VIM. Returns the old identifier
+         '''
+         #print "osconnector: Getting VM from VIM"
+         if created_items is None:
+             created_items = {}
+         try:
+             self._reload_connection()
+             # delete the VM ports attached to these networks before deleting the virtual machine
+             for k, v in created_items.items():
+                 if not v:  # skip already deleted
+                     continue
+                 try:
+                     k_item, _, k_id = k.partition(":")
+                     if k_item == "port":
+                         self.neutron.delete_port(k_id)
+                 except Exception as e:
+                     self.logger.error("Error deleting port: {}: {}".format(type(e).__name__, e))
+             # #commented out because detaching the volumes makes servers.delete not work properly ?!?
+             # #detach attached volumes
+             # server = self.nova.servers.get(vm_id)
+             # volumes_attached_dict = server._info['os-extended-volumes:volumes_attached']   #volume['id']
+             # #for volume in volumes_attached_dict:
+             # #    self.cinder.volumes.detach(volume['id'])
+             if vm_id:
+                 self.nova.servers.delete(vm_id)
+             # delete volumes. Although detached, they must be in 'available' status before deletion;
+             # we ensure that in this loop
+             keep_waiting = True
+             elapsed_time = 0
+             while keep_waiting and elapsed_time < volume_timeout:
+                 keep_waiting = False
+                 for k, v in created_items.items():
+                     if not v:  # skip already deleted
+                         continue
+                     try:
+                         k_item, _, k_id = k.partition(":")
+                         if k_item == "volume":
+                             if self.cinder.volumes.get(k_id).status != 'available':
+                                 keep_waiting = True
+                             else:
+                                 self.cinder.volumes.delete(k_id)
+                     except Exception as e:
+                         self.logger.error("Error deleting volume: {}: {}".format(type(e).__name__, e))
+                 if keep_waiting:
+                     time.sleep(1)
+                     elapsed_time += 1
+             return None
+         except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
+             self._format_exception(e)
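+         # Example of the created_items bookkeeping consumed above (illustrative,
+         # the UUIDs are made up). Keys are "<item-type>:<vim-id>"; a true-ish value
+         # means the item still exists and must be cleaned up:
+         #     created_items = {"port:6f32c2d8-...": True, "volume:a91c8f2a-...": True}
+         # Ports are deleted first, then the server, then volumes once 'available'.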
+     def refresh_vms_status(self, vm_list):
+         '''Get the status of the virtual machines and their interfaces/ports
+            Params: the list of VM identifiers
+            Returns a dictionary with:
+                 vm_id:          #VIM id of this Virtual Machine
+                     status:     #Mandatory. Text with one of:
+                                 #  DELETED (not found at vim)
+                                 #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                 #  OTHER (Vim reported other status not understood)
+                                 #  ERROR (VIM indicates an ERROR status)
+                                 #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                                 #  CREATING (on building process), ERROR
+                                 #  ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address)
+                                 #
+                     error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                     interfaces:
+                      -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                         mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                         vim_net_id:       #network id where this interface is connected
+                         vim_interface_id: #interface/port VIM id
+                         ip_address:       #null, or text with IPv4, IPv6 address
+                         compute_node:     #identification of compute node where PF,VF interface is allocated
+                         pci:              #PCI address of the NIC that hosts the PF,VF
+                         vlan:             #physical VLAN used for VF
+         '''
+         vm_dict={}
+         self.logger.debug("refresh_vms status: Getting tenant VM instance information from VIM")
+         for vm_id in vm_list:
+             vm={}
+             try:
+                 vm_vim = self.get_vminstance(vm_id)
+                 if vm_vim['status'] in vmStatus2manoFormat:
+                     vm['status']    =  vmStatus2manoFormat[ vm_vim['status'] ]
+                 else:
+                     vm['status']    = "OTHER"
+                     vm['error_msg'] = "VIM status reported " + vm_vim['status']
+                 vm['vim_info'] = self.serialize(vm_vim)
+                 vm["interfaces"] = []
+                 if vm_vim.get('fault'):
+                     vm['error_msg'] = str(vm_vim['fault'])
+                 #get interfaces
+                 try:
+                     self._reload_connection()
+                     port_dict = self.neutron.list_ports(device_id=vm_id)
+                     for port in port_dict["ports"]:
+                         interface={}
+                         interface['vim_info'] = self.serialize(port)
+                         interface["mac_address"] = port.get("mac_address")
+                         interface["vim_net_id"] = port["network_id"]
+                         interface["vim_interface_id"] = port["id"]
+                         # check if OS-EXT-SRV-ATTR:host is there,
+                         # in case of non-admin credentials, it will be missing
+                         if vm_vim.get('OS-EXT-SRV-ATTR:host'):
+                             interface["compute_node"] = vm_vim['OS-EXT-SRV-ATTR:host']
+                         interface["pci"] = None
+                         # check if binding:profile is there,
+                         # in case of non-admin credentials, it will be missing
+                         if port.get('binding:profile'):
+                             if port['binding:profile'].get('pci_slot'):
+                                 # TODO: At the moment sr-iov pci addresses are converted to PF pci addresses by setting the slot to 0x00
+                                 # TODO: This is just a workaround valid for Niantic NICs. Find a better way to do so
+                                 #   CHANGE DDDD:BB:SS.F to DDDD:BB:00.(F%2)   assuming there are 2 ports per nic
+                                 pci = port['binding:profile']['pci_slot']
+                                 # interface["pci"] = pci[:-4] + "00." + str(int(pci[-1]) % 2)
+                                 interface["pci"] = pci
+                         interface["vlan"] = None
+                         #if network is of type vlan and port is of type direct (sr-iov) then set vlan id
+                         network = self.neutron.show_network(port["network_id"])
+                         if network['network'].get('provider:network_type') == 'vlan' and \
+                             port.get("binding:vnic_type") == "direct":
+                             interface["vlan"] = network['network'].get('provider:segmentation_id')
+                         ips=[]
+                         #look for floating ip address
+                         try:
+                             floating_ip_dict = self.neutron.list_floatingips(port_id=port["id"])
+                             if floating_ip_dict.get("floatingips"):
+                                 ips.append(floating_ip_dict["floatingips"][0].get("floating_ip_address") )
+                         except Exception:
+                             pass
+                         for subnet in port["fixed_ips"]:
+                             ips.append(subnet["ip_address"])
+                         interface["ip_address"] = ";".join(ips)
+                         vm["interfaces"].append(interface)
+                 except Exception as e:
+                     self.logger.error("Error getting vm interface information {}: {}".format(type(e).__name__, e),
+                                       exc_info=True)
+             except vimconn.vimconnNotFoundException as e:
+                 self.logger.error("Exception getting vm status: %s", str(e))
+                 vm['status'] = "DELETED"
+                 vm['error_msg'] = str(e)
+             except vimconn.vimconnException as e:
+                 self.logger.error("Exception getting vm status: %s", str(e))
+                 vm['status'] = "VIM_ERROR"
+                 vm['error_msg'] = str(e)
+             vm_dict[vm_id] = vm
+         return vm_dict
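+         # Illustrative shape of the returned dictionary for one VM (ids shortened
+         # and made up):
+         #     {"5e86...": {"status": "ACTIVE",
+         #                  "vim_info": "<yaml dump of the nova server>",
+         #                  "interfaces": [{"mac_address": "fa:16:3e:00:00:01",
+         #                                  "vim_net_id": "9a2b...", "vim_interface_id": "77cd...",
+         #                                  "ip_address": "10.0.0.4", "pci": None, "vlan": None}]}}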
+     def action_vminstance(self, vm_id, action_dict, created_items={}):
+         '''Send an action to a VM instance in the VIM
+         Returns None or the console dict if the action was successfully sent to the VIM'''
+         self.logger.debug("Action over VM '%s': %s", vm_id, str(action_dict))
+         try:
+             self._reload_connection()
+             server = self.nova.servers.find(id=vm_id)
+             if "start" in action_dict:
+                 if action_dict["start"]=="rebuild":
+                     server.rebuild()
+                 else:
+                     if server.status=="PAUSED":
+                         server.unpause()
+                     elif server.status=="SUSPENDED":
+                         server.resume()
+                     elif server.status=="SHUTOFF":
+                         server.start()
+             elif "pause" in action_dict:
+                 server.pause()
+             elif "resume" in action_dict:
+                 server.resume()
+             elif "shutoff" in action_dict or "shutdown" in action_dict:
+                 server.stop()
+             elif "forceOff" in action_dict:
+                 server.stop() #TODO
+             elif "terminate" in action_dict:
+                 server.delete()
+             elif "createImage" in action_dict:
+                 server.create_image()
+                 #"path":path_schema,
+                 #"description":description_schema,
+                 #"name":name_schema,
+                 #"metadata":metadata_schema,
+                 #"imageRef": id_schema,
+                 #"disk": {"oneOf":[{"type": "null"}, {"type":"string"}] },
+             elif "rebuild" in action_dict:
+                 server.rebuild(server.image['id'])
+             elif "reboot" in action_dict:
+                 server.reboot() #reboot_type='SOFT'
+             elif "console" in action_dict:
+                 console_type = action_dict["console"]
+                 if console_type is None or console_type == "novnc":
+                     console_dict = server.get_vnc_console("novnc")
+                 elif console_type == "xvpvnc":
+                     console_dict = server.get_vnc_console(console_type)
+                 elif console_type == "rdp-html5":
+                     console_dict = server.get_rdp_console(console_type)
+                 elif console_type == "spice-html5":
+                     console_dict = server.get_spice_console(console_type)
+                 else:
+                     raise vimconn.vimconnException("console type '{}' not allowed".format(console_type),
+                                                    http_code=vimconn.HTTP_Bad_Request)
+                 try:
+                     console_url = console_dict["console"]["url"]
+                     # parse console_url, e.g. "protocol://server:port/suffix"
+                     protocol_index = console_url.find("//")
+                     suffix_index = console_url.find("/", protocol_index+2)
+                     port_index = console_url.find(":", protocol_index+2, suffix_index)
+                     if protocol_index < 0 or port_index < 0 or suffix_index < 0:
+                         raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
+                     console_dict2={"protocol": console_url[0:protocol_index],
+                                   "server":   console_url[protocol_index+2 : port_index],
+                                   "port":     int(console_url[port_index+1 : suffix_index]),
+                                   "suffix":   console_url[suffix_index+1:]
+                                   }
+                     return console_dict2
+                 except Exception:
+                     raise vimconn.vimconnException("Unexpected response from VIM " + str(console_dict))
+             return None
+         except (ksExceptions.ClientException, nvExceptions.ClientException, nvExceptions.NotFound, ConnectionError) as e:
+             self._format_exception(e)
+         #TODO insert exception vimconn.HTTP_Unauthorized
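+         # Example action_dict values handled above (illustrative):
+         #     {"start": None}        # unpause/resume/start depending on current state
+         #     {"start": "rebuild"}   # rebuild the server
+         #     {"shutoff": None}      # stop the server
+         #     {"console": "novnc"}   # returns the parsed console dict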
+     ####### VIO Specific Changes #########
+     def _generate_vlanID(self):
+         """
+          Method to obtain an unused VLAN ID
+             Args:
+                 None
+             Returns:
+                 vlanID
+         """
+         #Get used VLAN IDs
+         usedVlanIDs = []
+         networks = self.get_network_list()
+         for net in networks:
+             if net.get('provider:segmentation_id'):
+                 usedVlanIDs.append(net.get('provider:segmentation_id'))
+         used_vlanIDs = set(usedVlanIDs)
+         #find unused VLAN ID
+         for vlanID_range in self.config.get('dataplane_net_vlan_range'):
+             try:
+                 start_vlanid, end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
+                 for vlanID in range(start_vlanid, end_vlanid + 1):
+                     if vlanID not in used_vlanIDs:
+                         return vlanID
+             except Exception as exp:
+                 raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp))
+         else:
+             raise vimconn.vimconnConflictException("Unable to create the SRIOV VLAN network."\
+                 " All given Vlan IDs {} are in use.".format(self.config.get('dataplane_net_vlan_range')))
+     def _generate_multisegment_vlanID(self):
+         """
+          Method to obtain an unused VLAN ID for a multi-segment network
+             Args:
+                 None
+             Returns:
+                 vlanID
+         """
+         #Get used VLAN IDs
+         usedVlanIDs = []
+         networks = self.get_network_list()
+         for net in networks:
+             if net.get('provider:network_type') == "vlan" and net.get('provider:segmentation_id'):
+                 usedVlanIDs.append(net.get('provider:segmentation_id'))
+             elif net.get('segments'):
+                 for segment in net.get('segments'):
+                     if segment.get('provider:network_type') == "vlan" and segment.get('provider:segmentation_id'):
+                         usedVlanIDs.append(segment.get('provider:segmentation_id'))
+         used_vlanIDs = set(usedVlanIDs)
+         #find unused VLAN ID
+         for vlanID_range in self.config.get('multisegment_vlan_range'):
+             try:
+                 start_vlanid, end_vlanid = map(int, vlanID_range.replace(" ", "").split("-"))
+                 for vlanID in range(start_vlanid, end_vlanid + 1):
+                     if vlanID not in used_vlanIDs:
+                         return vlanID
+             except Exception as exp:
+                 raise vimconn.vimconnException("Exception {} occurred while generating VLAN ID.".format(exp))
+         else:
+             raise vimconn.vimconnConflictException("Unable to create the VLAN segment."\
+                 " All VLAN IDs {} are in use.".format(self.config.get('multisegment_vlan_range')))
+     def _validate_vlan_ranges(self, input_vlan_range, text_vlan_range):
+         """
+         Method to validate user given vlanID ranges
+             Args:  None
+             Returns: None
+         """
+         for vlanID_range in input_vlan_range:
+             vlan_range = vlanID_range.replace(" ", "")
+             #validate format
+             vlanID_pattern = r'(\d+)-(\d+)$'
+             match_obj = re.match(vlanID_pattern, vlan_range)
+             if not match_obj:
+                 raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}. You must provide "\
+                 "'{}' in format [start_ID - end_ID].".format(text_vlan_range, vlanID_range, text_vlan_range))
+             start_vlanid, end_vlanid = map(int, vlan_range.split("-"))
+             if start_vlanid <= 0:
+                 raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}. "\
+                 "Start ID must be greater than zero. For VLAN "\
+                 "networks valid IDs are 1 to 4094.".format(text_vlan_range, vlanID_range))
+             if end_vlanid > 4094:
+                 raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}. "\
+                 "End VLAN ID cannot be greater than 4094. For VLAN "\
+                 "networks valid IDs are 1 to 4094.".format(text_vlan_range, vlanID_range))
+             if start_vlanid > end_vlanid:
+                 raise vimconn.vimconnConflictException("Invalid VLAN range for {}: {}. "\
+                     "You must provide '{}' in format start_ID - end_ID and "\
+                     "start_ID < end_ID.".format(text_vlan_range, vlanID_range, text_vlan_range))
+     ####### NOT USED FUNCTIONS #########
+     def new_external_port(self, port_data):
+         #TODO openstack if needed
+         '''Adds an external port to VIM
+         Returns the port identifier'''
+         return -vimconn.HTTP_Internal_Server_Error, "osconnector.new_external_port() not implemented"
+     def connect_port_network(self, port_id, network_id, admin=False):
+         #TODO openstack if needed
+         '''Connects an external port to a network
+         Returns the status code of the VIM response'''
+         return -vimconn.HTTP_Internal_Server_Error, "osconnector.connect_port_network() not implemented"
+     def new_user(self, user_name, user_passwd, tenant_id=None):
+         '''Adds a new user to openstack VIM
+         Returns the user identifier'''
+         self.logger.debug("osconnector: Adding a new user to VIM")
+         try:
+             self._reload_connection()
+             user=self.keystone.users.create(user_name, password=user_passwd, default_project=tenant_id)
+             #self.keystone.tenants.add_user(self.k_creds["username"], #role)
+             return user.id
+         except ksExceptions.ConnectionError as e:
+             error_value=-vimconn.HTTP_Bad_Request
+             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+         except ksExceptions.ClientException as e: #TODO remove
+             error_value=-vimconn.HTTP_Bad_Request
+             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+         #TODO insert exception vimconn.HTTP_Unauthorized
+         #if reaching here is because an exception
+         self.logger.debug("new_user " + error_text)
+         return error_value, error_text
+     def delete_user(self, user_id):
+         '''Delete a user from openstack VIM
+         Returns the user identifier'''
+         if self.debug:
+             print("osconnector: Deleting  a  user from VIM")
+         try:
+             self._reload_connection()
+             self.keystone.users.delete(user_id)
+             return 1, user_id
+         except ksExceptions.ConnectionError as e:
+             error_value=-vimconn.HTTP_Bad_Request
+             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+         except ksExceptions.NotFound as e:
+             error_value=-vimconn.HTTP_Not_Found
+             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+         except ksExceptions.ClientException as e: #TODO remove
+             error_value=-vimconn.HTTP_Bad_Request
+             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+         #TODO insert exception vimconn.HTTP_Unauthorized
+         #if reaching here is because an exception
+             self.logger.debug("delete_tenant " + error_text)
+         return error_value, error_text
+     def get_hosts_info(self):
+         '''Get the information of deployed hosts
+         Returns the hosts content'''
+         if self.debug:
+             print("osconnector: Getting Host info from VIM")
+         try:
+             h_list=[]
+             self._reload_connection()
+             hypervisors = self.nova.hypervisors.list()
+             for hype in hypervisors:
+                 h_list.append( hype.to_dict() )
+             return 1, {"hosts":h_list}
+         except nvExceptions.NotFound as e:
+             error_value=-vimconn.HTTP_Not_Found
+             error_text= (str(e) if len(e.args)==0 else str(e.args[0]))
+         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+             error_value=-vimconn.HTTP_Bad_Request
+             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+         #TODO insert exception vimconn.HTTP_Unauthorized
+         #if reaching here is because an exception
+         self.logger.debug("get_hosts_info " + error_text)
+         return error_value, error_text
+     def get_hosts(self, vim_tenant):
+         '''Get the hosts and deployed instances
+         Returns the hosts content'''
+         r, hype_dict = self.get_hosts_info()
+         if r<0:
+             return r, hype_dict
+         hypervisors = hype_dict["hosts"]
+         try:
+             servers = self.nova.servers.list()
+             for hype in hypervisors:
+                 for server in servers:
+                     if server.to_dict()['OS-EXT-SRV-ATTR:hypervisor_hostname']==hype['hypervisor_hostname']:
+                         if 'vm' in hype:
+                             hype['vm'].append(server.id)
+                         else:
+                             hype['vm'] = [server.id]
+             return 1, hype_dict
+         except nvExceptions.NotFound as e:
+             error_value=-vimconn.HTTP_Not_Found
+             error_text= (str(e) if len(e.args)==0 else str(e.args[0]))
+         except (ksExceptions.ClientException, nvExceptions.ClientException) as e:
+             error_value=-vimconn.HTTP_Bad_Request
+             error_text= type(e).__name__ + ": "+  (str(e) if len(e.args)==0 else str(e.args[0]))
+         #TODO insert exception vimconn.HTTP_Unauthorized
+         #if reaching here is because an exception
+         self.logger.debug("get_hosts " + error_text)
+         return error_value, error_text
+     def new_classification(self, name, ctype, definition):
+         self.logger.debug('Adding a new (Traffic) Classification to VIM, named %s', name)
+         try:
+             new_class = None
+             self._reload_connection()
+             if ctype not in supportedClassificationTypes:
+                 raise vimconn.vimconnNotSupportedException(
+                         'OpenStack VIM connector doesn\'t support provided '
+                         'Classification Type {}, supported ones are: '
+                         '{}'.format(ctype, supportedClassificationTypes))
+             if not self._validate_classification(ctype, definition):
+                 raise vimconn.vimconnException(
+                     'Incorrect Classification definition '
+                     'for the type specified.')
+             classification_dict = definition
+             classification_dict['name'] = name
+             new_class = self.neutron.create_sfc_flow_classifier(
+                 {'flow_classifier': classification_dict})
+             return new_class['flow_classifier']['id']
+         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                 neExceptions.NeutronException, ConnectionError) as e:
+             self.logger.error(
+                 'Creation of Classification failed.')
+             self._format_exception(e)
+     def get_classification(self, class_id):
+         self.logger.debug(" Getting Classification %s from VIM", class_id)
+         filter_dict = {"id": class_id}
+         class_list = self.get_classification_list(filter_dict)
+         if len(class_list) == 0:
+             raise vimconn.vimconnNotFoundException(
+                 "Classification '{}' not found".format(class_id))
+         elif len(class_list) > 1:
+             raise vimconn.vimconnConflictException(
+                 "Found more than one Classification with this criteria")
+         classification = class_list[0]
+         return classification
+     def get_classification_list(self, filter_dict={}):
+         self.logger.debug("Getting Classifications from VIM filter: '%s'",
+                           str(filter_dict))
+         try:
+             filter_dict_os = filter_dict.copy()
+             self._reload_connection()
+             if self.api_version3 and "tenant_id" in filter_dict_os:
+                 filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+             classification_dict = self.neutron.list_sfc_flow_classifiers(
+                 **filter_dict_os)
+             classification_list = classification_dict["flow_classifiers"]
+             self.__classification_os2mano(classification_list)
+             return classification_list
+         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                 neExceptions.NeutronException, ConnectionError) as e:
+             self._format_exception(e)
+     def delete_classification(self, class_id):
+         self.logger.debug("Deleting Classification '%s' from VIM", class_id)
+         try:
+             self._reload_connection()
+             self.neutron.delete_sfc_flow_classifier(class_id)
+             return class_id
+         except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+                 ksExceptions.ClientException, ConnectionError) as e:
+             self._format_exception(e)
+     def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
+         self.logger.debug("Adding a new Service Function Instance to VIM, named '%s'", name)
+         try:
+             new_sfi = None
+             self._reload_connection()
+             correlation = None
+             if sfc_encap:
+                 correlation = 'nsh'
+             if len(ingress_ports) != 1:
+                 raise vimconn.vimconnNotSupportedException(
+                     "OpenStack VIM connector can only have "
+                     "1 ingress port per SFI")
+             if len(egress_ports) != 1:
+                 raise vimconn.vimconnNotSupportedException(
+                     "OpenStack VIM connector can only have "
+                     "1 egress port per SFI")
+             sfi_dict = {'name': name,
+                         'ingress': ingress_ports[0],
+                         'egress': egress_ports[0],
+                         'service_function_parameters': {
+                             'correlation': correlation}}
+             new_sfi = self.neutron.create_sfc_port_pair({'port_pair': sfi_dict})
+             return new_sfi['port_pair']['id']
+         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                 neExceptions.NeutronException, ConnectionError) as e:
+             if new_sfi:
+                 try:
+                     self.neutron.delete_sfc_port_pair(
+                         new_sfi['port_pair']['id'])
+                 except Exception:
+                     self.logger.error(
+                         'Creation of Service Function Instance failed, with '
+                         'subsequent deletion failure as well.')
+             self._format_exception(e)
+     def get_sfi(self, sfi_id):
+         self.logger.debug('Getting Service Function Instance %s from VIM', sfi_id)
+         filter_dict = {"id": sfi_id}
+         sfi_list = self.get_sfi_list(filter_dict)
+         if len(sfi_list) == 0:
+             raise vimconn.vimconnNotFoundException("Service Function Instance '{}' not found".format(sfi_id))
+         elif len(sfi_list) > 1:
+             raise vimconn.vimconnConflictException(
+                 'Found more than one Service Function Instance '
+                 'with this criteria')
+         sfi = sfi_list[0]
+         return sfi
+     def get_sfi_list(self, filter_dict={}):
+         self.logger.debug("Getting Service Function Instances from VIM filter: '%s'", str(filter_dict))
+         try:
+             self._reload_connection()
+             filter_dict_os = filter_dict.copy()
+             if self.api_version3 and "tenant_id" in filter_dict_os:
+                 filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+             sfi_dict = self.neutron.list_sfc_port_pairs(**filter_dict_os)
+             sfi_list = sfi_dict["port_pairs"]
+             self.__sfi_os2mano(sfi_list)
+             return sfi_list
+         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                 neExceptions.NeutronException, ConnectionError) as e:
+             self._format_exception(e)
+     def delete_sfi(self, sfi_id):
+         self.logger.debug("Deleting Service Function Instance '%s' "
+                           "from VIM", sfi_id)
+         try:
+             self._reload_connection()
+             self.neutron.delete_sfc_port_pair(sfi_id)
+             return sfi_id
+         except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+                 ksExceptions.ClientException, ConnectionError) as e:
+             self._format_exception(e)
+     def new_sf(self, name, sfis, sfc_encap=True):
+         self.logger.debug("Adding a new Service Function to VIM, named '%s'", name)
+         try:
+             new_sf = None
+             self._reload_connection()
+             # correlation = None
+             # if sfc_encap:
+             #     correlation = 'nsh'
+             for instance in sfis:
+                 sfi = self.get_sfi(instance)
+                 if sfi.get('sfc_encap') != sfc_encap:
+                     raise vimconn.vimconnNotSupportedException(
+                         "OpenStack VIM connector requires all SFIs of the "
+                         "same SF to share the same SFC Encapsulation")
+             sf_dict = {'name': name,
+                        'port_pairs': sfis}
+             new_sf = self.neutron.create_sfc_port_pair_group({
+                 'port_pair_group': sf_dict})
+             return new_sf['port_pair_group']['id']
+         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                 neExceptions.NeutronException, ConnectionError) as e:
+             if new_sf:
+                 try:
+                     self.neutron.delete_sfc_port_pair_group(
+                         new_sf['port_pair_group']['id'])
+                 except Exception:
+                     self.logger.error(
+                         'Creation of Service Function failed, with '
+                         'subsequent deletion failure as well.')
+             self._format_exception(e)
+     def get_sf(self, sf_id):
+         self.logger.debug("Getting Service Function %s from VIM", sf_id)
+         filter_dict = {"id": sf_id}
+         sf_list = self.get_sf_list(filter_dict)
+         if len(sf_list) == 0:
+             raise vimconn.vimconnNotFoundException(
+                 "Service Function '{}' not found".format(sf_id))
+         elif len(sf_list) > 1:
+             raise vimconn.vimconnConflictException(
+                 "Found more than one Service Function with this criteria")
+         sf = sf_list[0]
+         return sf
+     def get_sf_list(self, filter_dict={}):
+         self.logger.debug("Getting Service Function from VIM filter: '%s'",
+                           str(filter_dict))
+         try:
+             self._reload_connection()
+             filter_dict_os = filter_dict.copy()
+             if self.api_version3 and "tenant_id" in filter_dict_os:
+                 filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+             sf_dict = self.neutron.list_sfc_port_pair_groups(**filter_dict_os)
+             sf_list = sf_dict["port_pair_groups"]
+             self.__sf_os2mano(sf_list)
+             return sf_list
+         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                 neExceptions.NeutronException, ConnectionError) as e:
+             self._format_exception(e)
+     def delete_sf(self, sf_id):
+         self.logger.debug("Deleting Service Function '%s' from VIM", sf_id)
+         try:
+             self._reload_connection()
+             self.neutron.delete_sfc_port_pair_group(sf_id)
+             return sf_id
+         except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+                 ksExceptions.ClientException, ConnectionError) as e:
+             self._format_exception(e)
+     def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
+         self.logger.debug("Adding a new Service Function Path to VIM, named '%s'", name)
+         try:
+             new_sfp = None
+             self._reload_connection()
+             # In networking-sfc the MPLS encapsulation is legacy
+             # should be used when no full SFC Encapsulation is intended
+             correlation = 'mpls'
+             if sfc_encap:
+                 correlation = 'nsh'
+             sfp_dict = {'name': name,
+                         'flow_classifiers': classifications,
+                         'port_pair_groups': sfs,
+                         'chain_parameters': {'correlation': correlation}}
+             if spi:
+                 sfp_dict['chain_id'] = spi
+             new_sfp = self.neutron.create_sfc_port_chain({'port_chain': sfp_dict})
+             return new_sfp["port_chain"]["id"]
+         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                 neExceptions.NeutronException, ConnectionError) as e:
+             if new_sfp:
+                 try:
+                     self.neutron.delete_sfc_port_chain(new_sfp['port_chain']['id'])
+                 except Exception:
+                     self.logger.error(
+                         'Creation of Service Function Path failed, with '
+                         'subsequent deletion failure as well.')
+             self._format_exception(e)
+     def get_sfp(self, sfp_id):
+         self.logger.debug(" Getting Service Function Path %s from VIM", sfp_id)
+         filter_dict = {"id": sfp_id}
+         sfp_list = self.get_sfp_list(filter_dict)
+         if len(sfp_list) == 0:
+             raise vimconn.vimconnNotFoundException(
+                 "Service Function Path '{}' not found".format(sfp_id))
+         elif len(sfp_list) > 1:
+             raise vimconn.vimconnConflictException(
+                 "Found more than one Service Function Path with this criteria")
+         sfp = sfp_list[0]
+         return sfp
+     def get_sfp_list(self, filter_dict={}):
+         self.logger.debug("Getting Service Function Paths from VIM filter: '%s'", str(filter_dict))
+         try:
+             self._reload_connection()
+             filter_dict_os = filter_dict.copy()
+             if self.api_version3 and "tenant_id" in filter_dict_os:
+                 filter_dict_os['project_id'] = filter_dict_os.pop('tenant_id')
+             sfp_dict = self.neutron.list_sfc_port_chains(**filter_dict_os)
+             sfp_list = sfp_dict["port_chains"]
+             self.__sfp_os2mano(sfp_list)
+             return sfp_list
+         except (neExceptions.ConnectionFailed, ksExceptions.ClientException,
+                 neExceptions.NeutronException, ConnectionError) as e:
+             self._format_exception(e)
+     def delete_sfp(self, sfp_id):
+         self.logger.debug("Deleting Service Function Path '%s' from VIM", sfp_id)
+         try:
+             self._reload_connection()
+             self.neutron.delete_sfc_port_chain(sfp_id)
+             return sfp_id
+         except (neExceptions.ConnectionFailed, neExceptions.NeutronException,
+                 ksExceptions.ClientException, ConnectionError) as e:
+             self._format_exception(e)
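+     # Sketch of how the SFC methods above compose (illustrative; assumes 'vc' is a
+     # configured vimconnector, the Neutron port ids exist, and the classification
+     # type used is one of supportedClassificationTypes):
+     #     sfi = vc.new_sfi("sfi1", [ingress_port_id], [egress_port_id])  # port pair
+     #     sf = vc.new_sf("sf1", [sfi])                                   # port pair group
+     #     cl = vc.new_classification("cl1", "legacy_flow_classifier", definition)
+     #     sfp = vc.new_sfp("chain1", [cl], [sf])                         # port chain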
index 0000000,3d646ac..9c7dd5c
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,26 +1,26 @@@
 -git+https://osm.etsi.org/gerrit/osm/RO.git@py3#egg=osm-ro&subdirectory=RO
+ ##
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #    http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ # implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ ##
+ PyYAML
+ python-openstackclient
+ python-neutronclient
+ requests
+ netaddr
+ #TODO py3 networking-l2gw
+ #TODO py3 python-novaclient
+ #TODO py3 python-keystoneclient
+ #TODO py3 python-glanceclient
+ #TODO py3 python-cinderclient
++git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
index 0000000,86ee02f..9490485
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1385 +1,1388 @@@
 -    def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None): #, **vim_specific):
+ # -*- coding: utf-8 -*-
+ ##
+ # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+ '''
+ vimconnector implements all the methods to interact with openvim using the openvim API.
+ '''
+ __author__="Alfonso Tierno, Gerardo Garcia"
+ __date__ ="$26-aug-2014 11:09:29$"
+ from osm_ro import vimconn
+ import requests
+ import json
+ import yaml
+ import logging
+ import math
+ from osm_ro.openmano_schemas import id_schema, name_schema, nameshort_schema, description_schema, \
+                             vlan1000_schema, integer0_schema
+ from jsonschema import validate as js_v, exceptions as js_e
+ from urllib.parse import quote
+ '''Maps the openvim virtual machine status to the openmano status'''
+ vmStatus2manoFormat={'ACTIVE': 'ACTIVE',
+                      'PAUSED': 'PAUSED',
+                      'SUSPENDED': 'SUSPENDED',
+                      'INACTIVE': 'INACTIVE',
+                      'CREATING': 'BUILD',
+                      'ERROR': 'ERROR',
+                      'DELETED': 'DELETED'
+                      }
+ netStatus2manoFormat={'ACTIVE': 'ACTIVE',
+                       'INACTIVE': 'INACTIVE',
+                       'BUILD': 'BUILD',
+                       'ERROR': 'ERROR',
+                       'DELETED': 'DELETED',
+                       'DOWN': 'DOWN'
+                       }
+ host_schema = {
+     "type":"object",
+     "properties":{
+         "id": id_schema,
+         "name": name_schema,
+     },
+     "required": ["id"]
+ }
+ image_schema = {
+     "type":"object",
+     "properties":{
+         "id": id_schema,
+         "name": name_schema,
+     },
+     "required": ["id","name"]
+ }
+ server_schema = {
+     "type":"object",
+     "properties":{
+         "id":id_schema,
+         "name": name_schema,
+     },
+     "required": ["id","name"]
+ }
+ new_host_response_schema = {
+     "title":"host response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "host": host_schema
+     },
+     "required": ["host"],
+     "additionalProperties": False
+ }
+ get_images_response_schema = {
+     "title":"openvim images response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "images":{
+             "type":"array",
+             "items": image_schema,
+         }
+     },
+     "required": ["images"],
+     "additionalProperties": False
+ }
+ get_hosts_response_schema = {
+     "title":"openvim hosts response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "hosts":{
+             "type":"array",
+             "items": host_schema,
+         }
+     },
+     "required": ["hosts"],
+     "additionalProperties": False
+ }
+ get_host_detail_response_schema = new_host_response_schema # TODO: Content is not parsed yet
+ get_server_response_schema = {
+     "title":"openvim server response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "servers":{
+             "type":"array",
+             "items": server_schema,
+         }
+     },
+     "required": ["servers"],
+     "additionalProperties": False
+ }
+ new_tenant_response_schema = {
+     "title":"tenant response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "tenant":{
+             "type":"object",
+             "properties":{
+                 "id": id_schema,
+                 "name": nameshort_schema,
+                 "description":description_schema,
+                 "enabled":{"type" : "boolean"}
+             },
+             "required": ["id"]
+         }
+     },
+     "required": ["tenant"],
+     "additionalProperties": False
+ }
+ new_network_response_schema = {
+     "title":"network response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "network":{
+             "type":"object",
+             "properties":{
+                 "id":id_schema,
+                 "name":name_schema,
+                 "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
+                 "shared":{"type":"boolean"},
+                 "tenant_id":id_schema,
+                 "admin_state_up":{"type":"boolean"},
+                 "vlan":vlan1000_schema
+             },
+             "required": ["id"]
+         }
+     },
+     "required": ["network"],
+     "additionalProperties": False
+ }
+ # get_network_response_schema = {
+ #     "title":"get network response information schema",
+ #     "$schema": "http://json-schema.org/draft-04/schema#",
+ #     "type":"object",
+ #     "properties":{
+ #         "network":{
+ #             "type":"object",
+ #             "properties":{
+ #                 "id":id_schema,
+ #                 "name":name_schema,
+ #                 "type":{"type":"string", "enum":["bridge_man","bridge_data","data", "ptp"]},
+ #                 "shared":{"type":"boolean"},
+ #                 "tenant_id":id_schema,
+ #                 "admin_state_up":{"type":"boolean"},
+ #                 "vlan":vlan1000_schema
+ #             },
+ #             "required": ["id"]
+ #         }
+ #     },
+ #     "required": ["network"],
+ #     "additionalProperties": False
+ # }
+ new_port_response_schema = {
+     "title":"port response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "port":{
+             "type":"object",
+             "properties":{
+                 "id":id_schema,
+             },
+             "required": ["id"]
+         }
+     },
+     "required": ["port"],
+     "additionalProperties": False
+ }
+ get_flavor_response_schema = {
+     "title":"openvim flavors response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "flavor":{
+             "type":"object",
+             "properties":{
+                 "id":   id_schema,
+                 "name": name_schema,
+                 "extended": {"type":"object"},
+             },
+             "required": ["id", "name"],
+         }
+     },
+     "required": ["flavor"],
+     "additionalProperties": False
+ }
+ new_flavor_response_schema = {
+     "title":"flavor response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "flavor":{
+             "type":"object",
+             "properties":{
+                 "id":id_schema,
+             },
+             "required": ["id"]
+         }
+     },
+     "required": ["flavor"],
+     "additionalProperties": False
+ }
+ get_image_response_schema = {
+     "title":"openvim images response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "image":{
+             "type":"object",
+             "properties":{
+                 "id":   id_schema,
+                 "name": name_schema,
+             },
+             "required": ["id", "name"],
+         }
+     },
+     "required": ["flavor"],
+     "additionalProperties": False
+ }
+ new_image_response_schema = {
+     "title":"image response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "image":{
+             "type":"object",
+             "properties":{
+                 "id":id_schema,
+             },
+             "required": ["id"]
+         }
+     },
+     "required": ["image"],
+     "additionalProperties": False
+ }
+ new_vminstance_response_schema = {
+     "title":"server response information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "server":{
+             "type":"object",
+             "properties":{
+                 "id":id_schema,
+             },
+             "required": ["id"]
+         }
+     },
+     "required": ["server"],
+     "additionalProperties": False
+ }
+ get_processor_rankings_response_schema = {
+     "title":"processor rankings information schema",
+     "$schema": "http://json-schema.org/draft-04/schema#",
+     "type":"object",
+     "properties":{
+         "rankings":{
+             "type":"array",
+             "items":{
+                 "type":"object",
+                 "properties":{
+                     "model": description_schema,
+                     "value": integer0_schema
+                 },
+                 "additionalProperties": False,
+                 "required": ["model","value"]
+             }
+         },
+         "additionalProperties": False,
+         "required": ["rankings"]
+     }
+ }
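+ # Example of how the schemas above are used (illustrative): js_v() raises
+ # jsonschema.exceptions.ValidationError when a VIM answer does not match, which
+ # _format_request_exception() below turns into a vimconnUnexpectedResponse:
+ #     response = {"tenant": {"id": "00000000-0000-0000-0000-000000000001", "name": "osm"}}
+ #     js_v(response, new_tenant_response_schema)   # passes silently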
+ class vimconnector(vimconn.vimconnector):
+     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
+                  log_level="DEBUG", config={}, persistent_info={}):
+         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
+         self.tenant = None
+         self.headers_req = {'content-type': 'application/json'}
+         self.logger = logging.getLogger('openmano.vim.openvim')
+         self.persistent_info = persistent_info
+         if tenant_id:
+             self.tenant = tenant_id
+     def __setitem__(self,index, value):
+         '''Set individual parameters
+         Throws TypeError, KeyError
+         '''
+         if index=='tenant_id':
+             self.tenant = value
+         elif index=='tenant_name':
+             self.tenant = None
+         vimconn.vimconnector.__setitem__(self,index, value)    
+     def _get_my_tenant(self):
+         '''Obtain uuid of my tenant from name
+         '''
+         if self.tenant:
+             return self.tenant
+         url = self.url+'/tenants?name='+ quote(self.tenant_name)
+         self.logger.info("Getting VIM tenant_id GET %s", url)
+         vim_response = requests.get(url, headers = self.headers_req)
+         self._check_http_request_response(vim_response)
+         try:
+             tenant_list = vim_response.json()["tenants"]
+             if len(tenant_list) == 0:
+                 raise vimconn.vimconnNotFoundException("No tenant found for name '{}'".format(self.tenant_name))
+             elif len(tenant_list) > 1:
+                 raise vimconn.vimconnConflictException ("More that one tenant found for name '{}'".format(self.tenant_name))
+             self.tenant = tenant_list[0]["id"]
+             return self.tenant
+         except Exception as e:
+             raise vimconn.vimconnUnexpectedResponse("Get VIM tenant {} '{}'".format(type(e).__name__, str(e)))
+     def _format_jsonerror(self,http_response):
+         #DEPRECATED, to delete in the future
+         try:
+             data = http_response.json()
+             return data["error"]["description"]
+         except Exception:
+             return http_response.text
+     def _format_in(self, http_response, schema):
+         #DEPRECATED, to delete in the future
+         try:
+             client_data = http_response.json()
+             js_v(client_data, schema)
+             #print "Input data: ", str(client_data)
+             return True, client_data
+         except js_e.ValidationError as exc:
+             print("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
+             return False, ("validate_in error, jsonschema exception ", exc.message, "at", exc.path)
+     
+     def _remove_extra_items(self, data, schema):
+         deleted=[]
+         if type(data) is tuple or type(data) is list:
+             for d in data:
+                 a= self._remove_extra_items(d, schema['items'])
+                 if a is not None: deleted.append(a)
+         elif type(data) is dict:
+             to_delete = []
+             for k in data.keys():
+                 if 'properties' not in schema or k not in schema['properties'].keys():
+                     to_delete.append(k)
+                     deleted.append(k)
+                 else:
+                     a = self._remove_extra_items(data[k], schema['properties'][k])
+                     if a is not None:  deleted.append({k:a})
+             for k in to_delete:
+                 del data[k]
+         if len(deleted) == 0: return None
+         elif len(deleted) == 1: return deleted[0]
+         else: return deleted
+         
+     def _format_request_exception(self, request_exception):
+         '''Transform a request exception into a vimconn exception'''
+         if isinstance(request_exception, js_e.ValidationError):
+             raise vimconn.vimconnUnexpectedResponse("jsonschema exception '{}' at '{}'".format(request_exception.message, request_exception.path))            
+         elif isinstance(request_exception, requests.exceptions.HTTPError):
+             raise vimconn.vimconnUnexpectedResponse(type(request_exception).__name__ + ": " + str(request_exception))
+         else:
+             raise vimconn.vimconnConnectionException(type(request_exception).__name__ + ": " + str(request_exception))
+     def _check_http_request_response(self, request_response):
+         '''Raise a vimconn exception if the response is not Ok'''
+         if 200 <= request_response.status_code < 300:
+             return
+         if request_response.status_code == vimconn.HTTP_Unauthorized:
+             raise vimconn.vimconnAuthException(request_response.text)
+         elif request_response.status_code == vimconn.HTTP_Not_Found:
+             raise vimconn.vimconnNotFoundException(request_response.text)
+         elif request_response.status_code == vimconn.HTTP_Conflict:
+             raise vimconn.vimconnConflictException(request_response.text)
+         else: 
+             raise vimconn.vimconnUnexpectedResponse("VIM HTTP_response {}, {}".format(request_response.status_code, str(request_response.text)))
+     def new_tenant(self,tenant_name,tenant_description):
+         '''Adds a new tenant to VIM with this name and description, returns the tenant identifier'''
+         #print "VIMConnector: Adding a new tenant to VIM"
+         payload_dict = {"tenant": {"name":tenant_name,"description": tenant_description, "enabled": True}}
+         payload_req = json.dumps(payload_dict)
+         try:
+             url = self.url_admin+'/tenants'
+             self.logger.info("Adding a new tenant %s", url)
+             vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, new_tenant_response_schema)
+             #r = self._remove_extra_items(response, new_tenant_response_schema)
+             #if r is not None: 
+             #    self.logger.warn("Warning: remove extra items %s", str(r))
+             tenant_id = response['tenant']['id']
+             return tenant_id
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+     def delete_tenant(self,tenant_id):
+         '''Delete a tenant from VIM. Returns the old tenant identifier'''
+         try:
+             url = self.url_admin+'/tenants/'+tenant_id
+             self.logger.info("Delete a tenant DELETE %s", url)
+             vim_response = requests.delete(url, headers = self.headers_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             return tenant_id
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+     def get_tenant_list(self, filter_dict={}):
+         '''Obtain tenants of VIM
+         filter_dict can contain the following keys:
+             name: filter by tenant name
+             id: filter by tenant uuid/id
+             <other VIM specific>
+         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
+         '''
+         filterquery=[]
+         filterquery_text=''
+         for k,v in filter_dict.items():
+             filterquery.append(str(k)+'='+str(v))
+         if len(filterquery)>0:
+             filterquery_text='?'+ '&'.join(filterquery)
+         try:
+             url = self.url+'/tenants'+filterquery_text
+             self.logger.info("get_tenant_list GET %s", url)
+             vim_response = requests.get(url, headers = self.headers_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             return vim_response.json()["tenants"]
+         except requests.exceptions.RequestException as e:
+             self._format_request_exception(e)
 -            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
++    def new_network(self,net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None): #, **vim_specific):
+         """Adds a tenant network to VIM
+         Params:
+             'net_name': name of the network
+             'net_type': one of:
+                 'bridge': overlay isolated network
+                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+             'ip_profile': is a dict containing the IP parameters of the network
+                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                 'dhcp_enabled': True or False
+                 'dhcp_start_address': ip_schema, first IP to grant
+                 'dhcp_count': number of IPs to grant.
+             'shared': if this network can be seen/used by other tenants/organizations
++            'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
+         Returns a tuple with the network identifier and created_items, or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         try:
++            vlan = None
++            if provider_network_profile:
++                vlan = provider_network_profile.get("segmentation-id")
+             created_items = {}
+             self._get_my_tenant()
+             if net_type=="bridge":
+                 net_type="bridge_data"
+             payload_req = {"name": net_name, "type": net_type, "tenant_id": self.tenant, "shared": shared}
+             if vlan:
+                 payload_req["provider:vlan"] = vlan
+             # payload_req.update(vim_specific)
+             url = self.url+'/networks'
+             self.logger.info("Adding a new network POST: %s  DATA: %s", url, str(payload_req))
+             vim_response = requests.post(url, headers = self.headers_req, data=json.dumps({"network": payload_req}) )
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, new_network_response_schema)
+             #r = self._remove_extra_items(response, new_network_response_schema)
+             #if r is not None: 
+             #    self.logger.warn("Warning: remove extra items %s", str(r))
+             network_id = response['network']['id']
+             return network_id, created_items
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+         
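+     # Illustrative usage sketch (not part of the original code): how a caller might
+     # request a data-plane network with a VLAN tag through provider_network_profile.
+     # The "vim" instance and the VLAN value are hypothetical.
+     #
+     #     profile = {"segmentation-id": 300}
+     #     net_id, created_items = vim.new_network("dataplane-net", "data",
+     #                                             provider_network_profile=profile)
+     #     # internally this sets payload_req["provider:vlan"] = 300 before the POST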
+     def get_network_list(self, filter_dict={}):
+         '''Obtain tenant networks of VIM
+         Filter_dict can be:
+             name: network name
+             id: network uuid
+             public: boolean
+             tenant_id: tenant
+             admin_state_up: boolean
+             status: 'ACTIVE'
+         Returns the network list of dictionaries
+         '''
+         try:
+             if 'tenant_id' not in filter_dict:
+                 filter_dict["tenant_id"] = self._get_my_tenant()
+             elif not filter_dict["tenant_id"]:
+                 del filter_dict["tenant_id"]
+             filterquery=[]
+             filterquery_text=''
+             for k,v in filter_dict.items():
+                 filterquery.append(str(k)+'='+str(v))
+             if len(filterquery)>0:
+                 filterquery_text='?'+ '&'.join(filterquery)
+             url = self.url+'/networks'+filterquery_text
+             self.logger.info("Getting network list GET %s", url)
+             vim_response = requests.get(url, headers = self.headers_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             return response['networks']
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
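+     # Illustrative sketch (not part of the original code): filter_dict is serialized
+     # into a query string, and a tenant_id filter is injected automatically when the
+     # caller does not supply one. A hypothetical call such as
+     #
+     #     vim.get_network_list({"name": "mgmt", "admin_state_up": True})
+     #
+     # therefore issues GET <url>/networks?name=mgmt&admin_state_up=True&tenant_id=<tenant>
+     # (parameter order follows dict iteration order).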
+     def get_network(self, net_id):
+         '''Obtain network details of network id'''
+         try:
+             url = self.url+'/networks/'+net_id
+             self.logger.info("Getting network GET %s", url)
+             vim_response = requests.get(url, headers = self.headers_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             return response['network']
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+             
+     def delete_network(self, net_id, created_items=None):
+         """
+         Removes a tenant network from VIM and its associated elements
+         :param net_id: VIM identifier of the network, provided by method new_network
+         :param created_items: dictionary with extra items to be deleted. provided by method new_network
+         Returns the network identifier or raises an exception upon error or when network is not found
+         """
+         try:
+             self._get_my_tenant()
+             url = self.url+'/networks/'+net_id
+             self.logger.info("Deleting VIM network DELETE %s", url)
+             vim_response = requests.delete(url, headers=self.headers_req)
+             self._check_http_request_response(vim_response)
+             #self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             return net_id
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+     def get_flavor(self, flavor_id):
+         '''Obtain flavor details from the  VIM'''
+         try:
+             self._get_my_tenant()
+             url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+             self.logger.info("Getting flavor GET %s", url)
+             vim_response = requests.get(url, headers = self.headers_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, get_flavor_response_schema)
+             r = self._remove_extra_items(response, get_flavor_response_schema)
+             if r is not None: 
+                 self.logger.warn("Warning: remove extra items %s", str(r))
+             return response['flavor']
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+         
+     def new_flavor(self, flavor_data):
+         '''Adds a tenant flavor to VIM'''
+         '''Returns the flavor identifier'''
+         try:
+             new_flavor_dict = flavor_data.copy()
+             for device in new_flavor_dict.get('extended', {}).get('devices', ()):
+                 if 'image name' in device:
+                     del device['image name']
+                 if 'name' in device:
+                     del device['name']
+             numas = new_flavor_dict.get('extended', {}).get('numas')
+             if numas:
+                 numa = numas[0]
+                 # translate memory, cpus to EPA
+                 if "cores" not in numa and "threads" not in numa and "paired-threads" not in numa:
+                     numa["paired-threads"] = new_flavor_dict["vcpus"]
+                 if "memory" not in numa:
+                     numa["memory"] = int(math.ceil(new_flavor_dict["ram"] / 1024.0))
+                 for iface in numa.get("interfaces", ()):
+                     if not iface.get("bandwidth"):
+                         iface["bandwidth"] = "1 Mbps"
+             new_flavor_dict["name"] = flavor_data["name"][:64]
+             self._get_my_tenant()
+             payload_req = json.dumps({'flavor': new_flavor_dict})
+             url = self.url+'/'+self.tenant+'/flavors'
+             self.logger.info("Adding a new VIM flavor POST %s", url)
+             vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, new_flavor_response_schema)
+             r = self._remove_extra_items(response, new_flavor_response_schema)
+             if r is not None: 
+                 self.logger.warn("Warning: remove extra items %s", str(r))
+             flavor_id = response['flavor']['id']
+             return flavor_id
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
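+     # Illustrative sketch (not part of the original code): when a flavor carries a
+     # NUMA node without explicit EPA fields, vcpus/ram are translated as shown above.
+     # All values are hypothetical.
+     #
+     #     flavor_data = {"name": "epa-flavor", "vcpus": 4, "ram": 8192,
+     #                    "extended": {"numas": [{}]}}
+     #     flavor_id = vim.new_flavor(flavor_data)
+     #     # before the POST, the first NUMA node is completed as:
+     #     #     numa["paired-threads"] = 4   # taken from vcpus
+     #     #     numa["memory"] = 8           # ceil(8192 / 1024.0) GB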
+     def delete_flavor(self,flavor_id):
+         '''Deletes a tenant flavor from VIM'''
+         '''Returns the old flavor_id'''
+         try:
+             self._get_my_tenant()
+             url = self.url+'/'+self.tenant+'/flavors/'+flavor_id
+             self.logger.info("Deleting VIM flavor DELETE %s", url)
+             vim_response = requests.delete(url, headers=self.headers_req)
+             self._check_http_request_response(vim_response)
+             #self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             return flavor_id
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+     def get_image(self, image_id):
+         '''Obtain image details from the  VIM'''
+         try:
+             self._get_my_tenant()
+             url = self.url+'/'+self.tenant+'/images/'+image_id
+             self.logger.info("Getting image GET %s", url)
+             vim_response = requests.get(url, headers = self.headers_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, get_image_response_schema)
+             r = self._remove_extra_items(response, get_image_response_schema)
+             if r is not None: 
+                 self.logger.warn("Warning: remove extra items %s", str(r))
+             return response['image']
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+     def new_image(self,image_dict):
+         ''' Adds a tenant image to VIM, returns image_id'''
+         try:
+             self._get_my_tenant()
+             new_image_dict={'name': image_dict['name'][:64]}
+             if image_dict.get('description'):
+                 new_image_dict['description'] = image_dict['description']
+             if image_dict.get('metadata'):
+                 new_image_dict['metadata'] = yaml.load(image_dict['metadata'], Loader=yaml.SafeLoader)
+             if image_dict.get('location'):
+                 new_image_dict['path'] = image_dict['location']
+             payload_req = json.dumps({"image":new_image_dict})
+             url=self.url + '/' + self.tenant + '/images'
+             self.logger.info("Adding a new VIM image POST %s", url)
+             vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, new_image_response_schema)
+             r = self._remove_extra_items(response, new_image_response_schema)
+             if r is not None: 
+                 self.logger.warn("Warning: remove extra items %s", str(r))
+             image_id = response['image']['id']
+             return image_id
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+             
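+     # Illustrative sketch (not part of the original code): a minimal image_dict as
+     # consumed by new_image(); 'metadata' is parsed as YAML and 'location' is sent to
+     # openvim as the image 'path'. All values, including the metadata key, are hypothetical.
+     #
+     #     image_id = vim.new_image({"name": "ubuntu16.04",
+     #                               "description": "base cloud image",
+     #                               "metadata": "os_type: linux",
+     #                               "location": "/mnt/images/ubuntu16.04.qcow2"})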
+     def delete_image(self, image_id):
+         '''Deletes a tenant image from VIM'''
+         '''Returns the deleted image_id'''
+         try:
+             self._get_my_tenant()
+             url = self.url + '/'+ self.tenant +'/images/'+image_id
+             self.logger.info("Deleting VIM image DELETE %s", url)
+             vim_response = requests.delete(url, headers=self.headers_req)
+             self._check_http_request_response(vim_response)
+             #self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             return image_id
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+     def get_image_id_from_path(self, path):
+         '''Get the image id from image path in the VIM database. Returns the image_id'''
+         try:
+             self._get_my_tenant()
+             url=self.url + '/' + self.tenant + '/images?path='+quote(path)
+             self.logger.info("Getting images GET %s", url)
+             vim_response = requests.get(url)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, get_images_response_schema)
+             #r = self._remove_extra_items(response, get_images_response_schema)
+             #if r is not None: 
+             #    self.logger.warn("Warning: remove extra items %s", str(r))
+             if len(response['images'])==0:
+                 raise vimconn.vimconnNotFoundException("Image not found at VIM with path '{}'".format(path))
+             elif len(response['images'])>1:
+                 raise vimconn.vimconnConflictException("More than one image found at VIM with path '{}'".format(path))
+             return response['images'][0]['id']
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
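+     # Illustrative sketch (not part of the original code): the path is URL-quoted and
+     # used as a filter, and exactly one match is required. The path is hypothetical.
+     #
+     #     image_id = vim.get_image_id_from_path("/mnt/images/ubuntu16.04.qcow2")
+     #     # raises vimconnNotFoundException when no image has that path and
+     #     # vimconnConflictException when more than one does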
+     def get_image_list(self, filter_dict={}):
+         '''Obtain tenant images from VIM
+         Filter_dict can be:
+             name: image name
+             id: image uuid
+             checksum: image checksum
+             location: image path
+         Returns the image list of dictionaries:
+             [{<the fields at Filter_dict plus some VIM specific>}, ...]
+             List can be empty
+         '''
+         try:
+             self._get_my_tenant()
+             filterquery=[]
+             filterquery_text=''
+             for k,v in filter_dict.items():
+                 filterquery.append(str(k)+'='+str(v))
+             if len(filterquery)>0:
+                 filterquery_text='?'+ '&'.join(filterquery)
+             url = self.url+'/'+self.tenant+'/images'+filterquery_text
+             self.logger.info("Getting image list GET %s", url)
+             vim_response = requests.get(url, headers = self.headers_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             return response['images']
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+     def new_vminstancefromJSON(self, vm_data):
+         '''Adds a VM instance to VIM'''
+         '''Returns the instance identifier'''
+         try:
+             self._get_my_tenant()
+         except Exception as e:
+             return -vimconn.HTTP_Not_Found, str(e)
+         print("VIMConnector: Adding a new VM instance from JSON to VIM")
+         payload_req = vm_data
+         try:
+             vim_response = requests.post(self.url+'/'+self.tenant+'/servers', headers = self.headers_req, data=payload_req)
+         except requests.exceptions.RequestException as e:
+             print( "new_vminstancefromJSON Exception: ", e.args)
+             return -vimconn.HTTP_Not_Found, str(e.args[0])
+         # print vim_response
+         #print vim_response.status_code
+         if vim_response.status_code == 200:
+             #print vim_response.json()
+             #print json.dumps(vim_response.json(), indent=4)
+             res,http_content = self._format_in(vim_response, new_vminstance_response_schema)
+             #print http_content
+             if res:
+                 r = self._remove_extra_items(http_content, new_vminstance_response_schema)
+                 if r is not None: print("Warning: remove extra items ", r)
+                 #print http_content
+                 vminstance_id = http_content['server']['id']
+                 print( "Tenant image id: ",vminstance_id)
+                 return vim_response.status_code,vminstance_id
+             else: return -vimconn.HTTP_Bad_Request,http_content
+         else:
+             #print vim_response.text
+             jsonerror = self._format_jsonerror(vim_response)
+             text = 'Error in VIM "{}": not possible to add new vm instance. HTTP Response: {}. Error: {}'.format(
+                 self.url, vim_response.status_code, jsonerror)
+             #print text
+             return -vim_response.status_code,text
+     def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+                        availability_zone_index=None, availability_zone_list=None):
+         """Adds a VM instance to VIM
+         Params:
+             start: indicates if VM must start or boot in pause mode. Ignored
+             image_id,flavor_id: image and flavor uuid
+             net_list: list of interfaces, each one is a dictionary with:
+                 name:
+                 net_id: network uuid to connect
+                 vpci: virtual PCI address to assign
+                 model: interface model, virtio, e1000, ...
+                 mac_address: 
+                 use: 'data', 'bridge',  'mgmt'
+                 type: 'virtual', 'PCI-PASSTHROUGH'('PF'), 'SR-IOV'('VF'), 'VFnotShared'
+                 vim_id: filled/added by this function
+                 #TODO ip, security groups
+         Returns a tuple with the instance identifier and created_items or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'", image_id, flavor_id, str(net_list))
+         try:
+             self._get_my_tenant()
+ #            net_list = []
+ #            for k,v in net_dict.items():
+ #                print k,v
+ #                net_list.append('{"name":"' + k + '", "uuid":"' + v + '"}')
+ #            net_list_string = ', '.join(net_list) 
+             virtio_net_list=[]
+             for net in net_list:
+                 if not net.get("net_id"):
+                     continue
+                 net_dict = {'uuid': net["net_id"]}
+                 if net.get("type"):
+                     if net["type"] == "SR-IOV":
+                         net_dict["type"] = "VF"
+                     elif net["type"] == "PCI-PASSTHROUGH":
+                         net_dict["type"] = "PF"
+                     else:
+                         net_dict["type"] = net["type"]
+                 if net.get("name"):
+                     net_dict["name"] = net["name"]
+                 if net.get("vpci"):
+                     net_dict["vpci"] = net["vpci"]
+                 if net.get("model"):
+                     if net["model"] == "VIRTIO" or net["model"] == "paravirt":
+                         net_dict["model"] = "virtio"
+                     else:
+                         net_dict["model"] = net["model"]
+                 if net.get("mac_address"):
+                     net_dict["mac_address"] = net["mac_address"]
+                 if net.get("ip_address"):
+                     net_dict["ip_address"] = net["ip_address"]
+                 virtio_net_list.append(net_dict)
+             payload_dict={  "name":        name[:64],
+                             "description": description,
+                             "imageRef":    image_id,
+                             "flavorRef":   flavor_id,
+                             "networks": virtio_net_list
+                         }
+             if start is not None:
+                 payload_dict["start"] = start
+             payload_req = json.dumps({"server": payload_dict})
+             url = self.url+'/'+self.tenant+'/servers'
+             self.logger.info("Adding a new vm POST %s DATA %s", url, payload_req)
+             vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, new_vminstance_response_schema)
+             #r = self._remove_extra_items(response, new_vminstance_response_schema)
+             #if r is not None: 
+             #    self.logger.warn("Warning: remove extra items %s", str(r))
+             vminstance_id = response['server']['id']
+             #connect data plane interfaces to network
+             for net in net_list:
+                 if net["type"]=="virtual":
+                     if not net.get("net_id"):
+                         continue
+                     for iface in response['server']['networks']:
+                         if "name" in net:
+                             if net["name"]==iface["name"]:
+                                 net["vim_id"] = iface['iface_id']
+                                 break
+                         elif "net_id" in net:
+                             if net["net_id"]==iface["net_id"]:
+                                 net["vim_id"] = iface['iface_id']
+                                 break
+                 else: #dataplane
+                     for numa in response['server'].get('extended',{}).get('numas',() ):
+                         for iface in numa.get('interfaces',() ):
+                             if net['name'] == iface['name']:
+                                 net['vim_id'] = iface['iface_id']
+                                 # Code below is not needed; current openvim connects dataplane interfaces
+                                 #if net.get("net_id"):
+                                 ##connect dataplane interface
+                                 #    result, port_id = self.connect_port_network(iface['iface_id'], net["net_id"])
+                                 #    if result < 0:
+                                 #        error_text = "Error attaching port %s to network %s: %s." % (iface['iface_id'], net["net_id"], port_id)
+                                 #        print "new_vminstance: " + error_text
+                                 #        self.delete_vminstance(vminstance_id)
+                                 #        return result, error_text
+                                 break
+         
+             return vminstance_id, None
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+         
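+     # Illustrative sketch (not part of the original code): one net_list entry per
+     # interface type and the mapping applied before the POST. Names and uuids are
+     # hypothetical.
+     #
+     #     net_list = [
+     #         {"name": "eth0", "net_id": "<mgmt-net-uuid>", "type": "virtual",
+     #          "model": "VIRTIO"},                          # sent with model "virtio"
+     #         {"name": "xe0", "net_id": "<data-net-uuid>", "type": "SR-IOV"},
+     #     ]                                                 # SR-IOV is sent as type "VF"
+     #     vm_id, _ = vim.new_vminstance("vm1", "test vm", True, image_id, flavor_id,
+     #                                   net_list)
+     #     # on return each net dict carries the 'vim_id' of its created interface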
+     def get_vminstance(self, vm_id):
+         '''Returns the VM instance information from VIM'''
+         try:
+             self._get_my_tenant()
+             url = self.url+'/'+self.tenant+'/servers/'+vm_id
+             self.logger.info("Getting vm GET %s", url)
+             vim_response = requests.get(url, headers = self.headers_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, new_vminstance_response_schema)
+             #r = self._remove_extra_items(response, new_vminstance_response_schema)
+             #if r is not None: 
+             #    self.logger.warn("Warning: remove extra items %s", str(r))
+             return response['server']
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+         
+     def delete_vminstance(self, vm_id, created_items=None):
+         '''Removes a VM instance from VIM, returns the deleted vm_id'''
+         try:
+             self._get_my_tenant()
+             url = self.url+'/'+self.tenant+'/servers/'+vm_id
+             self.logger.info("Deleting VIM vm DELETE %s", url)
+             vim_response = requests.delete(url, headers=self.headers_req)
+             self._check_http_request_response(vim_response)
+             #self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             return vm_id
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+     def refresh_vms_status(self, vm_list):
+         '''Refreshes the status of the virtual machines'''
+         try:
+             self._get_my_tenant()
+         except requests.exceptions.RequestException as e:
+             self._format_request_exception(e)
+         vm_dict={}
+         for vm_id in vm_list:
+             vm={}
+             #print "VIMConnector refresh_tenant_vms and nets: Getting tenant VM instance information from VIM"
+             try:
+                 url = self.url + '/' + self.tenant + '/servers/' + vm_id
+                 self.logger.info("Getting vm GET %s", url)
+                 vim_response = requests.get(url, headers = self.headers_req)
+                 self._check_http_request_response(vim_response)
+                 response = vim_response.json()
+                 js_v(response, new_vminstance_response_schema)
+                 if response['server']['status'] in vmStatus2manoFormat:
+                     vm['status'] = vmStatus2manoFormat[ response['server']['status']  ]
+                 else:
+                     vm['status'] = "OTHER"
+                     vm['error_msg'] = "VIM status reported " + response['server']['status']
+                 if response['server'].get('last_error'):
+                     vm['error_msg'] = response['server']['last_error']
+                 vm["vim_info"] = yaml.safe_dump(response['server'])
+                 #get interfaces info
+                 try:
+                     management_ip = False
+                     url2 = self.url + '/ports?device_id=' + quote(vm_id)
+                     self.logger.info("Getting PORTS GET %s", url2)
+                     vim_response2 = requests.get(url2, headers = self.headers_req)
+                     self._check_http_request_response(vim_response2)
+                     client_data = vim_response2.json()
+                     if isinstance(client_data.get("ports"), list):
+                         vm["interfaces"]=[]
+                     for port in client_data.get("ports", ()):  # tolerate a missing "ports" key
+                         interface={}
+                         interface['vim_info'] = yaml.safe_dump(port)
+                         interface["mac_address"] = port.get("mac_address")
+                         interface["vim_net_id"] = port.get("network_id")
+                         interface["vim_interface_id"] = port["id"]
+                         interface["ip_address"] = port.get("ip_address")
+                         if interface["ip_address"]:
+                             management_ip = True
+                         if interface["ip_address"] == "0.0.0.0":
+                             interface["ip_address"] = None
+                         vm["interfaces"].append(interface)
+                         
+                 except Exception as e:
+                     self.logger.error("refresh_vms_and_nets. Port get %s: %s", type(e).__name__, str(e))
+                 if vm['status'] == "ACTIVE" and not management_ip:
+                     vm['status'] = "ACTIVE:NoMgmtIP"
+                     
+             except vimconn.vimconnNotFoundException as e:
+                 self.logger.error("Exception getting vm status: %s", str(e))
+                 vm['status'] = "DELETED"
+                 vm['error_msg'] = str(e)
+             except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
+                 self.logger.error("Exception getting vm status: %s", str(e))
+                 vm['status'] = "VIM_ERROR"
+                 vm['error_msg'] = str(e)
+             vm_dict[vm_id] = vm
+         return vm_dict
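+     # Illustrative sketch (not part of the original code) of the structure returned
+     # by refresh_vms_status() for one VM; all values are hypothetical.
+     #
+     #     {"<vm-uuid>": {"status": "ACTIVE",
+     #                    "vim_info": "<yaml dump of the server>",
+     #                    "interfaces": [{"mac_address": "52:54:00:12:34:56",
+     #                                    "vim_net_id": "<net-uuid>",
+     #                                    "vim_interface_id": "<port-uuid>",
+     #                                    "ip_address": "10.0.0.3"}]}}
+     #     # a VM whose interfaces report no IP address is flagged "ACTIVE:NoMgmtIP"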
+     def refresh_nets_status(self, net_list):
+         '''Get the status of the networks
+            Params: the list of network identifiers
+            Returns a dictionary with:
+                 net_id:         #VIM id of this network
+                     status:     #Mandatory. Text with one of:
+                                 #  DELETED (not found at vim)
+                                 #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                 #  OTHER (Vim reported other status not understood)
+                                 #  ERROR (VIM indicates an ERROR status)
+                                 #  ACTIVE, INACTIVE, DOWN (admin down), 
+                                 #  BUILD (on building process)
+                                 #
+                     error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+         '''
+         try:
+             self._get_my_tenant()
+         except requests.exceptions.RequestException as e:
+             self._format_request_exception(e)
+         
+         net_dict={}
+         for net_id in net_list:
+             net = {}
+             #print "VIMConnector refresh_tenant_vms_and_nets: Getting tenant network from VIM (tenant: " + str(self.tenant) + "): "
+             try:
+                 net_vim = self.get_network(net_id)
+                 if net_vim['status'] in netStatus2manoFormat:
+                     net["status"] = netStatus2manoFormat[ net_vim['status'] ]
+                 else:
+                     net["status"] = "OTHER"
+                     net["error_msg"] = "VIM status reported " + net_vim['status']
+                     
+                 if net["status"] == "ACTIVE" and not net_vim['admin_state_up']:
+                     net["status"] = "DOWN"
+                 if net_vim.get('last_error'):
+                     net['error_msg'] = net_vim['last_error']
+                 net["vim_info"] = yaml.safe_dump(net_vim)
+             except vimconn.vimconnNotFoundException as e:
+                 self.logger.error("Exception getting net status: %s", str(e))
+                 net['status'] = "DELETED"
+                 net['error_msg'] = str(e)
+             except (requests.exceptions.RequestException, js_e.ValidationError, vimconn.vimconnException) as e:
+                 self.logger.error("Exception getting net status: %s", str(e))
+                 net['status'] = "VIM_ERROR"
+                 net['error_msg'] = str(e)
+             net_dict[net_id] = net
+         return net_dict
+     
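+     # Illustrative sketch (not part of the original code) of one refresh_nets_status()
+     # entry; values are hypothetical.
+     #
+     #     {"<net-uuid>": {"status": "ACTIVE",   # becomes "DOWN" if admin_state_up is False
+     #                     "vim_info": "<yaml dump of the network>"}}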
+     def action_vminstance(self, vm_id, action_dict, created_items={}):
+         '''Send an action over a VM instance to VIM'''
+         '''Returns the status'''
+         try:
+             self._get_my_tenant()
+             if "console" in action_dict:
+                 raise vimconn.vimconnException("getting console is not available at openvim", http_code=vimconn.HTTP_Service_Unavailable)
+             url = self.url+'/'+self.tenant+'/servers/'+vm_id+"/action"
+             self.logger.info("Action over VM instance POST %s", url)
+             vim_response = requests.post(url, headers = self.headers_req, data=json.dumps(action_dict) )
+             self._check_http_request_response(vim_response)
+             return None
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+ #NOT USED METHODS in current version        
+   
+     def host_vim2gui(self, host, server_dict):
+         '''Transform host dictionary from VIM format to GUI format,
+         and append to the server_dict
+         '''
+         if type(server_dict) is not dict: 
+             print( 'vimconnector.host_vim2gui() ERROR, param server_dict must be a dictionary')
+             return
+         RAD={}
+         occupation={}
+         for numa in host['host']['numas']:
+             RAD_item={}
+             occupation_item={}
+             #memory
+             RAD_item['memory']={'size': str(numa['memory'])+'GB', 'eligible': str(numa['hugepages'])+'GB'}
+             occupation_item['memory']= str(numa['hugepages_consumed'])+'GB'
+             #cpus
+             RAD_item['cpus']={}
+             RAD_item['cpus']['cores'] = []
+             RAD_item['cpus']['eligible_cores'] = []
+             occupation_item['cores']=[]
+             for _ in range(0, len(numa['cores']) // 2):
+                 RAD_item['cpus']['cores'].append( [] )
+             for core in numa['cores']:
+                 RAD_item['cpus']['cores'][core['core_id']].append(core['thread_id'])
+                 if 'status' not in core: RAD_item['cpus']['eligible_cores'].append(core['thread_id'])
+                 if 'instance_id' in core: occupation_item['cores'].append(core['thread_id'])
+             #ports
+             RAD_item['ports']={}
+             occupation_item['ports']={}
+             for iface in numa['interfaces']:
+                 RAD_item['ports'][ iface['pci'] ] = 'speed:'+str(iface['Mbps'])+'M'
+                 occupation_item['ports'][ iface['pci'] ] = { 'occupied': str(100*iface['Mbps_consumed'] // iface['Mbps']) + "%" }
+                 
+             RAD[ numa['numa_socket'] ] = RAD_item
+             occupation[ numa['numa_socket'] ] = occupation_item
+         server_dict[ host['host']['name'] ] = {'RAD':RAD, 'occupation':occupation}
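+     # Illustrative sketch (not part of the original code): host_vim2gui() condenses
+     # each NUMA node into per-socket 'RAD' (resources) and 'occupation' entries; a
+     # hypothetical result for one host could look like:
+     #
+     #     server_dict["compute-01"] = {
+     #         "RAD": {0: {"memory": {"size": "64GB", "eligible": "32GB"},
+     #                     "cpus": {"cores": [...], "eligible_cores": [...]},
+     #                     "ports": {"0000:05:00.0": "speed:10000M"}}},
+     #         "occupation": {0: {"memory": "8GB", "cores": [...],
+     #                            "ports": {"0000:05:00.0": {"occupied": "10%"}}}}}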
+     def get_hosts_info(self):
+         '''Get the information of deployed hosts
+         Returns the hosts content'''
+     #obtain hosts list
+         url=self.url+'/hosts'
+         try:
+             vim_response = requests.get(url)
+         except requests.exceptions.RequestException as e:
+             print( "get_hosts_info Exception: ", e.args)
+             return -vimconn.HTTP_Not_Found, str(e.args[0])
+         print("vim get", url, "response:",  vim_response.status_code, vim_response.json())
+         #print vim_response.status_code
+         #print json.dumps(vim_response.json(), indent=4)
+         if vim_response.status_code != 200:
+             # TODO: get error
+             print('vimconnector.get_hosts_info error getting host list {} {}'.format(vim_response.status_code, vim_response.json()))
+             return -vim_response.status_code, "Error getting host list"
+         
+         res,hosts = self._format_in(vim_response, get_hosts_response_schema)
+             
+         if res==False:
+             print("vimconnector.get_hosts_info error parsing GET HOSTS vim response", hosts)
+             return vimconn.HTTP_Internal_Server_Error, hosts
+     #obtain hosts details
+         hosts_dict={}
+         for host in hosts['hosts']:
+             url=self.url+'/hosts/'+host['id']
+             try:
+                 vim_response = requests.get(url)
+             except requests.exceptions.RequestException as e:
+                 print( "get_hosts_info Exception: ", e.args)
+                 return -vimconn.HTTP_Not_Found, str(e.args[0])
+             print("vim get", url, "response:",  vim_response.status_code, vim_response.json())
+             if vim_response.status_code != 200:
+                 print('vimconnector.get_hosts_info error getting detailed host {} {}'.format(vim_response.status_code, vim_response.json()))
+                 continue
+             res,host_detail = self._format_in(vim_response, get_host_detail_response_schema)
+             if res==False:
+                 print ("vimconnector.get_hosts_info error parsing GET HOSTS/{} vim response {}".format(host['id']), host_detail)
+                 continue
+             #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+             self.host_vim2gui(host_detail, hosts_dict)
+         return 200, hosts_dict
+     def get_hosts(self, vim_tenant):
+         '''Get the hosts and deployed instances
+         Returns the hosts content'''
+     #obtain hosts list
+         url=self.url+'/hosts'
+         try:
+             vim_response = requests.get(url)
+         except requests.exceptions.RequestException as e:
+             print("get_hosts Exception: ", e.args)
+             return -vimconn.HTTP_Not_Found, str(e.args[0])
+         print("vim get", url, "response:",  vim_response.status_code, vim_response.json())
+         #print vim_response.status_code
+         #print json.dumps(vim_response.json(), indent=4)
+         if vim_response.status_code != 200:
+             #TODO: get error
+             print('vimconnector.get_hosts error getting host list {} {}'.format(vim_response.status_code, vim_response.json()))
+             return -vim_response.status_code, "Error getting host list"
+         
+         res,hosts = self._format_in(vim_response, get_hosts_response_schema)
+             
+         if res==False:
+             print("vimconnector.get_host error parsing GET HOSTS vim response", hosts)
+             return vimconn.HTTP_Internal_Server_Error, hosts
+     #obtain instances from hosts
+         for host in hosts['hosts']:
+             url=self.url+'/' + vim_tenant + '/servers?hostId='+host['id']
+             try:
+                 vim_response = requests.get(url)
+             except requests.exceptions.RequestException as e:
+                 print("get_hosts Exception: ", e.args)
+                 return -vimconn.HTTP_Not_Found, str(e.args[0])
+             print("vim get", url, "response:",  vim_response.status_code, vim_response.json())
+             if vim_response.status_code != 200:
+                 print('vimconnector.get_hosts error getting instances at host {} {}'.format(vim_response.status_code, vim_response.json()))
+                 continue
+             res,servers = self._format_in(vim_response, get_server_response_schema)
+             if res==False:
+                 print("vimconnector.get_host error parsing GET SERVERS/{} vim response {}".format(host['id']), servers)
+                 continue
+             #print 'host id '+host['id'], json.dumps(host_detail, indent=4)
+             host['instances'] = servers['servers']
+         return 200, hosts['hosts']
+     def get_processor_rankings(self):
+         '''Get the processor rankings in the VIM database'''
+         url=self.url+'/processor_ranking'
+         try:
+             vim_response = requests.get(url)
+         except requests.exceptions.RequestException as e:
+             print("get_processor_rankings Exception: ", e.args)
+             return -vimconn.HTTP_Not_Found, str(e.args[0])
+         print("vim get", url, "response:", vim_response.status_code, vim_response.json())
+         #print vim_response.status_code
+         #print json.dumps(vim_response.json(), indent=4)
+         if vim_response.status_code != 200:
+             #TODO: get error
+             print('vimconnector.get_processor_rankings error getting processor rankings {} {}'.format(vim_response.status_code, vim_response.json()))
+             return -vim_response.status_code, "Error getting processor rankings"
+         
+         res,rankings = self._format_in(vim_response, get_processor_rankings_response_schema)
+         return res, rankings['rankings']
+     
+     def new_host(self, host_data):
+         '''Adds a new host to VIM'''
+         '''Returns the host identifier'''
+         payload_req = host_data
+         try:
+             url = self.url_admin+'/hosts'
+             self.logger.info("Adding a new host POST %s", url)
+             vim_response = requests.post(url, headers = self.headers_req, data=payload_req)
+             self._check_http_request_response(vim_response)
+             self.logger.debug(vim_response.text)
+             #print json.dumps(vim_response.json(), indent=4)
+             response = vim_response.json()
+             js_v(response, new_host_response_schema)
+             r = self._remove_extra_items(response, new_host_response_schema)
+             if r is not None: 
+                 self.logger.warn("Warning: remove extra items %s", str(r))
+             host_id = response['host']['id']
+             return host_id
+         except (requests.exceptions.RequestException, js_e.ValidationError) as e:
+             self._format_request_exception(e)
+     
+     def new_external_port(self, port_data):
+         '''Adds an external port to VIM'''
+         '''Returns the port identifier'''
+         #TODO change to logging exception code policies
+         print( "VIMConnector: Adding a new external port")
+         payload_req = port_data
+         try:
+             vim_response = requests.post(self.url_admin+'/ports', headers = self.headers_req, data=payload_req)
+         except requests.exceptions.RequestException as e:
+             self.logger.error("new_external_port Exception: ", str(e))
+             return -vimconn.HTTP_Not_Found, str(e.args[0])
+         print( vim_response)
+         #print vim_response.status_code
+         if vim_response.status_code == 200:
+         #print vim_response.json()
+         #print json.dumps(vim_response.json(), indent=4)
+             res, http_content = self._format_in(vim_response, new_port_response_schema)
+         #print http_content
+             if res:
+                 r = self._remove_extra_items(http_content, new_port_response_schema)
+                 if r is not None: print("Warning: remove extra items ", r)
+                 #print http_content
+                 port_id = http_content['port']['id']
+                 print("Port id: ",port_id)
+                 return vim_response.status_code,port_id
+             else: return -vimconn.HTTP_Bad_Request,http_content
+         else:
+             #print vim_response.text
+             jsonerror = self._format_jsonerror(vim_response)
+             text = 'Error in VIM "{}": not possible to add new external port. HTTP Response: {}. Error: {}'.format(
+                 self.url_admin, vim_response.status_code, jsonerror)
+             #print text
+             return -vim_response.status_code,text
+         
+     def new_external_network(self,net_name,net_type):
+         '''Adds an external network to VIM (shared)'''
+         '''Returns the network identifier'''
+         #TODO change to logging exception code policies
+         print("VIMConnector: Adding external shared network to VIM (type " + net_type + "): "+ net_name)
+         
+         payload_req = '{"network":{"name": "' + net_name + '","shared":true,"type": "' + net_type + '"}}'
+         try:
+             vim_response = requests.post(self.url+'/networks', headers = self.headers_req, data=payload_req)
+         except requests.exceptions.RequestException as e:
+             self.logger.error( "new_external_network Exception: ", e.args)
+             return -vimconn.HTTP_Not_Found, str(e.args[0])
+         print(vim_response)
+         #print vim_response.status_code
+         if vim_response.status_code == 200:
+             #print vim_response.json()
+             #print json.dumps(vim_response.json(), indent=4)
+             res,http_content = self._format_in(vim_response, new_network_response_schema)
+             #print http_content
+             if res:
+                 r = self._remove_extra_items(http_content, new_network_response_schema)
+                 if r is not None: print("Warning: remove extra items ", r)
+                 #print http_content
+                 network_id = http_content['network']['id']
+                 print( "Network id: ",network_id)
+                 return vim_response.status_code,network_id
+             else: return -vimconn.HTTP_Bad_Request,http_content
+         else:
+             #print vim_response.text
+             jsonerror = self._format_jsonerror(vim_response)
+             text = 'Error in VIM "{}": not possible to add new external network. HTTP Response: {}. Error: {}'.format(
+                 self.url, vim_response.status_code, jsonerror)
+             #print text
+             return -vim_response.status_code,text
+         
+     def connect_port_network(self, port_id, network_id, admin=False):
+         '''Connects an external port to a network'''
+         '''Returns status code of the VIM response'''
+         #TODO change to logging exception code policies
+         print("VIMConnector: Connecting external port to network")
+         
+         payload_req = '{"port":{"network_id":"' + network_id + '"}}'
+         if admin:
+             if self.url_admin==None:
+                 return -vimconn.HTTP_Unauthorized, "datacenter does not have an admin URL"
+             url= self.url_admin
+         else:
+             url= self.url
+         try:
+             vim_response = requests.put(url +'/ports/'+port_id, headers = self.headers_req, data=payload_req)
+         except requests.exceptions.RequestException as e:
+             print("connect_port_network Exception: ", e.args)
+             return -vimconn.HTTP_Not_Found, str(e.args[0])
+         print(vim_response)
+         #print vim_response.status_code
+         if vim_response.status_code == 200:
+             #print vim_response.json()
+             #print json.dumps(vim_response.json(), indent=4)
+             res,http_content = self._format_in(vim_response, new_port_response_schema)
+             #print http_content
+             if res:
+                 r = self._remove_extra_items(http_content, new_port_response_schema)
+                 if r is not None: print("Warning: remove extra items ", r)
+                 #print http_content
+                 port_id = http_content['port']['id']
+                 print("Port id: ",port_id)
+                 return vim_response.status_code,port_id
+             else: return -vimconn.HTTP_Bad_Request,http_content
+         else:
+             print(vim_response.text)
+             jsonerror = self._format_jsonerror(vim_response)
+             text = 'Error in VIM "{}": not possible to connect external port to network. HTTP Response: {}.' \
+                    ' Error: {}'.format(self.url_admin, vim_response.status_code, jsonerror)
+             print(text)
+             return -vim_response.status_code,text
+         
index 0000000,7366faf..34a2518
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,19 +1,19 @@@
 -git+https://osm.etsi.org/gerrit/osm/RO.git@py3#egg=osm-ro&subdirectory=RO
+ ##
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #    http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ # implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ ##
+ PyYAML
+ requests
+ netaddr
++git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
index 0000000,db121e8..171f7d4
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,6587 +1,6706 @@@
 -# Copyright 2016-2017 VMware Inc.
+ # -*- coding: utf-8 -*-
+ ##
 -    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
++# Copyright 2016-2019 VMware Inc.
+ # This file is part of ETSI OSM
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact:  osslegalrouting@vmware.com
+ ##
+ """
+ vimconn_vmware implements an abstract class in order to interact with VMware vCloud Director.
+ mbayramov@vmware.com
+ """
+ from progressbar import Percentage, Bar, ETA, FileTransferSpeed, ProgressBar
+ from osm_ro import vimconn
+ import os
+ import shutil
+ import subprocess
+ import tempfile
+ import traceback
+ import itertools
+ import requests
+ import ssl
+ import atexit
+ from pyVmomi import vim, vmodl
+ from pyVim.connect import SmartConnect, Disconnect
+ from xml.etree import ElementTree as XmlElementTree
+ from lxml import etree as lxmlElementTree
+ import yaml
+ from pyvcloud.vcd.client import BasicLoginCredentials,Client,VcdTaskException
+ from pyvcloud.vcd.vdc import VDC
+ from pyvcloud.vcd.org import Org
+ import re
+ from pyvcloud.vcd.vapp import VApp
+ from xml.sax.saxutils import escape
+ import logging
+ import json
+ import time
+ import uuid
+ # import httplib
+ #For python3
+ #import http.client  # TODO py3 check
+ import hashlib
+ import socket
+ import struct
+ import netaddr
+ import random
+ # global variable for vcd connector type
+ STANDALONE = 'standalone'
+ # key for flavor dicts
+ FLAVOR_RAM_KEY = 'ram'
+ FLAVOR_VCPUS_KEY = 'vcpus'
+ FLAVOR_DISK_KEY = 'disk'
+ DEFAULT_IP_PROFILE = {'dhcp_count':50,
+                       'dhcp_enabled':True,
+                       'ip_version':"IPv4"
+                       }
+ # global variable for wait time
+ INTERVAL_TIME = 5
+ MAX_WAIT_TIME = 1800
+ API_VERSION = '27.0'
+ __author__ = "Mustafa Bayramov, Arpita Kate, Sachin Bhangare, Prakash Kasar"
+ __date__ = "$09-Mar-2018 11:09:29$"
+ __version__ = '0.2'
+ #     -1: "Could not be created",
+ #     0: "Unresolved",
+ #     1: "Resolved",
+ #     2: "Deployed",
+ #     3: "Suspended",
+ #     4: "Powered on",
+ #     5: "Waiting for user input",
+ #     6: "Unknown state",
+ #     7: "Unrecognized state",
+ #     8: "Powered off",
+ #     9: "Inconsistent state",
+ #     10: "Children do not all have the same status",
+ #     11: "Upload initiated, OVF descriptor pending",
+ #     12: "Upload initiated, copying contents",
+ #     13: "Upload initiated , disk contents pending",
+ #     14: "Upload has been quarantined",
+ #     15: "Upload quarantine period has expired"
+ # mapping vCD status to MANO
+ vcdStatusCode2manoFormat = {4: 'ACTIVE',
+                             7: 'PAUSED',
+                             3: 'SUSPENDED',
+                             8: 'INACTIVE',
+                             12: 'BUILD',
+                             -1: 'ERROR',
+                             14: 'DELETED'}
+ #
+ netStatus2manoFormat = {'ACTIVE': 'ACTIVE', 'PAUSED': 'PAUSED', 'INACTIVE': 'INACTIVE', 'BUILD': 'BUILD',
+                         'ERROR': 'ERROR', 'DELETED': 'DELETED'
+                         }
+ class vimconnector(vimconn.vimconnector):
+     # dict used to store flavor in memory
+     flavorlist = {}
+     def __init__(self, uuid=None, name=None, tenant_id=None, tenant_name=None,
+                  url=None, url_admin=None, user=None, passwd=None, log_level=None, config={}, persistent_info={}):
+         """
+         Constructor creates a VMware connector to vCloud Director.
+         By default the constructor doesn't validate the connection state, so a client can create the object with None arguments.
+         If the client specifies username, password, host and VDC name, the connector initializes the other missing attributes:
+         a) it initializes the organization UUID
+         b) it initializes the tenant_id/VDC ID (this information is derived from the tenant name)
+         Args:
+             uuid - the organization uuid.
+             name - the organization name; it must be present in vCloud Director.
+             tenant_id - the VDC uuid; it must be present in vCloud Director
+             tenant_name - the VDC name.
+             url - the hostname or IP address of vCloud Director
+             url_admin - same as above.
+             user - a user that is an administrator for the organization. The caller must make sure
+                     that the username has the right privileges.
+             password - the password for the user.
+             The VMware connector also requires PVDC administrative privileges and a separate account.
+             These variables must be passed via the config argument, a dict containing the keys
+             dict['admin_username']
+             dict['admin_password']
+             config - provides NSX and vCenter information
+             Returns:
+                 Nothing.
+         """
+         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url,
+                                       url_admin, user, passwd, log_level, config)
+         self.logger = logging.getLogger('openmano.vim.vmware')
+         self.logger.setLevel(10)
+         self.persistent_info = persistent_info
+         self.name = name
+         self.id = uuid
+         self.url = url
+         self.url_admin = url_admin
+         self.tenant_id = tenant_id
+         self.tenant_name = tenant_name
+         self.user = user
+         self.passwd = passwd
+         self.config = config
+         self.admin_password = None
+         self.admin_user = None
+         self.org_name = ""
+         self.nsx_manager = None
+         self.nsx_user = None
+         self.nsx_password = None
+         self.availability_zone = None
+         # Disable warnings from self-signed certificates.
+         requests.packages.urllib3.disable_warnings()
+         if tenant_name is not None:
+             orgnameandtenant = tenant_name.split(":")
+             if len(orgnameandtenant) == 2:
+                 self.tenant_name = orgnameandtenant[1]
+                 self.org_name = orgnameandtenant[0]
+             else:
+                 self.tenant_name = tenant_name
+         if "orgname" in config:
+             self.org_name = config['orgname']
+         if log_level:
+             self.logger.setLevel(getattr(logging, log_level))
+         try:
+             self.admin_user = config['admin_username']
+             self.admin_password = config['admin_password']
+         except KeyError:
+             raise vimconn.vimconnException(message="Error admin username or admin password is empty.")
+         try:
+             self.nsx_manager = config['nsx_manager']
+             self.nsx_user = config['nsx_user']
+             self.nsx_password = config['nsx_password']
+         except KeyError:
+             raise vimconn.vimconnException(message="Error: nsx manager or nsx user or nsx password is empty in Config")
+         self.vcenter_ip = config.get("vcenter_ip", None)
+         self.vcenter_port = config.get("vcenter_port", None)
+         self.vcenter_user = config.get("vcenter_user", None)
+         self.vcenter_password = config.get("vcenter_password", None)
+         #Set availability zone for Affinity rules
+         self.availability_zone = self.set_availability_zones()
+ # ############# Stub code for SRIOV #################
+ #         try:
+ #             self.dvs_name = config['dv_switch_name']
+ #         except KeyError:
+ #             raise vimconn.vimconnException(message="Error: distributed virtual switch name is empty in Config")
+ #
+ #         self.vlanID_range = config.get("vlanID_range", None)
+         self.org_uuid = None
+         self.client = None
+         if not url:
+             raise vimconn.vimconnException('url param can not be NoneType')
+         if not self.url_admin:  # try to use normal url
+             self.url_admin = self.url
+         logging.debug("UUID: {} name: {} tenant_id: {} tenant name {}".format(self.id, self.org_name,
+                                                                               self.tenant_id, self.tenant_name))
+         logging.debug("vcd url {} vcd username: {} vcd password: {}".format(self.url, self.user, self.passwd))
+         logging.debug("vcd admin username {} vcd admin password {}".format(self.admin_user, self.admin_password))
+         # initialize organization
+         if self.user is not None and self.passwd is not None and self.url:
+             self.init_organization()
+     def __getitem__(self, index):
+         if index == 'name':
+             return self.name
+         elif index == 'tenant_id':
+             return self.tenant_id
+         elif index == 'tenant_name':
+             return self.tenant_name
+         elif index == 'id':
+             return self.id
+         elif index == 'org_name':
+             return self.org_name
+         elif index == 'org_uuid':
+             return self.org_uuid
+         elif index == 'user':
+             return self.user
+         elif index == 'passwd':
+             return self.passwd
+         elif index == 'url':
+             return self.url
+         elif index == 'url_admin':
+             return self.url_admin
+         elif index == "config":
+             return self.config
+         else:
+             raise KeyError("Invalid key '{}'".format(index))
+     def __setitem__(self, index, value):
+         if index == 'name':
+             self.name = value
+         elif index == 'tenant_id':
+             self.tenant_id = value
+         elif index == 'tenant_name':
+             self.tenant_name = value
+         elif index == 'id':
+             self.id = value
+         elif index == 'org_name':
+             self.org_name = value
+         elif index == 'org_uuid':
+             self.org_uuid = value
+         elif index == 'user':
+             self.user = value
+         elif index == 'passwd':
+             self.passwd = value
+         elif index == 'url':
+             self.url = value
+         elif index == 'url_admin':
+             self.url_admin = value
+         else:
+             raise KeyError("Invalid key '{}'".format(index))
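+     # Illustrative dict-style access (hypothetical instance): conn['tenant_name'] reads an
+     # attribute, and conn['url'] = 'https://vcd.example.com' assigns one.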
+     def connect_as_admin(self):
+         """ Method connects as the PVDC admin user to vCloud director.
+             Certain actions can be done only by the provider vdc admin user,
+             e.g. organization creation or provider network creation.
+             Returns:
+                 The client object that can later be used to connect to vCloud director as admin for the provider vdc
+         """
+         self.logger.debug("Logging into vCD {} as admin.".format(self.org_name))
+         try:
+             host = self.url
+             org = 'System'
+             client_as_admin = Client(host, verify_ssl_certs=False)
+             client_as_admin.set_highest_supported_version()
+             client_as_admin.set_credentials(BasicLoginCredentials(self.admin_user, org, self.admin_password))
+         except Exception as e:
+             raise vimconn.vimconnException(
+                   "Can't connect to a vCloud director as: {} with exception {}".format(self.admin_user, e))
+         return client_as_admin
+     def connect(self):
+         """ Method connects as a normal user to vCloud director.
+             Returns:
+                 The client object that can later be used to connect to vCloud director as admin for the VDC
+         """
+         try:
+             self.logger.debug("Logging into vCD {} as {} to datacenter {}.".format(self.org_name,
+                                                                                    self.user,
+                                                                                    self.tenant_name))
+             host = self.url
+             client = Client(host, verify_ssl_certs=False)
+             client.set_highest_supported_version()
+             client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
+         except Exception:
+             raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+                                                      "{} as user: {}".format(self.org_name, self.user))
+         return client
+     def init_organization(self):
+         """ Method initializes the organization UUID and VDC parameters.
+             At a bare minimum the client must provide an organization name present in vCloud director, and a VDC.
+             The VDC UUID (tenant_id) will be initialized at run time if the client didn't pass it to the constructor.
+             The Org UUID will be initialized at run time if the data center is present in vCloud director.
+             Returns:
+                 None. On success self.client and self.org_uuid are set; on failure self.org_uuid is reset to None.
+         """
+         client = self.connect()
+         if not client:
+             raise vimconn.vimconnConnectionException("Failed to connect vCD.")
+         self.client = client
+         try:
+             if self.org_uuid is None:
+                 org_list = client.get_org_list()
+                 for org in org_list.Org:
+                     # we set org UUID at the init phase but we can do it only when we have valid credential.
+                     if org.get('name') == self.org_name:
+                         self.org_uuid = org.get('href').split('/')[-1]
+                         self.logger.debug("Setting organization UUID {}".format(self.org_uuid))
+                         break
+                 else:
+                     raise vimconn.vimconnException("Vcloud director organization {} not found".format(self.org_name))
+                 # if found, request the org details
+                 org_details_dict = self.get_org(org_uuid=self.org_uuid)
+                 # there are two cases when initializing the VDC ID or VDC name at run time
+                 # case one: tenant_name provided but no tenant id
+                 if self.tenant_id is None and self.tenant_name is not None and 'vdcs' in org_details_dict:
+                     vdcs_dict = org_details_dict['vdcs']
+                     for vdc in vdcs_dict:
+                         if vdcs_dict[vdc] == self.tenant_name:
+                             self.tenant_id = vdc
+                             self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
+                                                                                                     self.org_name))
+                             break
+                     else:
+                         raise vimconn.vimconnException("Tenant name indicated but not present in vcloud director.")
+                 # case two: we have tenant_id but we don't have the tenant name, so we find and set it.
+                 if self.tenant_id is not None and self.tenant_name is None and 'vdcs' in org_details_dict:
+                     vdcs_dict = org_details_dict['vdcs']
+                     for vdc in vdcs_dict:
+                         if vdc == self.tenant_id:
+                             self.tenant_name = vdcs_dict[vdc]
+                             self.logger.debug("Setting vdc uuid {} for organization UUID {}".format(self.tenant_id,
+                                                                                                     self.org_name))
+                             break
+                     else:
+                         raise vimconn.vimconnException("Tenant id indicated but not present in vcloud director")
+             self.logger.debug("Setting organization uuid {}".format(self.org_uuid))
+         except Exception:
+             self.logger.debug("Failed to initialize organization UUID for org {}".format(self.org_name))
+             self.logger.debug(traceback.format_exc())
+             self.org_uuid = None
+     def new_tenant(self, tenant_name=None, tenant_description=None):
+         """ Method adds a new tenant to VIM with this name.
+             This action requires the create-VDC permission in vCloud director.
+             Args:
+                 tenant_name is the name of the tenant to be created.
+                 tenant_description not used for this call
+             Return:
+                 returns the tenant identifier in UUID format.
+                 If the action fails the method raises vimconn.vimconnException
+             """
+         vdc_task = self.create_vdc(vdc_name=tenant_name)
+         if vdc_task is not None:
+             vdc_uuid, value = vdc_task.popitem()
+             self.logger.info("Created new vdc {} and uuid: {}".format(tenant_name, vdc_uuid))
+             return vdc_uuid
+         else:
+             raise vimconn.vimconnException("Failed to create tenant {}".format(tenant_name))
+     def delete_tenant(self, tenant_id=None):
+         """ Delete a tenant from VIM
+              Args:
+                 tenant_id is the id of the tenant to be deleted.
+             Return:
+                 returns the tenant identifier in UUID format.
+                 If the action fails the method raises an exception
+         """
+         vca = self.connect_as_admin()
+         if not vca:
+             raise vimconn.vimconnConnectionException("Failed to connect vCD")
+         if tenant_id is not None:
+             if vca._session:
+                 #Get OrgVDC
+                 url_list = [self.url, '/api/vdc/', tenant_id]
+                 orgvdc_href = ''.join(url_list)
+                 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                 response = self.perform_request(req_type='GET',
+                                                 url=orgvdc_href,
+                                                 headers=headers)
+                 if response.status_code != requests.codes.ok:
+                     self.logger.debug("delete_tenant(): GET REST API call {} failed. "\
+                                       "Return status code {}".format(orgvdc_href,
+                                                                      response.status_code))
+                     raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
+                 lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+                 namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+                 namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+                 vdc_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
+                 vdc_remove_href = vdc_remove_href + '?recursive=true&force=true'
+                 response = self.perform_request(req_type='DELETE',
+                                                 url=vdc_remove_href,
+                                                 headers=headers)
+                 if response.status_code == 202:
+                     time.sleep(5)
+                     return tenant_id
+                 else:
+                     self.logger.debug("delete_tenant(): DELETE REST API call {} failed. "\
+                                       "Return status code {}".format(vdc_remove_href,
+                                                                      response.status_code))
+                     raise vimconn.vimconnException("Fail to delete tenant with ID {}".format(tenant_id))
+         else:
+             self.logger.debug("delete_tenant(): Incorrect tenant ID {}".format(tenant_id))
+             raise vimconn.vimconnNotFoundException("Fail to get tenant {}".format(tenant_id))
+     def get_tenant_list(self, filter_dict={}):
+         """Obtain tenants of VIM
+         filter_dict can contain the following keys:
+             name: filter by tenant name
+             id: filter by tenant uuid/id
+             <other VIM specific>
+         Returns the tenant list of dictionaries:
+             [{'name': '<name>', 'id': '<id>', ...}, ...]
+         """
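+         # Illustrative call (hypothetical names): get_tenant_list({'name': 'my-vdc'})
+         # would return [{'name': 'my-vdc', 'id': '<vdc-uuid>'}].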
+         org_dict = self.get_org(self.org_uuid)
+         vdcs_dict = org_dict['vdcs']
+         vdclist = []
+         try:
+             for k in vdcs_dict:
+                 entry = {'name': vdcs_dict[k], 'id': k}
+                 # if caller didn't specify dictionary we return all tenants.
+                 if filter_dict is not None and filter_dict:
+                     filtered_entry = entry.copy()
+                     filtered_dict = set(entry.keys()) - set(filter_dict)
+                     for unwanted_key in filtered_dict:
+                         del entry[unwanted_key]
+                     if filter_dict == entry:
+                         vdclist.append(filtered_entry)
+                 else:
+                     vdclist.append(entry)
+         except Exception as e:
+             self.logger.debug("Error in get_tenant_list()")
+             self.logger.debug(traceback.format_exc())
+             raise vimconn.vimconnException("Incorrect state. {}".format(e))
+         return vdclist
 -            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
++    def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+         """Adds a tenant network to VIM
+         Params:
+             'net_name': name of the network
+             'net_type': one of:
+                 'bridge': overlay isolated network
+                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+             'ip_profile': is a dict containing the IP parameters of the network
+                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                 'dhcp_enabled': True or False
+                 'dhcp_start_address': ip_schema, first IP to grant
+                 'dhcp_count': number of IPs to grant.
+             'shared': if this network can be seen/use by other tenants/organization
 -        self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {}"
 -                          .format(net_name, net_type, ip_profile, shared))
++            'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
+         Returns a tuple with the network identifier and created_items, or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
 -                                           ip_profile=ip_profile, isshared=isshared)
++        self.logger.debug("new_network tenant {} net_type {} ip_profile {} shared {} provider_network_profile {}"
++                          .format(net_name, net_type, ip_profile, shared, provider_network_profile))
++        vlan = None
++        if provider_network_profile:
++            vlan = provider_network_profile.get("segmentation-id")
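++        # Illustrative inputs (hypothetical values):
++        #   ip_profile = {'ip_version': 'IPv4', 'subnet_address': '10.10.0.0/24',
++        #                 'gateway_address': '10.10.0.1', 'dhcp_enabled': True, 'dhcp_count': 50}
++        #   provider_network_profile = {'segmentation-id': 112, 'physical_network': 'physnet-dc1'}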
+         created_items = {}
+         isshared = 'false'
+         if shared:
+             isshared = 'true'
+ # ############# Stub code for SRIOV #################
+ #         if net_type == "data" or net_type == "ptp":
+ #             if self.config.get('dv_switch_name') == None:
+ #                  raise vimconn.vimconnConflictException("You must provide 'dv_switch_name' at config value")
+ #             network_uuid = self.create_dvPort_group(net_name)
++        parent_network_uuid = None
++        if provider_network_profile is not None:
++            for k, v in provider_network_profile.items():
++                if k == 'physical_network':
++                    parent_network_uuid = self.get_physical_network_by_name(v)
+         network_uuid = self.create_network(network_name=net_name, net_type=net_type,
 -
++                                           ip_profile=ip_profile, isshared=isshared,
++                                           parent_network_uuid=parent_network_uuid)
+         if network_uuid is not None:
+             return network_uuid, created_items
+         else:
+             raise vimconn.vimconnUnexpectedResponse("Failed to create a new network {}".format(net_name))
+     def get_vcd_network_list(self):
+         """ Method retrieves the list of networks available to the logged-in tenant's organization
+             Returns:
+                 The list of network dictionaries for the tenant VDC
+         """
+         self.logger.debug("get_vcd_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
+         if not self.tenant_name:
+             raise vimconn.vimconnConnectionException("Tenant name is empty.")
+         org, vdc = self.get_vdc_details()
+         if vdc is None:
+             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}".format(self.tenant_name))
+         vdc_uuid = vdc.get('id').split(":")[3]
+         if self.client._session:
+             headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='GET',
+                                             url=vdc.get('href'),
+                                             headers=headers)
+         if response.status_code != 200:
+             self.logger.error("Failed to get vdc content")
+             raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+         else:
+             content = XmlElementTree.fromstring(response.content)
+         network_list = []
+         try:
+             for item in content:
+                 if item.tag.split('}')[-1] == 'AvailableNetworks':
+                     for net in item:
+                         response = self.perform_request(req_type='GET',
+                                                         url=net.get('href'),
+                                                         headers=headers)
+                         if response.status_code != 200:
+                             self.logger.error("Failed to get network content")
+                             raise vimconn.vimconnNotFoundException("Failed to get network content")
+                         else:
+                             net_details = XmlElementTree.fromstring(response.content)
+                             filter_dict = {}
+                             net_uuid = net_details.get('id').split(":")
+                             if len(net_uuid) != 4:
+                                 continue
+                             else:
+                                 net_uuid = net_uuid[3]
+                                 # create dict entry
+                                 self.logger.debug("get_vcd_network_list(): Adding network {} "
+                                                   "to a list vcd id {} network {}".format(net_uuid,
+                                                                                           vdc_uuid,
+                                                                                           net_details.get('name')))
+                                 filter_dict["name"] = net_details.get('name')
+                                 filter_dict["id"] = net_uuid
+                                 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                     shared = True
+                                 else:
+                                     shared = False
+                                 filter_dict["shared"] = shared
+                                 filter_dict["tenant_id"] = vdc_uuid
+                                 if int(net_details.get('status')) == 1:
+                                     filter_dict["admin_state_up"] = True
+                                 else:
+                                     filter_dict["admin_state_up"] = False
+                                 filter_dict["status"] = "ACTIVE"
+                                 filter_dict["type"] = "bridge"
+                                 network_list.append(filter_dict)
+                                 self.logger.debug("get_vcd_network_list adding entry {}".format(filter_dict))
+         except Exception:
+             self.logger.debug("Error in get_vcd_network_list", exc_info=True)
+         self.logger.debug("get_vcd_network_list returning {}".format(network_list))
+         return network_list
+     def get_network_list(self, filter_dict={}):
+         """Obtain tenant networks of VIM
+         Filter_dict can be:
+             name: network name  OR/AND
+             id: network uuid    OR/AND
+             shared: boolean     OR/AND
+             tenant_id: tenant   OR/AND
+             admin_state_up: boolean
+             status: 'ACTIVE'
+         Returns the network list of dictionaries:
+             [{<the fields at Filter_dict plus some VIM specific>}, ...]
+             List can be empty
+         """
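+         # Illustrative filter (hypothetical values): get_network_list({'name': 'mgmt-net', 'shared': False})
+         # returns only networks whose listed fields all match the filter entries.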
+         self.logger.debug("get_network_list(): retrieving network list for vcd {}".format(self.tenant_name))
+         if not self.tenant_name:
+             raise vimconn.vimconnConnectionException("Tenant name is empty.")
+         org, vdc = self.get_vdc_details()
+         if vdc is None:
+             raise vimconn.vimconnConnectionException("Can't retrieve information for a VDC {}.".format(self.tenant_name))
+         try:
+             vdcid = vdc.get('id').split(":")[3]
+             if self.client._session:
+                 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                 response = self.perform_request(req_type='GET',
+                                                 url=vdc.get('href'),
+                                                 headers=headers)
+             if response.status_code != 200:
+                 self.logger.error("Failed to get vdc content")
+                 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+             else:
+                 content = XmlElementTree.fromstring(response.content)
+             network_list = []
+             for item in content:
+                 if item.tag.split('}')[-1] == 'AvailableNetworks':
+                     for net in item:
+                         response = self.perform_request(req_type='GET',
+                                                         url=net.get('href'),
+                                                         headers=headers)
+                         if response.status_code != 200:
+                             self.logger.error("Failed to get network content")
+                             raise vimconn.vimconnNotFoundException("Failed to get network content")
+                         else:
+                             net_details = XmlElementTree.fromstring(response.content)
+                             filter_entry = {}
+                             net_uuid = net_details.get('id').split(":")
+                             if len(net_uuid) != 4:
+                                 continue
+                             else:
+                                 net_uuid = net_uuid[3]
+                                 # create dict entry
+                                 self.logger.debug("get_network_list(): Adding net {}"
+                                                   " to a list vcd id {} network {}".format(net_uuid,
+                                                                                            vdcid,
+                                                                                            net_details.get('name')))
+                                 filter_entry["name"] = net_details.get('name')
+                                 filter_entry["id"] = net_uuid
+                                 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                     shared = True
+                                 else:
+                                     shared = False
+                                 filter_entry["shared"] = shared
+                                 filter_entry["tenant_id"] = vdcid
+                                 if int(net_details.get('status')) == 1:
+                                     filter_entry["admin_state_up"] = True
+                                 else:
+                                     filter_entry["admin_state_up"] = False
+                                 filter_entry["status"] = "ACTIVE"
+                                 filter_entry["type"] = "bridge"
+                                 filtered_entry = filter_entry.copy()
+                                 if filter_dict is not None and filter_dict:
+                                     # we remove all the key : value we don't care and match only
+                                     # respected field
+                                     filtered_dict = set(filter_entry.keys()) - set(filter_dict)
+                                     for unwanted_key in filtered_dict:
+                                         del filter_entry[unwanted_key]
+                                     if filter_dict == filter_entry:
+                                         network_list.append(filtered_entry)
+                                 else:
+                                     network_list.append(filtered_entry)
+         except Exception as e:
+             self.logger.debug("Error in get_network_list", exc_info=True)
+             if isinstance(e, vimconn.vimconnException):
+                 raise
+             else:
+                 raise vimconn.vimconnNotFoundException("Failed : Networks list not found {} ".format(e))
+         self.logger.debug("Returning {}".format(network_list))
+         return network_list
+     def get_network(self, net_id):
+         """Method obtains network details of a net_id VIM network
+            Returns a dict with the fields described in get_network_list plus some VIM specific fields"""
+         try:
+             org, vdc = self.get_vdc_details()
+             vdc_id = vdc.get('id').split(":")[3]
+             if self.client._session:
+                 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                 response = self.perform_request(req_type='GET',
+                                                 url=vdc.get('href'),
+                                                 headers=headers)
+             if response.status_code != 200:
+                 self.logger.error("Failed to get vdc content")
+                 raise vimconn.vimconnNotFoundException("Failed to get vdc content")
+             else:
+                 content = XmlElementTree.fromstring(response.content)
+             filter_dict = {}
+             for item in content:
+                 if item.tag.split('}')[-1] == 'AvailableNetworks':
+                     for net in item:
+                         response = self.perform_request(req_type='GET',
+                                                         url=net.get('href'),
+                                                         headers=headers)
+                         if response.status_code != 200:
+                             self.logger.error("Failed to get network content")
+                             raise vimconn.vimconnNotFoundException("Failed to get network content")
+                         else:
+                             net_details = XmlElementTree.fromstring(response.content)
+                             vdc_network_id = net_details.get('id').split(":")
+                             if len(vdc_network_id) == 4 and vdc_network_id[3] == net_id:
+                                 filter_dict["name"] = net_details.get('name')
+                                 filter_dict["id"] = vdc_network_id[3]
+                                 if [i.text for i in net_details if i.tag.split('}')[-1] == 'IsShared'][0] == 'true':
+                                     shared = True
+                                 else:
+                                     shared = False
+                                 filter_dict["shared"] = shared
+                                 filter_dict["tenant_id"] = vdc_id
+                                 if int(net_details.get('status')) == 1:
+                                     filter_dict["admin_state_up"] = True
+                                 else:
+                                     filter_dict["admin_state_up"] = False
+                                 filter_dict["status"] = "ACTIVE"
+                                 filter_dict["type"] = "bridge"
+                                 self.logger.debug("Returning {}".format(filter_dict))
+                                 return filter_dict
+                     else:
+                         raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
+         except Exception as e:
+             self.logger.debug("Error in get_network")
+             self.logger.debug(traceback.format_exc())
+             if isinstance(e, vimconn.vimconnException):
+                 raise
+             else:
+                 raise vimconn.vimconnNotFoundException("Failed : Network not found {} ".format(e))
+         return filter_dict
+     def delete_network(self, net_id, created_items=None):
+         """
+         Removes a tenant network from VIM and its associated elements
+         :param net_id: VIM identifier of the network, provided by method new_network
+         :param created_items: dictionary with extra items to be deleted, provided by method new_network
+         Returns the network identifier or raises an exception upon error or when network is not found
+         """
+         # ############# Stub code for SRIOV #################
+ #         dvport_group = self.get_dvport_group(net_id)
+ #         if dvport_group:
+ #             #delete portgroup
+ #             status = self.destroy_dvport_group(net_id)
+ #             if status:
+ #                 # Remove vlanID from persistent info
+ #                 if net_id in self.persistent_info["used_vlanIDs"]:
+ #                     del self.persistent_info["used_vlanIDs"][net_id]
+ #
+ #                 return net_id
+         vcd_network = self.get_vcd_network(network_uuid=net_id)
+         if vcd_network:
+             if self.delete_network_action(network_uuid=net_id):
+                 return net_id
+         else:
+             raise vimconn.vimconnNotFoundException("Network {} not found".format(net_id))
+     def refresh_nets_status(self, net_list):
+         """Get the status of the networks
+            Params: the list of network identifiers
+            Returns a dictionary with:
+                 net_id:         #VIM id of this network
+                     status:     #Mandatory. Text with one of:
+                                 #  DELETED (not found at vim)
+                                 #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                 #  OTHER (Vim reported other status not understood)
+                                 #  ERROR (VIM indicates an ERROR status)
+                                 #  ACTIVE, INACTIVE, DOWN (admin down),
+                                 #  BUILD (on building process)
+                                 #
+                     error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+         """
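+         # Illustrative return value (hypothetical uuid):
+         #   {'aaaa-bbbb': {'status': 'ACTIVE', 'error_msg': '', 'vim_info': '<yaml dump of the vCD network>'}}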
+         dict_entry = {}
+         try:
+             for net in net_list:
+                 errormsg = ''
+                 vcd_network = self.get_vcd_network(network_uuid=net)
+                 if vcd_network:
+                     if vcd_network['status'] == '1':
+                         status = 'ACTIVE'
+                     else:
+                         status = 'DOWN'
+                 else:
+                     status = 'DELETED'
+                     errormsg = 'Network not found.'
+                 dict_entry[net] = {'status': status, 'error_msg': errormsg,
+                                    'vim_info': yaml.safe_dump(vcd_network)}
+         except Exception:
+             self.logger.debug("Error in refresh_nets_status")
+             self.logger.debug(traceback.format_exc())
+         return dict_entry
+     def get_flavor(self, flavor_id):
+         """Obtain flavor details from the VIM
+             Returns the flavor dict details {'id': <>, 'name': <>, other vim specific}  # TODO to be made concrete
+         """
+         if flavor_id not in vimconnector.flavorlist:
+             raise vimconn.vimconnNotFoundException("Flavor not found.")
+         return vimconnector.flavorlist[flavor_id]
+     def new_flavor(self, flavor_data):
+         """Adds a tenant flavor to VIM
+             flavor_data contains a dictionary with information, keys:
+                 name: flavor name
+                 ram: memory (cloud type) in MBytes
+                 vcpus: cpus (cloud type)
+                 extended: EPA parameters
+                   - numas: #items requested in same NUMA
+                         memory: number of 1G huge pages memory
+                         paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                         interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                           - name: interface name
+                             dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                             bandwidth: X Gbps; requested guarantee bandwidth
+                             vpci: requested virtual PCI address
+                 disk: disk size
+                 is_public:
+                  #TODO to concrete
+         Returns the flavor identifier"""
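+         # Illustrative flavor_data (hypothetical values):
+         #   {'name': 'small', 'ram': 2048, 'vcpus': 2, 'disk': 10,
+         #    'extended': {'numas': [{'memory': 2, 'paired-threads': 2}]}}
+         # With that numa block, ram is overwritten to 2*1024 MB and vcpus to 2*2 threads.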
+         self.logger.debug("Creating new flavor - flavor_data: {}".format(flavor_data))
+         new_flavor = flavor_data
+         ram = flavor_data.get(FLAVOR_RAM_KEY, 1024)
+         cpu = flavor_data.get(FLAVOR_VCPUS_KEY, 1)
+         disk = flavor_data.get(FLAVOR_DISK_KEY, 0)
+         if not isinstance(ram, int):
+             raise vimconn.vimconnException("Non-integer value for ram")
+         elif not isinstance(cpu, int):
+             raise vimconn.vimconnException("Non-integer value for cpu")
+         elif not isinstance(disk, int):
+             raise vimconn.vimconnException("Non-integer value for disk")
+         extended_flv = flavor_data.get("extended")
+         if extended_flv:
+             numas = extended_flv.get("numas")
+             if numas:
+                 for numa in numas:
+                     #overwrite ram and vcpus
+                     if 'memory' in numa:
+                         ram = numa['memory']*1024
+                     if 'paired-threads' in numa:
+                         cpu = numa['paired-threads']*2
+                     elif 'cores' in numa:
+                         cpu = numa['cores']
+                     elif 'threads' in numa:
+                         cpu = numa['threads']
+         new_flavor[FLAVOR_RAM_KEY] = ram
+         new_flavor[FLAVOR_VCPUS_KEY] = cpu
+         new_flavor[FLAVOR_DISK_KEY] = disk
+         # generate a new uuid put to internal dict and return it.
+         flavor_id = uuid.uuid4()
+         vimconnector.flavorlist[str(flavor_id)] = new_flavor
+         self.logger.debug("Created flavor - {} : {}".format(flavor_id, new_flavor))
+         return str(flavor_id)
+     def delete_flavor(self, flavor_id):
+         """Deletes a tenant flavor from VIM identified by its id
+            Returns the used id or raise an exception
+         """
+         if flavor_id not in vimconnector.flavorlist:
+             raise vimconn.vimconnNotFoundException("Flavor not found.")
+         vimconnector.flavorlist.pop(flavor_id, None)
+         return flavor_id
+     def new_image(self, image_dict):
+         """
+         Adds a tenant image to VIM
+         Returns:
+             the image id if the image is created
+             raises an exception if there is an error
+         """
+         return self.get_image_id_from_path(image_dict['location'])
+     def delete_image(self, image_id):
+         """
+             Deletes a tenant image from VIM
+             Args:
+                 image_id is ID of Image to be deleted
+             Return:
+                 returns the image identifier in UUID format or raises an exception on error
+         """
+         conn = self.connect_as_admin()
+         if not conn:
+             raise vimconn.vimconnConnectionException("Failed to connect vCD")
+         # Get Catalog details
+         url_list = [self.url, '/api/catalog/', image_id]
+         catalog_href = ''.join(url_list)
+         headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                   'x-vcloud-authorization': conn._session.headers['x-vcloud-authorization']}
+         response = self.perform_request(req_type='GET',
+                                         url=catalog_href,
+                                         headers=headers)
+         if response.status_code != requests.codes.ok:
+             self.logger.debug("delete_image(): GET REST API call {} failed. "\
+                               "Return status code {}".format(catalog_href,
+                                                              response.status_code))
+             raise vimconn.vimconnNotFoundException("Fail to get image {}".format(image_id))
+         lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+         namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+         namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+         catalogItems_section = lxmlroot_respond.find("xmlns:CatalogItems",namespaces)
+         catalogItems = catalogItems_section.iterfind("xmlns:CatalogItem",namespaces)
+         for catalogItem in catalogItems:
+             catalogItem_href = catalogItem.attrib['href']
+             response = self.perform_request(req_type='GET',
+                                         url=catalogItem_href,
+                                         headers=headers)
+             if response.status_code != requests.codes.ok:
+                 self.logger.debug("delete_image(): GET REST API call {} failed. "\
+                                   "Return status code {}".format(catalogItem_href,
+                                                                  response.status_code))
+                 raise vimconn.vimconnNotFoundException("Fail to get catalogItem {} for catalog {}".format(
+                                                                                     catalogItem,
+                                                                                     image_id))
+             lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+             namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+             namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+             catalogitem_remove_href = lxmlroot_respond.find("xmlns:Link[@rel='remove']",namespaces).attrib['href']
+             #Remove catalogItem
+             response = self.perform_request(req_type='DELETE',
+                                         url=catalogitem_remove_href,
+                                         headers=headers)
+             if response.status_code == requests.codes.no_content:
+                 self.logger.debug("Deleted Catalog item {}".format(catalogItem))
+             else:
+                 raise vimconn.vimconnException("Fail to delete Catalog Item {}".format(catalogItem))
+         #Remove catalog
+         url_list = [self.url, '/api/admin/catalog/', image_id]
+         catalog_remove_href = ''.join(url_list)
+         response = self.perform_request(req_type='DELETE',
+                                         url=catalog_remove_href,
+                                         headers=headers)
+         if response.status_code == requests.codes.no_content:
+             self.logger.debug("Deleted Catalog {}".format(image_id))
+             return image_id
+         else:
+             raise vimconn.vimconnException("Fail to delete Catalog {}".format(image_id))
+     def catalog_exists(self, catalog_name, catalogs):
+         """
+         :param catalog_name:
+         :param catalogs:
+         :return:
+         """
+         for catalog in catalogs:
+             if catalog['name'] == catalog_name:
+                 return catalog['id']
+     def create_vimcatalog(self, vca=None, catalog_name=None):
+         """ Create new catalog entry in vCloud director.
+             Args
+                 vca:  vCloud director client.
+                 catalog_name: catalog that the client wishes to create. Note: no validation is done on the name;
+                 the client must make sure to provide a valid string representation.
+              Returns catalog id if the catalog was created, else None.
+         """
+         try:
+             lxml_catalog_element = vca.create_catalog(catalog_name, catalog_name)
+             if lxml_catalog_element:
+                 id_attr_value = lxml_catalog_element.get('id')  # 'urn:vcloud:catalog:7490d561-d384-4dac-8229-3575fd1fc7b4'
+                 return id_attr_value.split(':')[-1]
+             catalogs = vca.list_catalogs()
+         except Exception as ex:
+             self.logger.error(
+                 'create_vimcatalog(): Creation of catalog "{}" failed with error: {}'.format(catalog_name, ex))
+             raise
+         return self.catalog_exists(catalog_name, catalogs)
+     # noinspection PyIncorrectDocstring
+     def upload_ovf(self, vca=None, catalog_name=None, image_name=None, media_file_name=None,
+                    description='', progress=False, chunk_bytes=128 * 1024):
+         """
+         Uploads an OVF file to a vCloud catalog
+         :param chunk_bytes:
+         :param progress:
+         :param description:
+         :param image_name:
+         :param vca:
+         :param catalog_name: (str): The name of the catalog to upload the media.
+         :param media_file_name: (str): The name of the local media file to upload.
+         :return: (bool) True if the media file was successfully uploaded, False otherwise.
+         """
+         if not os.path.isfile(media_file_name):
+             raise vimconn.vimconnException("Can't read file {}. File not found.".format(media_file_name))
+         statinfo = os.stat(media_file_name)
+         #  find a catalog entry where we upload OVF.
+         #  create vApp Template and check the status; if vCD is able to read the OVF it will respond with the
+         #  appropriate status change.
+         #  if vCD can parse the OVF we upload the VMDK file
+         try:
+             for catalog in vca.list_catalogs():
+                 if catalog_name != catalog['name']:
+                     continue
+                 catalog_href = "{}/api/catalog/{}/action/upload".format(self.url, catalog['id'])
+                 data = """
+                 <UploadVAppTemplateParams name="{}" xmlns="http://www.vmware.com/vcloud/v1.5" xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"><Description>{} vApp Template</Description></UploadVAppTemplateParams>
+                 """.format(catalog_name, description)
+                 if self.client:
+                     headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                     headers['Content-Type'] = 'application/vnd.vmware.vcloud.uploadVAppTemplateParams+xml'
+                 response = self.perform_request(req_type='POST',
+                                                 url=catalog_href,
+                                                 headers=headers,
+                                                 data=data)
+                 if response.status_code == requests.codes.created:
+                     catalogItem = XmlElementTree.fromstring(response.content)
+                     entity = [child for child in catalogItem if
+                               child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                     href = entity.get('href')
+                     template = href
+                     response = self.perform_request(req_type='GET',
+                                                     url=href,
+                                                     headers=headers)
+                     if response.status_code == requests.codes.ok:
+                         headers['Content-Type'] = 'text/xml'
+                         result = re.search(r'rel="upload:default"\shref="(.*?\/descriptor.ovf)"', response.text)
+                         if result:
+                             transfer_href = result.group(1)
+                         response = self.perform_request(req_type='PUT',
+                                                     url=transfer_href,
+                                                     headers=headers,
+                                                     data=open(media_file_name, 'rb'))
+                         if response.status_code != requests.codes.ok:
+                             self.logger.debug(
+                                 "Failed create vApp template for catalog name {} and image {}".format(catalog_name,
+                                                                                                       media_file_name))
+                             return False
+                     # TODO fix this with an async block
+                     time.sleep(5)
+                     self.logger.debug("vApp template for catalog name {} and image {}".format(catalog_name, media_file_name))
+                     # uploading VMDK file
+                     # check status of OVF upload and upload remaining files.
+                     response = self.perform_request(req_type='GET',
+                                                     url=template,
+                                                     headers=headers)
+                     if response.status_code == requests.codes.ok:
+                         result = re.search(r'rel="upload:default"\s*href="(.*?vmdk)"', response.text)
+                         if result:
+                             link_href = result.group(1)
+                         # we skip the ovf since it is already uploaded.
+                         if 'ovf' in link_href:
+                             continue
+                         # The OVF file and the VMDK must be in the same directory
+                         head, tail = os.path.split(media_file_name)
+                         file_vmdk = head + '/' + link_href.split("/")[-1]
+                         if not os.path.isfile(file_vmdk):
+                             return False
+                         statinfo = os.stat(file_vmdk)
+                         if statinfo.st_size == 0:
+                             return False
+                         hrefvmdk = link_href
+                         if progress:
+                             widgets = ['Uploading file: ', Percentage(), ' ', Bar(), ' ', ETA(), ' ',
+                                            FileTransferSpeed()]
+                             progress_bar = ProgressBar(widgets=widgets, maxval=statinfo.st_size).start()
+                         bytes_transferred = 0
+                         f = open(file_vmdk, 'rb')
+                         while bytes_transferred < statinfo.st_size:
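+                             # Each chunk is PUT with an HTTP Content-Range header, e.g.
+                             # 'Content-Range: bytes 0-131071/524288000' (illustrative sizes).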
+                             my_bytes = f.read(chunk_bytes)
+                             if len(my_bytes) <= chunk_bytes:
+                                 headers['Content-Range'] = 'bytes {}-{}/{}'.format(
+                                     bytes_transferred, bytes_transferred + len(my_bytes) - 1, statinfo.st_size)
+                                 headers['Content-Length'] = str(len(my_bytes))
+                                 response = requests.put(url=hrefvmdk,
+                                                          headers=headers,
+                                                          data=my_bytes,
+                                                          verify=False)
+                                 if response.status_code == requests.codes.ok:
+                                     bytes_transferred += len(my_bytes)
+                                     if progress:
+                                         progress_bar.update(bytes_transferred)
+                                 else:
+                                     self.logger.debug(
+                                         'file upload failed with error: [{}] {}'.format(response.status_code,
+                                                                                         response.content))
+                                     f.close()
+                                     return False
+                         f.close()
+                         if progress:
+                             progress_bar.finish()
+                             time.sleep(10)
+                     return True
+                 else:
+                     self.logger.debug("Failed to retrieve vApp template for catalog name {} for OVF {}".
+                                       format(catalog_name, media_file_name))
+                     return False
+         except Exception as exp:
+             self.logger.debug("Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                 .format(catalog_name,media_file_name, exp))
+             raise vimconn.vimconnException(
+                 "Failed while uploading OVF to catalog {} for OVF file {} with Exception {}"
+                 .format(catalog_name,media_file_name, exp))
+         self.logger.debug("Failed to retrieve catalog name {} for OVF file {}".format(catalog_name, media_file_name))
+         return False
+     def upload_vimimage(self, vca=None, catalog_name=None, media_name=None, medial_file_name=None, progress=False):
+         """Upload media file"""
+         # TODO add named parameters for readability
+         return self.upload_ovf(vca=vca, catalog_name=catalog_name, image_name=media_name.split(".")[0],
+                                media_file_name=medial_file_name, description=medial_file_name, progress=progress)
+     def validate_uuid4(self, uuid_string=None):
+         """  Method validates the format of a UUID.
+         Return: True if the string represents a valid uuid
+         """
+         try:
+             uuid.UUID(uuid_string, version=4)
+         except ValueError:
+             return False
+         return True
+     def get_catalogid(self, catalog_name=None, catalogs=None):
+         """  Method checks the catalog list and returns the catalog ID in UUID format.
+         Args
+             catalog_name: catalog name as string
+             catalogs:  list of catalogs.
+         Return: catalog uuid or None
+         """
+         for catalog in catalogs:
+             if catalog['name'] == catalog_name:
+                 catalog_id = catalog['id']
+                 return catalog_id
+         return None
+     def get_catalogbyid(self, catalog_uuid=None, catalogs=None):
+         """  Method checks the catalog list and returns the catalog name, looked up by catalog UUID.
+         Args
+             catalog_uuid: catalog UUID as string
+             catalogs:  list of catalogs.
+         Return: catalog name or None
+         """
+         if not self.validate_uuid4(uuid_string=catalog_uuid):
+             return None
+         for catalog in catalogs:
+             catalog_id = catalog.get('id')
+             if catalog_id == catalog_uuid:
+                 return catalog.get('name')
+         return None
+     def get_catalog_obj(self, catalog_uuid=None, catalogs=None):
+         """  Method checks the catalog list and returns the catalog object, looked up by catalog UUID.
+         Args
+             catalog_uuid: catalog UUID as string
+             catalogs:  list of catalogs.
+         Return: catalog object or None
+         """
+         if not self.validate_uuid4(uuid_string=catalog_uuid):
+             return None
+         for catalog in catalogs:
+             catalog_id = catalog.get('id')
+             if catalog_id == catalog_uuid:
+                 return catalog
+         return None
+     def get_image_id_from_path(self, path=None, progress=False):
+         """  Method uploads an OVF image to vCloud director.
+         Each OVF image is represented as a single catalog entry in vCloud director.
+         The method checks for an existing catalog entry.  The check is done by file name without the file extension.
+         If the given catalog name is already present, the method will respond with the existing catalog uuid;
+         otherwise it will create a new catalog entry and upload the OVF file to the newly created catalog.
+         If the method can't create a catalog entry or upload a file, it will throw an exception.
+         The method accepts a boolean flag progress that will output a progress bar. This is useful
+         for the standalone upload use case, e.g. to test a large file upload.
+         Args
+             path: - valid path to OVF file.
+             progress - boolean flag to show a progress bar.
+         Return: if the image uploaded correctly the method will provide the image catalog UUID.
+         """
+         if not path:
+             raise vimconn.vimconnException("Image path can't be None.")
+         if not os.path.isfile(path):
+             raise vimconn.vimconnException("Can't read file. File not found.")
+         if not os.access(path, os.R_OK):
+             raise vimconn.vimconnException("Can't read file. Check file permission to read.")
+         self.logger.debug("get_image_id_from_path() client requesting {} ".format(path))
+         dirpath, filename = os.path.split(path)
+         flname, file_extension = os.path.splitext(path)
+         if file_extension != '.ovf':
+             self.logger.debug("Wrong file extension {} connector support only OVF container.".format(file_extension))
+             raise vimconn.vimconnException("Wrong container.  vCloud director supports only OVF.")
+         catalog_name = os.path.splitext(filename)[0]
+         catalog_md5_name = hashlib.md5(path.encode('utf-8')).hexdigest()
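+         # The catalog entry is keyed by the md5 of the OVF path, so a repeated upload of the same
+         # path reuses the existing catalog entry instead of uploading again.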
+         self.logger.debug("File name {} Catalog Name {} file path {} "
+                           "vdc catalog name {}".format(filename, catalog_name, path, catalog_md5_name))
+         try:
+             org,vdc = self.get_vdc_details()
+             catalogs = org.list_catalogs()
+         except Exception as exp:
+             self.logger.debug("Failed to get catalogs() with Exception {} ".format(exp))
+             raise vimconn.vimconnException("Failed to get catalogs() with Exception {} ".format(exp))
+         if len(catalogs) == 0:
+             self.logger.info("Creating a new catalog entry {} in vcloud director".format(catalog_name))
+             if self.create_vimcatalog(org, catalog_md5_name) is None:
+                 raise vimconn.vimconnException("Failed to create new catalog {} ".format(catalog_md5_name))
+             result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
+                                           media_name=filename, medial_file_name=path, progress=progress)
+             if not result:
+                 raise vimconn.vimconnException("Failed to create vApp template for catalog {} ".format(catalog_name))
+             return self.get_catalogid(catalog_name, catalogs)
+         else:
+             for catalog in catalogs:
+                 # search for existing catalog if we find same name we return ID
+                 # TODO optimize this
+                 if catalog['name'] == catalog_md5_name:
+                     self.logger.debug("Found existing catalog entry for {} "
+                                       "catalog id {}".format(catalog_name,
+                                                              self.get_catalogid(catalog_md5_name, catalogs)))
+                     return self.get_catalogid(catalog_md5_name, catalogs)
+         # if we didn't find existing catalog we create a new one and upload image.
+         self.logger.debug("Creating new catalog entry {} - {}".format(catalog_name, catalog_md5_name))
+         if self.create_vimcatalog(org, catalog_md5_name) is None:
+             raise vimconn.vimconnException("Failed create new catalog {} ".format(catalog_md5_name))
+         result = self.upload_vimimage(vca=org, catalog_name=catalog_md5_name,
+                                       media_name=filename, medial_file_name=path, progress=progress)
+         if not result:
+             raise vimconn.vimconnException("Failed create vApp template for catalog {} ".format(catalog_md5_name))
+         return self.get_catalogid(catalog_md5_name, org.list_catalogs())
+     def get_image_list(self, filter_dict={}):
+         '''Obtain tenant images from VIM
+         Filter_dict can be:
+             name: image name
+             id: image uuid
+             checksum: image checksum
+             location: image path
+         Returns the image list of dictionaries:
+             [{<the fields at Filter_dict plus some VIM specific>}, ...]
+             List can be empty
+         '''
+         try:
+             org, vdc = self.get_vdc_details()
+             image_list = []
+             catalogs = org.list_catalogs()
+             if len(catalogs) == 0:
+                 return image_list
+             else:
+                 for catalog in catalogs:
+                     catalog_uuid = catalog.get('id')
+                     name = catalog.get('name')
+                     filtered_dict = {}
+                     if filter_dict.get("name") and filter_dict["name"] != name:
+                         continue
+                     if filter_dict.get("id") and filter_dict["id"] != catalog_uuid:
+                         continue
+                     filtered_dict ["name"] = name
+                     filtered_dict ["id"] = catalog_uuid
+                     image_list.append(filtered_dict)
+                 self.logger.debug("List of already created catalog items: {}".format(image_list))
+                 return image_list
+         except Exception as exp:
+             raise vimconn.vimconnException("Exception occured while retriving catalog items {}".format(exp))
+     def get_vappid(self, vdc=None, vapp_name=None):
+         """ Method takes vdc object and vApp name and returns vapp uuid or None
+         Args:
+             vdc: The VDC object.
+             vapp_name: is application vappp name identifier
+         Returns:
+                 The return vApp name otherwise None
+         """
+         if vdc is None or vapp_name is None:
+             return None
+         # UUID has following format https://host/api/vApp/vapp-30da58a3-e7c7-4d09-8f68-d4c8201169cf
+         try:
+             refs = [ref for ref in vdc.ResourceEntities.ResourceEntity \
+                     if ref.name == vapp_name and ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
+             if len(refs) == 1:
+                 return refs[0].href.split("vapp")[1][1:]
+         except Exception as e:
+             self.logger.exception(e)
+             return False
+         return None
+     def check_vapp(self, vdc=None, vapp_uuid=None):
+         """ Method Method returns True or False if vapp deployed in vCloud director
+             Args:
+                 vca: Connector to VCA
+                 vdc: The VDC object.
+                 vappid: vappid is application identifier
+             Returns:
+                 The return True if vApp deployed
+                 :param vdc:
+                 :param vapp_uuid:
+         """
+         try:
+             refs = [ref for ref in vdc.ResourceEntities.ResourceEntity\
+                      if ref.type_ == 'application/vnd.vmware.vcloud.vApp+xml']
+             for ref in refs:
+                 vappid = ref.href.split("vapp")[1][1:]
+                 # find the vApp with the requested vapp uuid
+                 if vappid == vapp_uuid:
+                     return True
+         except Exception as e:
+             self.logger.exception(e)
+             return False
+         return False
+     def get_namebyvappid(self, vapp_uuid=None):
+         """Method returns vApp name from vCD and lookup done by vapp_id.
+         Args:
+             vapp_uuid: vappid is application identifier
+         Returns:
+             The return vApp name otherwise None
+         """
+         try:
+             if self.client and vapp_uuid:
+                 vapp_call = "{}/api/vApp/vapp-{}".format(self.url, vapp_uuid)
+                 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                      'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                 response = self.perform_request(req_type='GET',
+                                                 url=vapp_call,
+                                                 headers=headers)
+                 #Retry login if session expired & retry sending request
+                 if response.status_code == 403:
+                     response = self.retry_rest('GET', vapp_call)
+                 tree = XmlElementTree.fromstring(response.content)
+                 return tree.attrib['name']
+         except Exception as e:
+             self.logger.exception(e)
+             return None
+         return None
+     def new_vminstance(self, name=None, description="", start=False, image_id=None, flavor_id=None, net_list=[],
+                        cloud_config=None, disk_list=None, availability_zone_index=None, availability_zone_list=None):
+         """Adds a VM instance to VIM
+         Params:
+             'start': (boolean) indicates if VM must start or created in pause mode.
+             'image_id','flavor_id': image and flavor VIM id to use for the VM
+             'net_list': list of interfaces, each one is a dictionary with:
+                 'name': (optional) name for the interface.
+                 'net_id': VIM network id where this interface must be connect to. Mandatory for type==virtual
+                 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
+                 'mac_address': (optional) mac address to assign to this interface
+                 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type is VF and net_id is not provided,
+                     the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+                 'type': (mandatory) can be one of:
+                     'virtual', in this case always connected to a network of type 'net_type=bridge'
+                      'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
+                            can be created unconnected
+                      'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                      'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                             are allocated on the same physical NIC
+                 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                                 or True, it must apply the default VIM behaviour
+                 After execution the method will add the key:
+                 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                         interface. 'net_list' is modified
+             'cloud_config': (optional) dictionary with:
+                 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                 'users': (optional) list of users to be inserted, each item is a dict with:
+                     'name': (mandatory) user name,
+                     'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                     or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                 'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                     'dest': (mandatory) string with the destination absolute path
+                     'encoding': (optional, by default text). Can be one of:
+                         'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                     'content' (mandatory): string with the content of the file
+                     'permissions': (optional) string with file permissions, typically octal notation '0644'
+                     'owner': (optional) file owner, string with the format 'owner:group'
+                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+             'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                 'size': (mandatory) string with the size of the disk in GB
+             availability_zone_index: Index of availability_zone_list to use for this VM. None if no AZ is required
+             availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                 availability_zone_index is None
+         Returns a tuple with the instance identifier and created_items or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         self.logger.info("Creating new instance for entry {}".format(name))
+         self.logger.debug("desc {} boot {} image_id: {} flavor_id: {} net_list: {} cloud_config {} disk_list {} "\
+                           "availability_zone_index {} availability_zone_list {}"\
+                           .format(description, start, image_id, flavor_id, net_list, cloud_config, disk_list,\
+                                   availability_zone_index, availability_zone_list))
+         # new vm name = vmname + '-' + uuid
+         new_vm_name = [name, '-', str(uuid.uuid4())]
+         vmname_andid = ''.join(new_vm_name)
+         for net in net_list:
+             if net['type'] == "PCI-PASSTHROUGH":
+                 raise vimconn.vimconnNotSupportedException(
+                       "Current vCD version does not support type : {}".format(net['type']))
+         if len(net_list) > 10:
+             raise vimconn.vimconnNotSupportedException(
+                       "The VM hardware versions 7 and above support up to 10 NICs only")
+         # if vm already deployed we return existing uuid
+         # we check for presence of VDC, Catalog entry and Flavor.
+         org, vdc = self.get_vdc_details()
+         if vdc is None:
+             raise vimconn.vimconnNotFoundException(
+                 "new_vminstance(): Failed create vApp {}: (Failed retrieve VDC information)".format(name))
+         catalogs = org.list_catalogs()
+         if catalogs is None:
+             # Retry once by refreshing the token, in case the first call failed
+             self.get_token()
+             org = Org(self.client, resource=self.client.get_org())
+             catalogs = org.list_catalogs()
+         if catalogs is None:
+             raise vimconn.vimconnNotFoundException(
+                 "new_vminstance(): Failed create vApp {}: (Failed retrieve catalogs list)".format(name))
+         catalog_hash_name = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
+         if catalog_hash_name:
+             self.logger.info("Found catalog entry {} for image id {}".format(catalog_hash_name, image_id))
+         else:
+             raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
+                                                    "(Failed retrieve catalog information {})".format(name, image_id))
+         # Set vCPU and Memory based on flavor.
+         vm_cpus = None
+         vm_memory = None
+         vm_disk = None
+         numas = None
+         if flavor_id is not None:
+             if flavor_id not in vimconnector.flavorlist:
+                 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed create vApp {}: "
+                                                        "Failed retrieve flavor information "
+                                                        "flavor id {}".format(name, flavor_id))
+             else:
+                 try:
+                     flavor = vimconnector.flavorlist[flavor_id]
+                     vm_cpus = flavor[FLAVOR_VCPUS_KEY]
+                     vm_memory = flavor[FLAVOR_RAM_KEY]
+                     vm_disk = flavor[FLAVOR_DISK_KEY]
+                     extended = flavor.get("extended", None)
+                     if extended:
+                         numas=extended.get("numas", None)
+                 except Exception as exp:
+                     raise vimconn.vimconnException("Corrupted flavor. {}.Exception: {}".format(flavor_id, exp))
+         # the image upload creates the template name as "<catalog name> Template"
+         templateName = self.get_catalogbyid(catalog_uuid=image_id, catalogs=catalogs)
+         power_on = 'false'
+         if start:
+             power_on = 'true'
+         # client must provide at least one entry in net_list; if not, we report an error
+         # If a net's use is mgmt, configure it as the primary net and use its NIC index as the primary NIC
+         # If there is no mgmt net, the first net in net_list is considered the primary net (see the example below).
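+         # e.g. for net_list = [{'use': 'data', ...}, {'use': 'mgmt', ...}] the second entry
+         # becomes the primary net, and its NIC index is later used as the primary NIC index.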
+         primary_net = None
+         primary_netname = None
+         primary_net_href = None
+         network_mode = 'bridged'
+         if net_list is not None and len(net_list) > 0:
+             for net in net_list:
+                 if 'use' in net and net['use'] == 'mgmt' and not primary_net:
+                     primary_net = net
+             if primary_net is None:
+                 primary_net = net_list[0]
+             try:
+                 primary_net_id = primary_net['net_id']
+                 url_list = [self.url, '/api/network/', primary_net_id]
+                 primary_net_href = ''.join(url_list) 
+                 network_dict = self.get_vcd_network(network_uuid=primary_net_id)
+                 if 'name' in network_dict:
+                     primary_netname = network_dict['name']
+             except KeyError:
+                 raise vimconn.vimconnException("Corrupted flavor. {}".format(primary_net))
+         else:
+             raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed network list is empty.".format(name))
+         # use: 'data', 'bridge', 'mgmt'
+         # create vApp.  Set vcpu and ram based on flavor id.
+         try:
+             vdc_obj = VDC(self.client, resource=org.get_vdc(self.tenant_name))
+             if not vdc_obj:
+                 raise vimconn.vimconnNotFoundException("new_vminstance(): Failed to get VDC object")
+             for retry in (1,2):
+                 items = org.get_catalog_item(catalog_hash_name, catalog_hash_name)
+                 catalog_items = [items.attrib]
+                 if len(catalog_items) == 1:
+                     if self.client:
+                         headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                     response = self.perform_request(req_type='GET',
+                                                 url=catalog_items[0].get('href'),
+                                                 headers=headers)
+                     catalogItem = XmlElementTree.fromstring(response.content)
+                     entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                     vapp_tempalte_href = entity.get("href")
+                 response = self.perform_request(req_type='GET',
+                                                     url=vapp_tempalte_href,
+                                                     headers=headers)
+                 if response.status_code != requests.codes.ok:
+                     self.logger.debug("REST API call {} failed. Return status code {}".format(vapp_tempalte_href,
+                                                                                            response.status_code))
+                     raise vimconn.vimconnException("new_vminstance(): Failed to GET vApp template {}".format(
+                                                    vapp_tempalte_href))
+                 else:
+                     # response.content is bytes under Python 3; use response.text for a decoded string
+                     result = response.text.replace("\n", " ")
+                 vapp_template_tree = XmlElementTree.fromstring(response.content)
+                 children_element = [child for child in vapp_template_tree if 'Children' in child.tag][0]
+                 vm_element = [child for child in children_element if 'Vm' in child.tag][0]
+                 vm_name = vm_element.get('name')
+                 vm_id = vm_element.get('id')
+                 vm_href = vm_element.get('href')
+                 cpus = re.search(r'<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>', result).group(1)
+                 memory_mb = re.search(r'<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>', result).group(1)
+                 cores = re.search(r'<vmw:CoresPerSocket ovf:required.*?>(\d+)</vmw:CoresPerSocket>', result).group(1)
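+                 # cpus, memory_mb and cores are parsed out of the template's
+                 # ovf VirtualHardwareSection and substituted into the
+                 # instantiation payload built below.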
+                 headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
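+                 # The InstantiateVAppTemplateParams payload below creates the vApp
+                 # undeployed and powered off; the primary NIC is connected in bridged
+                 # mode with DHCP allocation, and the remaining NICs are attached later.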
+                 vdc_id = vdc.get('id').split(':')[-1]
+                 instantiate_vapp_href = "{}/api/vdc/{}/action/instantiateVAppTemplate".format(self.url,
+                                                                                                 vdc_id)
+                 data = """<?xml version="1.0" encoding="UTF-8"?>
+                 <InstantiateVAppTemplateParams
+                 xmlns="http://www.vmware.com/vcloud/v1.5"
+                 name="{}"
+                 deploy="false"
+                 powerOn="false"
+                 xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                 xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1">
+                 <Description>Vapp instantiation</Description>
+                 <InstantiationParams>
+                      <NetworkConfigSection>
+                          <ovf:Info>Configuration parameters for logical networks</ovf:Info>
+                          <NetworkConfig networkName="{}">
+                              <Configuration>
+                                  <ParentNetwork href="{}" />
+                                  <FenceMode>bridged</FenceMode>
+                              </Configuration>
+                          </NetworkConfig>
+                      </NetworkConfigSection>
+                 <LeaseSettingsSection
+                 type="application/vnd.vmware.vcloud.leaseSettingsSection+xml">
+                 <ovf:Info>Lease Settings</ovf:Info>
+                 <StorageLeaseInSeconds>172800</StorageLeaseInSeconds>
+                 <StorageLeaseExpiration>2014-04-25T08:08:16.438-07:00</StorageLeaseExpiration>
+                 </LeaseSettingsSection>
+                 </InstantiationParams>
+                 <Source href="{}"/>
+                 <SourcedItem>
+                 <Source href="{}" id="{}" name="{}"
+                 type="application/vnd.vmware.vcloud.vm+xml"/>
+                 <VmGeneralParams>
+                     <NeedsCustomization>false</NeedsCustomization>
+                 </VmGeneralParams>
+                 <InstantiationParams>
+                       <NetworkConnectionSection>
+                       <ovf:Info>Specifies the available VM network connections</ovf:Info>
+                       <NetworkConnection network="{}">
+                       <NetworkConnectionIndex>0</NetworkConnectionIndex>
+                       <IsConnected>true</IsConnected>
+                       <IpAddressAllocationMode>DHCP</IpAddressAllocationMode>
+                       </NetworkConnection>
+                       </NetworkConnectionSection><ovf:VirtualHardwareSection>
+                       <ovf:Info>Virtual hardware requirements</ovf:Info>
+                       <ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+                       xmlns:vmw="http://www.vmware.com/schema/ovf">
+                       <rasd:AllocationUnits>hertz * 10^6</rasd:AllocationUnits>
+                       <rasd:Description>Number of Virtual CPUs</rasd:Description>
+                       <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{cpu} virtual CPU(s)</rasd:ElementName>
+                       <rasd:InstanceID>4</rasd:InstanceID>
+                       <rasd:Reservation>0</rasd:Reservation>
+                       <rasd:ResourceType>3</rasd:ResourceType>
+                       <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{cpu}</rasd:VirtualQuantity>
+                       <rasd:Weight>0</rasd:Weight>
+                       <vmw:CoresPerSocket ovf:required="false">{core}</vmw:CoresPerSocket>
+                       </ovf:Item><ovf:Item xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData">
+                       <rasd:AllocationUnits>byte * 2^20</rasd:AllocationUnits>
+                       <rasd:Description>Memory Size</rasd:Description>
+                       <rasd:ElementName xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="str">{memory} MB of memory</rasd:ElementName>
+                       <rasd:InstanceID>5</rasd:InstanceID>
+                       <rasd:Reservation>0</rasd:Reservation>
+                       <rasd:ResourceType>4</rasd:ResourceType>
+                       <rasd:VirtualQuantity xmlns:py="http://codespeak.net/lxml/objectify/pytype" py:pytype="int">{memory}</rasd:VirtualQuantity>
+                       <rasd:Weight>0</rasd:Weight>
+                       </ovf:Item>
+                 </ovf:VirtualHardwareSection>
+                 </InstantiationParams>
+                 </SourcedItem>
+                 <AllEULAsAccepted>false</AllEULAsAccepted>
+                 </InstantiateVAppTemplateParams>""".format(vmname_andid,
+                                                         primary_netname,
+                                                         primary_net_href,
+                                                      vapp_tempalte_href,
+                                                                 vm_href,
+                                                                   vm_id,
+                                                                 vm_name,
+                                                         primary_netname,
+                                                                cpu=cpus,
+                                                              core=cores,
+                                                        memory=memory_mb)
+                 response = self.perform_request(req_type='POST',
+                                                 url=instantiate_vapp_href,
+                                                 headers=headers,
+                                                 data=data)
+                 if response.status_code != 201:
+                     self.logger.error("REST call {} failed reason : {}"\
+                          "status code : {}".format(instantiate_vapp_href,
+                                                         response.content,
+                                                    response.status_code))
+                     raise vimconn.vimconnException("new_vminstance(): Failed to create"\
+                                                         "vAapp {}".format(vmname_andid))
+                 else:
+                     vapptask = self.get_task_from_response(response.content)
+                 if vapptask is None and retry==1:
+                     self.get_token() # Retry getting token
+                     continue
+                 else:
+                     break
+             if vapptask is None or vapptask is False:
+                 raise vimconn.vimconnUnexpectedResponse(
+                     "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+             # wait for task to complete
+             result = self.client.get_task_monitor().wait_for_success(task=vapptask)
+             if result.get('status') == 'success':
+                 self.logger.debug("new_vminstance(): Sucessfully created Vapp {}".format(vmname_andid))
+             else:
+                 raise vimconn.vimconnUnexpectedResponse(
+                     "new_vminstance(): failed to create vApp {}".format(vmname_andid))
+         except Exception as exp:
+             raise vimconn.vimconnUnexpectedResponse(
+                 "new_vminstance(): failed to create vApp {} with Exception:{}".format(vmname_andid, exp))
+         # we should now have the vApp in undeployed state.
+         try:
+             vdc_obj = VDC(self.client, href=vdc.get('href'))
+             vapp_resource = vdc_obj.get_vapp(vmname_andid)
+             vapp_uuid = vapp_resource.get('id').split(':')[-1]
+             vapp = VApp(self.client, resource=vapp_resource)
+         except Exception as exp:
+             raise vimconn.vimconnUnexpectedResponse(
+                     "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                     .format(vmname_andid, exp))
+         if vapp_uuid is None:
+             raise vimconn.vimconnUnexpectedResponse(
+                 "new_vminstance(): Failed to retrieve vApp {} after creation".format(
+                                                                             vmname_andid))
+         # Add PCI passthrough/SRIOV configurations
+         vm_obj = None
+         pci_devices_info = []
+         reserve_memory = False
+         for net in net_list:
+             if net["type"] == "PF" or net["type"] == "PCI-PASSTHROUGH":
+                 pci_devices_info.append(net)
+             elif (net["type"] == "VF" or net["type"] == "SR-IOV" or net["type"] == "VFnotShared") and 'net_id'in net:
+                 reserve_memory = True
+         #Add PCI
+         if len(pci_devices_info) > 0:
+             self.logger.info("Need to add PCI devices {} into VM {}".format(pci_devices_info,
+                                                                         vmname_andid ))
+             PCI_devices_status, vm_obj, vcenter_conect = self.add_pci_devices(vapp_uuid,
+                                                                             pci_devices_info,
+                                                                             vmname_andid)
+             if PCI_devices_status:
+                 self.logger.info("Added PCI devices {} to VM {}".format(
+                                                             pci_devices_info,
+                                                             vmname_andid)
+                                  )
+                 reserve_memory = True
+             else:
+                 self.logger.info("Failed to add PCI devices {} to VM {}".format(
+                                                             pci_devices_info,
+                                                             vmname_andid)
+                                  )
+         # Modify vm disk
+         if vm_disk:
+             #Assuming there is only one disk in ovf and fast provisioning in organization vDC is disabled
+             result = self.modify_vm_disk(vapp_uuid, vm_disk)
+             if result:
+                 self.logger.debug("Modified Disk size of VM {} ".format(vmname_andid))
+         #Add new or existing disks to vApp
+         if disk_list:
+             added_existing_disk = False
+             for disk in disk_list:
+                 if 'device_type' in disk and disk['device_type'] == 'cdrom':
+                     image_id = disk['image_id']
+                     # Adding CD-ROM to VM
+                     # will revisit the code once the specification is ready to support this feature
+                     self.insert_media_to_vm(vapp, image_id)
+                 elif "image_id" in disk and disk["image_id"] is not None:
+                     self.logger.debug("Adding existing disk from image {} to vm {} ".format(
+                                                                     disk["image_id"] , vapp_uuid))
+                     self.add_existing_disk(catalogs=catalogs,
+                                            image_id=disk["image_id"],
+                                            size = disk["size"],
+                                            template_name=templateName,
+                                            vapp_uuid=vapp_uuid
+                                            )
+                     added_existing_disk = True
+                 else:
+                     # Wait till the previously added existing disk is reflected in the vCD database/API
+                     if added_existing_disk:
+                         time.sleep(5)
+                         added_existing_disk = False
+                     self.add_new_disk(vapp_uuid, disk['size'])
+         if numas:
+             # Assigning numa affinity setting
+             for numa in numas:
+                 if 'paired-threads-id' in numa:
+                     paired_threads_id = numa['paired-threads-id']
+                     self.set_numa_affinity(vapp_uuid, paired_threads_id)
+         # add NICs & connect to networks in netlist
+         try:
+             vdc_obj = VDC(self.client, href=vdc.get('href'))
+             vapp_resource = vdc_obj.get_vapp(vmname_andid)
+             vapp = VApp(self.client, resource=vapp_resource)
+             vapp_id = vapp_resource.get('id').split(':')[-1]
+             self.logger.info("Removing primary NIC: ")
+             # First remove all NICs so that NIC properties can be adjusted as needed
+             self.remove_primary_network_adapter_from_all_vms(vapp)
+             self.logger.info("Request to connect VM to a network: {}".format(net_list))
+             primary_nic_index = 0
+             nicIndex = 0
+             for net in net_list:
+                 # openmano uses network id in UUID format.
+                 # vCloud Director needs a name, so we do the reverse operation: from the provided UUID we look up the name
+                 # [{'use': 'bridge', 'net_id': '527d4bf7-566a-41e7-a9e7-ca3cdd9cef4f', 'type': 'virtual',
+                 #   'vpci': '0000:00:11.0', 'name': 'eth0'}]
+                 if 'net_id' not in net:
+                     continue
+                 # Using net_id as vim_id, i.e. the vim interface id, as there is no separate vim interface id
+                 # The same value will be returned in refresh_vms_status() as vim_interface_id
+                 net['vim_id'] = net['net_id']  # Provide the same VIM identifier as the VIM network
+                 interface_net_id = net['net_id']
+                 interface_net_name = self.get_network_name_by_id(network_uuid=interface_net_id)
+                 interface_network_mode = net['use']
+                 if interface_network_mode == 'mgmt':
+                     primary_nic_index = nicIndex
+                 """- POOL (A static IP address is allocated automatically from a pool of addresses.)
+                                   - DHCP (The IP address is obtained from a DHCP service.)
+                                   - MANUAL (The IP address is assigned manually in the IpAddress element.)
+                                   - NONE (No IP addressing mode specified.)"""
+                 if primary_netname is not None:
+                     self.logger.debug("new_vminstance(): Filtering by net name {}".format(interface_net_name))
+                     nets = [n for n in self.get_network_list() if n.get('name') == interface_net_name]
+                     if len(nets) == 1:
+                         self.logger.info("new_vminstance(): Found requested network: {}".format(nets[0].get('name')))
+                         if interface_net_name != primary_netname:
+                             # connect network to VM - with all DHCP by default
+                             self.logger.info("new_vminstance(): Attaching net {} to vapp".format(interface_net_name))
+                             self.connect_vapp_to_org_vdc_network(vapp_id, nets[0].get('name'))
+                         type_list = ('PF', 'PCI-PASSTHROUGH', 'VFnotShared')
+                         nic_type = 'VMXNET3'
+                         if 'type' in net and net['type'] not in type_list:
+                             # fetching nic type from vnf
+                             if 'model' in net:
+                                 if net['model'] is not None:
+                                     if net['model'].lower() in ('paravirt', 'virtio'):
+                                         nic_type = 'VMXNET3'
+                                     else:
+                                         # use the model requested in the VNF as the NIC type
+                                         nic_type = net['model']
+                                 self.logger.info("new_vminstance(): adding network adapter "\
+                                                           "to a network {}".format(nets[0].get('name')))
+                                 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
+                                                                 primary_nic_index,
+                                                                 nicIndex,
+                                                                 net,
+                                                                 nic_type=nic_type)
+                             else:
+                                 self.logger.info("new_vminstance(): adding network adapter "\
+                                                          "to a network {}".format(nets[0].get('name')))
+                                 if net['type'] in ['SR-IOV', 'VF']:
+                                     nic_type = net['type']
+                                 self.add_network_adapter_to_vms(vapp, nets[0].get('name'),
+                                                                 primary_nic_index,
+                                                                 nicIndex,
+                                                                 net,
+                                                                 nic_type=nic_type)
+                 nicIndex += 1
+             # cloud-init for ssh-key injection
+             if cloud_config:
+                 # Create a catalog which will carry the config drive ISO
+                 # This catalog is deleted during vApp deletion. The catalog name carries the
+                 # vApp UUID and that's how it gets identified during its deletion.
+                 config_drive_catalog_name = 'cfg_drv-' + vapp_uuid
+                 self.logger.info('new_vminstance(): Creating catalog "{}" to carry config drive ISO'.format(
+                     config_drive_catalog_name))
+                 config_drive_catalog_id = self.create_vimcatalog(org, config_drive_catalog_name)
+                 if config_drive_catalog_id is None:
+                     error_msg = "new_vminstance(): Failed to create new catalog '{}' to carry the config drive " \
+                                 "ISO".format(config_drive_catalog_name)
+                     raise Exception(error_msg)
+                 # Create config-drive ISO
+                 _, userdata = self._create_user_data(cloud_config)
+                 # self.logger.debug('new_vminstance(): The userdata for cloud-init: {}'.format(userdata))
+                 iso_path = self.create_config_drive_iso(userdata)
+                 self.logger.debug('new_vminstance(): The ISO is successfully created. Path: {}'.format(iso_path))
+                 self.logger.info('new_vminstance(): uploading iso to catalog {}'.format(config_drive_catalog_name))
+                 self.upload_iso_to_catalog(config_drive_catalog_id, iso_path)
+                 # Attach the config-drive ISO to the VM
+                 self.logger.info('new_vminstance(): Attaching the config-drive ISO to the VM')
+                 # The ISO remains in INVALID_STATE right after the PUT request (it's a blocking call though)
+                 time.sleep(5)
+                 self.insert_media_to_vm(vapp, config_drive_catalog_id)
+                 shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)
+             # If VM has PCI devices or SRIOV reserve memory for VM
+             if reserve_memory:
+                 self.reserve_memory_for_all_vms(vapp, memory_mb)
+             self.logger.debug("new_vminstance(): starting power on vApp {} ".format(vmname_andid))
+             poweron_task = self.power_on_vapp(vapp_id, vmname_andid)
+             result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+             if result.get('status') == 'success':
+                 self.logger.info("new_vminstance(): Successfully power on "\
+                                              "vApp {}".format(vmname_andid))
+             else:
+                 self.logger.error("new_vminstance(): failed to power on vApp "\
+                                                      "{}".format(vmname_andid))
+         except Exception as exp:
+             try:
+                 self.delete_vminstance(vapp_uuid)
+             except Exception as exp2:
+                 self.logger.error("new_vminstance rollback fail {}".format(exp2))
+             # it might be the case that a specific mandatory entry in the dict is empty, or some other pyvcloud exception
+             self.logger.error("new_vminstance(): Failed create new vm instance {} with exception {}"
+                               .format(name, exp))
+             raise vimconn.vimconnException("new_vminstance(): Failed create new vm instance {} with exception {}"
+                                            .format(name, exp))
+         # check if the vApp is deployed; if that is the case return the vApp UUID, otherwise raise an exception
+         wait_time = 0
+         vapp_uuid = None
+         while wait_time <= MAX_WAIT_TIME:
+             try:
+                 vapp_resource = vdc_obj.get_vapp(vmname_andid)
+                 vapp = VApp(self.client, resource=vapp_resource)
+             except Exception as exp:
+                 raise vimconn.vimconnUnexpectedResponse(
+                         "new_vminstance(): Failed to retrieve vApp {} after creation: Exception:{}"
+                         .format(vmname_andid, exp))
+             if vapp and vapp_resource.get('deployed') == 'true':
+                 vapp_uuid = vapp_resource.get('id').split(':')[-1]
+                 break
+             else:
+                 self.logger.debug("new_vminstance(): Wait for vApp {} to deploy".format(name))
+                 time.sleep(INTERVAL_TIME)
+             wait_time += INTERVAL_TIME
+         # SET Affinity Rule for VM
+         # Pre-requisites: the user has created Host Groups in vCenter with the respective hosts to be used
+         # While creating the VIM account, the user has to pass the Host Group names in the availability_zone list
+         # "availability_zone" is a part of the VIM "config" parameters
+         # For example, in VIM config: "availability_zone":["HG_170","HG_174","HG_175"]
+         # Host groups are referred to as availability zones
+         # With the following procedure, the deployed VM will be added into a VM group.
+         # Then a VM-to-Host affinity rule will be created using the VM group & Host group.
+         if availability_zone_list:
+             self.logger.debug("Existing Host Groups in VIM {}".format(self.config.get('availability_zone')))
+             #Admin access required for creating Affinity rules
+             client = self.connect_as_admin()
+             if not client:
+                 raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
+             else:
+                 self.client = client
+             if self.client:
+                 headers = {'Accept':'application/*+xml;version=27.0',
+                            'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             #Step1: Get provider vdc details from organization
+             pvdc_href = self.get_pvdc_for_org(self.tenant_name, headers)
+             if pvdc_href is not None:
+                 # Step2: Found required pvdc, now get resource pool information
+                 respool_href = self.get_resource_pool_details(pvdc_href, headers)
+                 if respool_href is None:
+                     #Raise error if respool_href not found
+                     msg = "new_vminstance():Error in finding resource pool details in pvdc {}"\
+                            .format(pvdc_href)
+                     self.log_message(msg)
+             #Step3: Verify requested availability zone(hostGroup) is present in vCD
+             # get availability Zone
+             vm_az = self.get_vm_availability_zone(availability_zone_index, availability_zone_list)
+             # check if provided av zone(hostGroup) is present in vCD VIM
+             status = self.check_availibility_zone(vm_az, respool_href, headers)
+             if status is False:
+                 msg = "new_vminstance(): Error in finding availability zone(Host Group): {} in "\
+                        "resource pool {} status: {}".format(vm_az,respool_href,status)
+                 self.log_message(msg)
+             else:
+                 self.logger.debug ("new_vminstance(): Availability zone {} found in VIM".format(vm_az))
+             #Step4: Find VM group references to create vm group
+             vmgrp_href = self.find_vmgroup_reference(respool_href, headers)
+             if vmgrp_href is None:
+                 msg = "new_vminstance(): No reference to VmGroup found in resource pool"
+                 self.log_message(msg)
+             #Step5: Create a VmGroup with name az_VmGroup
+             vmgrp_name = vm_az + "_" + name #Formed VM Group name = Host Group name + VM name
+             status = self.create_vmgroup(vmgrp_name, vmgrp_href, headers)
+             if status is not True:
+                 msg = "new_vminstance(): Error in creating VM group {}".format(vmgrp_name)
+                 self.log_message(msg)
+             #VM Group url to add vms to vm group
+             vmgrpname_url = self.url + "/api/admin/extension/vmGroup/name/"+ vmgrp_name
+             #Step6: Add VM to VM Group
+             #Find VM uuid from vapp_uuid
+             vm_details = self.get_vapp_details_rest(vapp_uuid)
+             vm_uuid = vm_details['vmuuid']
+             status = self.add_vm_to_vmgroup(vm_uuid, vmgrpname_url, vmgrp_name, headers)
+             if status is not True:
+                 msg = "new_vminstance(): Error in adding VM to VM group {}".format(vmgrp_name)
+                 self.log_message(msg)
+             #Step7: Create VM to Host affinity rule
+             addrule_href = self.get_add_rule_reference (respool_href, headers)
+             if addrule_href is None:
+                 msg = "new_vminstance(): Error in finding href to add rule in resource pool: {}"\
+                       .format(respool_href)
+                 self.log_message(msg)
+             status = self.create_vm_to_host_affinity_rule(addrule_href, vmgrp_name, vm_az, "Affinity",  headers)
+             if status is False:
+                 msg = "new_vminstance(): Error in creating affinity rule for VM {} in Host group {}"\
+                       .format(name, vm_az)
+                 self.log_message(msg)
+             else:
+                 self.logger.debug("new_vminstance(): Affinity rule created successfully. Added {} in Host group {}"\
+                                     .format(name, vm_az))
+             #Reset token to a normal user to perform other operations
+             self.get_token()
+         if vapp_uuid is not None:
+             return vapp_uuid, None
+         else:
+             raise vimconn.vimconnUnexpectedResponse("new_vminstance(): Failed create new vm instance {}".format(name))
+     def create_config_drive_iso(self, user_data):
+         tmpdir = tempfile.mkdtemp()
+         iso_path = os.path.join(tmpdir, 'ConfigDrive.iso')
+         latest_dir = os.path.join(tmpdir, 'openstack', 'latest')
+         os.makedirs(latest_dir)
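+         # Lay the files out as an OpenStack-style config drive (openstack/latest/...);
+         # cloud-init's ConfigDrive datasource picks this up from a volume labelled
+         # "config-2" (the label passed to genisoimage below).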
+         with open(os.path.join(latest_dir, 'meta_data.json'), 'w') as meta_file_obj, \
+                 open(os.path.join(latest_dir, 'user_data'), 'w') as userdata_file_obj:
+             userdata_file_obj.write(user_data)
+             meta_file_obj.write(json.dumps({"availability_zone": "nova",
+                                             "launch_index": 0,
+                                             "name": "ConfigDrive",
+                                             "uuid": str(uuid.uuid4())}
+                                            )
+                                 )
+         genisoimage_cmd = 'genisoimage -J -r -V config-2 -o {iso_path} {source_dir_path}'.format(
+             iso_path=iso_path, source_dir_path=tmpdir)
+         self.logger.info('create_config_drive_iso(): Creating ISO by running command "{}"'.format(genisoimage_cmd))
+         try:
+             # discard genisoimage's stdout; stderr is left visible for diagnostics
+             subprocess.check_call(genisoimage_cmd, shell=True, stdout=subprocess.DEVNULL)
+         except subprocess.CalledProcessError as e:
+             shutil.rmtree(tmpdir, ignore_errors=True)
+             error_msg = 'create_config_drive_iso(): Exception while running genisoimage command: {}'.format(e)
+             self.logger.error(error_msg)
+             raise Exception(error_msg)
+         return iso_path
+     def upload_iso_to_catalog(self, catalog_id, iso_file_path):
+         if not os.path.isfile(iso_file_path):
+             error_msg = "upload_iso_to_catalog(): Given iso file is not present. Given path: {}".format(iso_file_path)
+             self.logger.error(error_msg)
+             raise Exception(error_msg)
+         iso_file_stat = os.stat(iso_file_path)
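+         # vCD media upload is a multi-step workflow: POST a Media descriptor to the
+         # catalog's action/upload link, GET the resulting entity to discover the file
+         # upload URL, then PUT the raw ISO bytes and wait for the upload task to finish.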
+         xml_media_elem = '''<?xml version="1.0" encoding="UTF-8"?>
+                             <Media
+                                 xmlns="http://www.vmware.com/vcloud/v1.5"
+                                 name="{iso_name}"
+                                 size="{iso_size}"
+                                 imageType="iso">
+                                 <Description>ISO image for config-drive</Description>
+                             </Media>'''.format(iso_name=os.path.basename(iso_file_path), iso_size=iso_file_stat.st_size)
+         headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                    'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+         headers['Content-Type'] = 'application/vnd.vmware.vcloud.media+xml'
+         catalog_href = self.url + '/api/catalog/' + catalog_id + '/action/upload'
+         response = self.perform_request(req_type='POST', url=catalog_href, headers=headers, data=xml_media_elem)
+         if response.status_code != 201:
+             error_msg = "upload_iso_to_catalog(): Failed to POST an action/upload request to {}".format(catalog_href)
+             self.logger.error(error_msg)
+             raise Exception(error_msg)
+         catalogItem = XmlElementTree.fromstring(response.content)
+         entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.media+xml"][0]
+         entity_href = entity.get('href')
+         response = self.perform_request(req_type='GET', url=entity_href, headers=headers)
+         if response.status_code != 200:
+             raise Exception("upload_iso_to_catalog(): Failed to GET entity href {}".format(entity_href))
+         match = re.search(r'<Files>\s+?<File.+?href="(.+?)"/>\s+?</File>\s+?</Files>', response.text, re.DOTALL)
+         if match:
+             media_upload_href = match.group(1)
+         else:
+             raise Exception('Could not parse the upload URL for the media file from the last response')
+         upload_iso_task = self.get_task_from_response(response.content)
+         headers['Content-Type'] = 'application/octet-stream'
+         response = self.perform_request(req_type='PUT',
+                                         url=media_upload_href,
+                                         headers=headers,
+                                         data=open(iso_file_path, 'rb'))
+         if response.status_code != 200:
+             raise Exception('PUT request to "{}" failed'.format(media_upload_href))
+         result = self.client.get_task_monitor().wait_for_success(task=upload_iso_task)
+         if result.get('status') != 'success':
+             raise Exception('The upload iso task failed with status {}'.format(result.get('status')))
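+     # Illustrative (hypothetical) combined usage of the two helpers above:
+     #     iso_path = self.create_config_drive_iso("#cloud-config\nhostname: vm1\n")
+     #     self.upload_iso_to_catalog(catalog_id, iso_path)
+     #     shutil.rmtree(os.path.dirname(iso_path), ignore_errors=True)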
+     def get_vcd_availibility_zones(self,respool_href, headers):
+         """ Method to find presence of av zone is VIM resource pool
+             Args:
+                 respool_href - resource pool href
+                 headers - header information
+             Returns:
+                vcd_az - list of azone present in vCD
+         """
+         vcd_az = []
+         url=respool_href
+         resp = self.perform_request(req_type='GET',url=respool_href, headers=headers)
+         if resp.status_code != requests.codes.ok:
+             self.logger.debug ("REST API call {} failed. Return status code {}".format(url, resp.status_code))
+         else:
+             # Get the href to hostGroups and check whether the provided hostGroup is present in it
+             resp_xml = XmlElementTree.fromstring(resp.content)
+             for child in resp_xml:
+                 if 'VMWProviderVdcResourcePool' in child.tag:
+                     for schild in child:
+                         if 'Link' in schild.tag:
+                             if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
+                                 hostGroup = schild.attrib.get('href')
+                                 hg_resp = self.perform_request(req_type='GET',url=hostGroup, headers=headers)
+                                 if hg_resp.status_code != requests.codes.ok:
+                                     self.logger.debug ("REST API call {} failed. Return status code {}".format(hostGroup, hg_resp.status_code))
+                                 else:
+                                     hg_resp_xml =  XmlElementTree.fromstring(hg_resp.content)
+                                     for hostGroup in hg_resp_xml:
+                                         if 'HostGroup' in hostGroup.tag:
+                                             #append host group name to the list
+                                             vcd_az.append(hostGroup.attrib.get("name"))
+         return vcd_az
+     def set_availability_zones(self):
+         """
+         Set vim availability zone
+         """
+         availability_zone = None
+         vim_availability_zones = self.config.get('availability_zone')
+         if isinstance(vim_availability_zones, str):
+             availability_zone = [vim_availability_zones]
+         elif isinstance(vim_availability_zones, list):
+             availability_zone = vim_availability_zones
+         return availability_zone
+     def get_vm_availability_zone(self, availability_zone_index, availability_zone_list):
+         """
+         Return the availability zone to be used by the created VM.
+         returns: The VIM availability zone to be used or None
+         """
+         if availability_zone_index is None:
+             if not self.config.get('availability_zone'):
+                 return None
+             elif isinstance(self.config.get('availability_zone'), str):
+                 return self.config['availability_zone']
+             else:
+                 return self.config['availability_zone'][0]
+         vim_availability_zones = self.availability_zone
+         # check if the VIM offers enough availability zones as described in the VNFD
+         if vim_availability_zones and len(availability_zone_list) <= len(vim_availability_zones):
+             # check if all the names of NFV AV match VIM AV names
+             match_by_index = False
+             for av in availability_zone_list:
+                 if av not in vim_availability_zones:
+                     match_by_index = True
+                     break
+             if match_by_index:
+                 self.logger.debug("Required Availability zone or Host Group not found in VIM config")
+                 self.logger.debug("Input Availability zone list: {}".format(availability_zone_list))
+                 self.logger.debug("VIM configured Availability zones: {}".format(vim_availability_zones))
+                 self.logger.debug("VIM Availability zones will be used by index")
+                 return vim_availability_zones[availability_zone_index]
+             else:
+                 return availability_zone_list[availability_zone_index]
+         else:
+             raise vimconn.vimconnConflictException("No enough availability zones at VIM for this deployment")
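+     # Example (hypothetical): with VIM config "availability_zone": ["HG_170", "HG_174"]
+     # and a VNFD zone list ["zoneA", "zoneB"], no names match, so selection falls back
+     # to the index and availability_zone_index=1 resolves to "HG_174".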
+     def create_vm_to_host_affinity_rule(self, addrule_href, vmgrpname, hostgrpname, polarity, headers):
+         """ Method to create VM to Host Affinity rule in vCD
+         Args:
+             addrule_href - href to make a POST request
+             vmgrpname - name of the VM group created
+             hostgrpname - name of the host group created earlier
+             polarity - Affinity or Anti-affinity (default: Affinity)
+             headers - headers to make REST call
+         Returns:
+             True- if rule is created
+             False- Failed to create rule due to some error
+         """
+         task_status = False
+         rule_name = polarity + "_" + vmgrpname
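+         # The POST below returns 202 Accepted with an asynchronous task; the rule only
+         # exists once the task monitor reports success.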
+         payload = """<?xml version="1.0" encoding="UTF-8"?>
+                      <vmext:VMWVmHostAffinityRule
+                        xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
+                        xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                        type="application/vnd.vmware.admin.vmwVmHostAffinityRule+xml">
+                        <vcloud:Name>{}</vcloud:Name>
+                        <vcloud:IsEnabled>true</vcloud:IsEnabled>
+                        <vcloud:IsMandatory>true</vcloud:IsMandatory>
+                        <vcloud:Polarity>{}</vcloud:Polarity>
+                        <vmext:HostGroupName>{}</vmext:HostGroupName>
+                        <vmext:VmGroupName>{}</vmext:VmGroupName>
+                      </vmext:VMWVmHostAffinityRule>""".format(rule_name, polarity, hostgrpname, vmgrpname)
+         resp = self.perform_request(req_type='POST',url=addrule_href, headers=headers, data=payload)
+         if resp.status_code != requests.codes.accepted:
+             self.logger.debug ("REST API call {} failed. Return status code {}".format(addrule_href, resp.status_code))
+             task_status = False
+             return task_status
+         else:
+             affinity_task = self.get_task_from_response(resp.content)
+             self.logger.debug ("affinity_task: {}".format(affinity_task))
+             if affinity_task is None or affinity_task is False:
+                 raise vimconn.vimconnUnexpectedResponse("failed to find affinity task")
+             # wait for task to complete
+             result = self.client.get_task_monitor().wait_for_success(task=affinity_task)
+             if result.get('status') == 'success':
+                 self.logger.debug("Successfully created affinity rule {}".format(rule_name))
+                 return True
+             else:
+                 raise vimconn.vimconnUnexpectedResponse(
+                       "failed to create affinity rule {}".format(rule_name))
+     def get_add_rule_reference(self, respool_href, headers):
+         """ This method finds href to add vm to host affinity rule to vCD
+         Args:
+             respool_href- href to resource pool
+             headers- header information to make REST call
+         Returns:
+             None - if no valid href to add rule found or
+             addrule_href - href to add vm to host affinity rule of resource pool
+         """
+         addrule_href = None
+         resp = self.perform_request(req_type='GET', url=respool_href, headers=headers)
+         if resp.status_code != requests.codes.ok:
+             self.logger.debug("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+         else:
+             resp_xml = XmlElementTree.fromstring(resp.content)
+             for child in resp_xml:
+                 if 'VMWProviderVdcResourcePool' in child.tag:
+                     for schild in child:
+                         if 'Link' in schild.tag:
+                             if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmHostAffinityRule+xml" and \
+                                 schild.attrib.get('rel') == "add":
+                                 addrule_href = schild.attrib.get('href')
+                                 break
+         return addrule_href
+     def add_vm_to_vmgroup(self, vm_uuid, vmGroupNameURL, vmGroup_name, headers):
+         """ Method to add deployed VM to newly created VM Group.
+             This is required to create VM to Host affinity in vCD
+         Args:
+             vm_uuid- newly created vm uuid
+             vmGroupNameURL- URL to VM Group name
+             vmGroup_name- Name of VM group created
+             headers- Headers for REST request
+         Returns:
+             True - if VM is added to the VM group successfully
+             False - if any error is encountered
+         """
+         addvmtogrpURL = None
+         addvm_resp = self.perform_request(req_type='GET', url=vmGroupNameURL, headers=headers)
+         if addvm_resp.status_code != requests.codes.ok:
+             self.logger.debug("REST API call to get VM Group Name url {} failed. Return status code {}"
+                               .format(vmGroupNameURL, addvm_resp.status_code))
+             return False
+         else:
+             resp_xml = XmlElementTree.fromstring(addvm_resp.content)
+             for child in resp_xml:
+                 # find the 'addVms' link that allows POSTing VM references to this group
+                 if child.tag.split('}')[1] == 'Link' and child.attrib.get("rel") == "addVms":
+                     addvmtogrpURL = child.attrib.get("href")
+             if addvmtogrpURL is None:
+                 self.logger.debug("add_vm_to_vmgroup(): No 'addVms' link found for VM group {}".format(vmGroup_name))
+                 return False
+         # Get vm details
+         url_list = [self.url, '/api/vApp/vm-', vm_uuid]
+         vmdetailsURL = ''.join(url_list)
+         resp = self.perform_request(req_type='GET', url=vmdetailsURL, headers=headers)
+         if resp.status_code != requests.codes.ok:
+             self.logger.debug("REST API call {} failed. Return status code {}".format(vmdetailsURL, resp.status_code))
+             return False
+         #Parse VM details
+         resp_xml = XmlElementTree.fromstring(resp.content)
+         if resp_xml.tag.split('}')[1] == "Vm":
+             vm_id = resp_xml.attrib.get("id")
+             vm_name = resp_xml.attrib.get("name")
+             vm_href = resp_xml.attrib.get("href")
+         #Add VM into VMgroup
+         payload = """<?xml version="1.0" encoding="UTF-8"?>\
+                    <ns2:Vms xmlns:ns2="http://www.vmware.com/vcloud/v1.5" \
+                     xmlns="http://www.vmware.com/vcloud/versions" \
+                     xmlns:ns3="http://schemas.dmtf.org/ovf/envelope/1" \
+                     xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" \
+                     xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/common" \
+                     xmlns:ns6="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" \
+                     xmlns:ns7="http://www.vmware.com/schema/ovf" \
+                     xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" \
+                     xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">\
+                     <ns2:VmReference href="{}" id="{}" name="{}" \
+                     type="application/vnd.vmware.vcloud.vm+xml" />\
+                    </ns2:Vms>""".format(vm_href, vm_id, vm_name)
+         addvmtogrp_resp = self.perform_request(req_type='POST', url=addvmtogrpURL, headers=headers, data=payload)
+         if addvmtogrp_resp.status_code != requests.codes.accepted:
+             self.logger.debug("REST API call {} failed. Return status code {}".format(addvmtogrpURL, addvmtogrp_resp.status_code))
+             return False
+         else:
+             self.logger.debug ("Done adding VM {} to VMgroup {}".format(vm_name, vmGroup_name))
+             return True
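+     # Illustrative usage (not part of the original code; the UUID and URL are
+     # hypothetical):
+     #     added = self.add_vm_to_vmgroup(vm_uuid, vm_group_name_url, 'vmgrp-1', headers)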
+     def create_vmgroup(self, vmgroup_name, vmgroup_href, headers):
+         """Method to create a VM group in vCD
+            Args:
+               vmgroup_name : Name of VM group to be created
+               vmgroup_href : href for vmgroup
+               headers- Headers for REST request
+         """
+         #POST to add URL with required data
+         vmgroup_status = False
+         payload = """<VMWVmGroup xmlns="http://www.vmware.com/vcloud/extension/v1.5" \
+                        xmlns:vcloud_v1.5="http://www.vmware.com/vcloud/v1.5" name="{}">\
+                    <vmCount>1</vmCount>\
+                    </VMWVmGroup>""".format(vmgroup_name)
+         resp = self.perform_request(req_type='POST', url=vmgroup_href, headers=headers, data=payload)
+         if resp.status_code != requests.codes.accepted:
+             self.logger.debug("REST API call {} failed. Return status code {}".format(vmgroup_href, resp.status_code))
+             return vmgroup_status
+         else:
+             vmgroup_task = self.get_task_from_response(resp.content)
+             if vmgroup_task is None or vmgroup_task is False:
+                 raise vimconn.vimconnUnexpectedResponse(
+                     "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+             # wait for task to complete
+             result = self.client.get_task_monitor().wait_for_success(task=vmgroup_task)
+             if result.get('status') == 'success':
+                 self.logger.debug("create_vmgroup(): Successfully created VM group {}".format(vmgroup_name))
+                 vmgroup_status = True
+                 return vmgroup_status
+             else:
+                 raise vimconn.vimconnUnexpectedResponse(
+                         "create_vmgroup(): failed to create VM group {}".format(vmgroup_name))
+     def find_vmgroup_reference(self, url, headers):
+         """ Method to create a new VMGroup which is required to add created VM
+             Args:
+                url- resource pool href
+                headers- header information
+             Returns:
+                returns href to VM group to create VM group
+         """
+         # Perform GET on resource pool to find 'add' link to create VMGroup
+         # https://vcd-ip/api/admin/extension/providervdc/<providervdc id>/resourcePools
+         vmgrp_href = None
+         resp = self.perform_request(req_type='GET', url=url, headers=headers)
+         if resp.status_code != requests.codes.ok:
+             self.logger.debug("REST API call {} failed. Return status code {}".format(url, resp.status_code))
+         else:
+             #Get the href to add vmGroup to vCD
+             resp_xml = XmlElementTree.fromstring(resp.content)
+             for child in resp_xml:
+                 if 'VMWProviderVdcResourcePool' in child.tag:
+                     for schild in child:
+                         if 'Link' in schild.tag:
+                             #Find href with type VMGroup and rel with add
+                             if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwVmGroupType+xml"\
+                                 and schild.attrib.get('rel') == "add":
+                                 vmgrp_href = schild.attrib.get('href')
+                                 return vmgrp_href
+     def check_availibility_zone(self, az, respool_href, headers):
+         """ Method to verify requested av zone is present or not in provided
+             resource pool
+             Args:
+                 az - name of hostgroup (availibility_zone)
+                 respool_href - Resource Pool href
+                 headers - Headers to make REST call
+             Returns:
+                 az_found - True if availibility_zone is found else False
+         """
+         az_found = False
+         headers['Accept'] = 'application/*+xml;version=27.0'
+         resp = self.perform_request(req_type='GET', url=respool_href, headers=headers)
+         if resp.status_code != requests.codes.ok:
+             self.logger.debug("REST API call {} failed. Return status code {}".format(respool_href, resp.status_code))
+         else:
+             # Get the href to hostGroups and find whether the provided hostGroup is present in it
+             resp_xml = XmlElementTree.fromstring(resp.content)
+             for child in resp_xml:
+                 if 'VMWProviderVdcResourcePool' in child.tag:
+                     for schild in child:
+                         if 'Link' in schild.tag:
+                             if schild.attrib.get('type') == "application/vnd.vmware.admin.vmwHostGroupsType+xml":
+                                 hostGroup_href = schild.attrib.get('href')
+                                 hg_resp = self.perform_request(req_type='GET', url=hostGroup_href, headers=headers)
+                                 if hg_resp.status_code != requests.codes.ok:
+                                     self.logger.debug("REST API call {} failed. Return status code {}".format(hostGroup_href, hg_resp.status_code))
+                                 else:
+                                     hg_resp_xml = XmlElementTree.fromstring(hg_resp.content)
+                                     for hostGroup in hg_resp_xml:
+                                         if 'HostGroup' in hostGroup.tag:
+                                             if hostGroup.attrib.get("name") == az:
+                                                 az_found = True
+                                                 break
+         return az_found
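+     # Illustrative usage (not part of the original code; names are hypothetical):
+     #     if self.check_availibility_zone('hostgroup-az1', respool_href, headers):
+     #         ...  # host group exists, safe to build the VM-to-host affinity rule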
+     def get_pvdc_for_org(self, org_vdc, headers):
+         """ This method gets provider vdc references from organisation
+             Args:
+                org_vdc - name of the organisation VDC to find pvdc
+                headers - headers to make REST call
+             Returns:
+                None - if no pvdc href found else
+                pvdc_href - href to pvdc
+         """
+         #Get provider VDC references from vCD
+         pvdc_href = None
+         #url = '<vcd url>/api/admin/extension/providerVdcReferences'
+         url_list = [self.url, '/api/admin/extension/providerVdcReferences']
+         url = ''.join(url_list)
+         response = self.perform_request(req_type='GET', url=url, headers=headers)
+         if response.status_code != requests.codes.ok:
+             self.logger.debug("REST API call {} failed. Return status code {}"
+                               .format(url, response.status_code))
+         else:
+             xmlroot_response = XmlElementTree.fromstring(response.content)
+             for child in xmlroot_response:
+                 if 'ProviderVdcReference' in child.tag:
+                     pvdc_href = child.attrib.get('href')
+                     #Get vdcReferences to find org
+                     pvdc_resp = self.perform_request(req_type='GET', url=pvdc_href, headers=headers)
+                     if pvdc_resp.status_code != requests.codes.ok:
+                         raise vimconn.vimconnException("REST API call {} failed. "
+                                                        "Return status code {}"
+                                                        .format(pvdc_href, pvdc_resp.status_code))
+                     pvdc_resp_xml = XmlElementTree.fromstring(pvdc_resp.content)
+                     for child in pvdc_resp_xml:
+                         if 'Link' in child.tag:
+                             if child.attrib.get('type') == "application/vnd.vmware.admin.vdcReferences+xml":
+                                 vdc_href = child.attrib.get('href')
+                                 #Check if provided org is present in vdc
+                                 vdc_resp = self.perform_request(req_type='GET',
+                                                                 url=vdc_href,
+                                                                 headers=headers)
+                                 if vdc_resp.status_code != requests.codes.ok:
+                                     raise vimconn.vimconnException("REST API call {} failed. "
+                                                                    "Return status code {}"
+                                                                    .format(vdc_href, vdc_resp.status_code))
+                                 vdc_resp_xml = XmlElementTree.fromstring(vdc_resp.content)
+                                 for child in vdc_resp_xml:
+                                     if 'VdcReference' in child.tag:
+                                         if child.attrib.get('name') == org_vdc:
+                                             return pvdc_href
+     def get_resource_pool_details(self, pvdc_href, headers):
+         """ Method to get resource pool information.
+             Host groups are property of resource group.
+             To get host groups, we need to GET details of resource pool.
+             Args:
+                 pvdc_href: href to pvdc details
+                 headers: headers
+             Returns:
+                 respool_href - Returns href link reference to resource pool
+         """
+         respool_href = None
+         resp = self.perform_request(req_type='GET', url=pvdc_href, headers=headers)
+         if resp.status_code != requests.codes.ok:
+             self.logger.debug("REST API call {} failed. Return status code {}"
+                               .format(pvdc_href, resp.status_code))
+         else:
+             respool_resp_xml = XmlElementTree.fromstring(resp.content)
+             for child in respool_resp_xml:
+                 if 'Link' in child.tag:
+                     if child.attrib.get('type') == "application/vnd.vmware.admin.vmwProviderVdcResourcePoolSet+xml":
+                         respool_href = child.attrib.get("href")
+                         break
+         return respool_href
+     def log_message(self, msg):
+         """
+             Method to log error messages related to Affinity rule creation
+             in new_vminstance & raise Exception
+                 Args :
+                     msg - Error message to be logged
+         """
+         #get token to connect vCD as a normal user
+         self.get_token()
+         self.logger.debug(msg)
+         raise vimconn.vimconnException(msg)
+     ##
+     ##
+     ##  based on current discussion
+     ##
+     ##
+     ##  server:
+     #   created: '2016-09-08T11:51:58'
+     #   description: simple-instance.linux1.1
+     #   flavor: ddc6776e-75a9-11e6-ad5f-0800273e724c
+     #   hostId: e836c036-74e7-11e6-b249-0800273e724c
+     #   image: dde30fe6-75a9-11e6-ad5f-0800273e724c
+     #   status: ACTIVE
+     #   error_msg:
+     #   interfaces: ...
+     #
+     def get_vminstance(self, vim_vm_uuid=None):
+         """Returns the VM instance information from VIM"""
+         self.logger.debug("Client requesting vm instance {} ".format(vim_vm_uuid))
+         org, vdc = self.get_vdc_details()
+         if vdc is None:
+             raise vimconn.vimconnConnectionException(
+                 "Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+         vm_info_dict = self.get_vapp_details_rest(vapp_uuid=vim_vm_uuid)
+         if not vm_info_dict:
+             self.logger.debug("get_vminstance(): Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+             raise vimconn.vimconnNotFoundException("Failed to get vApp name by UUID {}".format(vim_vm_uuid))
+         status_key = vm_info_dict['status']
+         error = ''
+         try:
+             vm_dict = {'created': vm_info_dict['created'],
+                        'description': vm_info_dict['name'],
+                        'status': vcdStatusCode2manoFormat[int(status_key)],
+                        'hostId': vm_info_dict['vmuuid'],
+                        'error_msg': error,
+                        'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
+             if 'interfaces' in vm_info_dict:
+                 vm_dict['interfaces'] = vm_info_dict['interfaces']
+             else:
+                 vm_dict['interfaces'] = []
+         except KeyError:
+             vm_dict = {'created': '',
+                        'description': '',
+                        'status': vcdStatusCode2manoFormat[int(-1)],
+                        'hostId': vm_info_dict['vmuuid'],
+                        'error_msg': "Inconsistency state",
+                        'vim_info': yaml.safe_dump(vm_info_dict), 'interfaces': []}
+         return vm_dict
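+     # Illustrative shape of the returned dict (values are hypothetical):
+     #     {'created': '2016-09-08T11:51:58', 'description': 'simple-instance.linux1.1',
+     #      'status': 'ACTIVE', 'hostId': '<vm uuid>', 'error_msg': '',
+     #      'vim_info': '<yaml dump of the raw vApp details>', 'interfaces': [...]}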
+     def delete_vminstance(self, vm__vim_uuid, created_items=None):
+         """Method poweroff and remove VM instance from vcloud director network.
+         Args:
+             vm__vim_uuid: VM UUID
+         Returns:
+             Returns the instance identifier
+         """
+         self.logger.debug("Client requesting delete vm instance {} ".format(vm__vim_uuid))
+         org, vdc = self.get_vdc_details()
+         vdc_obj = VDC(self.client, href=vdc.get('href'))
+         if vdc_obj is None:
+             self.logger.debug("delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(
+                 self.tenant_name))
+             raise vimconn.vimconnException(
+                 "delete_vminstance(): Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+         try:
+             vapp_name = self.get_namebyvappid(vm__vim_uuid)
+             if vapp_name is None:
+                 self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                 return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+             self.logger.info("Deleting vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+             vapp_resource = vdc_obj.get_vapp(vapp_name)
+             vapp = VApp(self.client, resource=vapp_resource)
+             # Power off, undeploy, and delete the vApp, waiting for each task to complete.
+             if vapp:
+                 if vapp_resource.get('deployed') == 'true':
+                     self.logger.info("Powering off vApp {}".format(vapp_name))
+                     #Power off vApp
+                     powered_off = False
+                     wait_time = 0
+                     while wait_time <= MAX_WAIT_TIME:
+                         power_off_task = vapp.power_off()
+                         result = self.client.get_task_monitor().wait_for_success(task=power_off_task)
+                         if result.get('status') == 'success':
+                             powered_off = True
+                             break
+                         else:
+                             self.logger.info("Wait for vApp {} to power off".format(vapp_name))
+                             time.sleep(INTERVAL_TIME)
+                         wait_time += INTERVAL_TIME
+                     if not powered_off:
+                         self.logger.debug("delete_vminstance(): Failed to power off VM instance {} ".format(vm__vim_uuid))
+                     else:
+                         self.logger.info("delete_vminstance(): Powered off VM instance {} ".format(vm__vim_uuid))
+                     #Undeploy vApp
+                     self.logger.info("Undeploy vApp {}".format(vapp_name))
+                     wait_time = 0
+                     undeployed = False
+                     while wait_time <= MAX_WAIT_TIME:
+                         vapp = VApp(self.client, resource=vapp_resource)
+                         if not vapp:
+                             self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                             return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+                         undeploy_task = vapp.undeploy()
+                         result = self.client.get_task_monitor().wait_for_success(task=undeploy_task)
+                         if result.get('status') == 'success':
+                             undeployed = True
+                             break
+                         else:
+                             self.logger.debug("Wait for vApp {} to undeploy".format(vapp_name))
+                             time.sleep(INTERVAL_TIME)
+                         wait_time += INTERVAL_TIME
+                     if not undeployed:
+                         self.logger.debug("delete_vminstance(): Failed to undeploy vApp {} ".format(vm__vim_uuid))
+                 # delete vapp
+                 self.logger.info("Start deletion of vApp {} ".format(vapp_name))
+                 if vapp is not None:
+                     wait_time = 0
+                     result = False
+                     while wait_time <= MAX_WAIT_TIME:
+                         vapp = VApp(self.client, resource=vapp_resource)
+                         if not vapp:
+                             self.logger.debug("delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+                             return -1, "delete_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid)
+                         delete_task = vdc_obj.delete_vapp(vapp.name, force=True)
+                         result = self.client.get_task_monitor().wait_for_success(task=delete_task)
+                         if result.get('status') == 'success':
+                             break
+                         else:
+                             self.logger.debug("Wait for vApp {} to delete".format(vapp_name))
+                             time.sleep(INTERVAL_TIME)
+                         wait_time += INTERVAL_TIME
+                     if not result or result.get('status') != 'success':
+                         self.logger.debug("delete_vminstance(): Failed to delete uuid {} ".format(vm__vim_uuid))
+                     else:
+                         self.logger.info("Deleted vm instance {} successfully".format(vm__vim_uuid))
+                         config_drive_catalog_name, config_drive_catalog_id = 'cfg_drv-' + vm__vim_uuid, None
+                         catalog_list = self.get_image_list()
+                         try:
+                             config_drive_catalog_id = [catalog_['id'] for catalog_ in catalog_list
+                                                        if catalog_['name'] == config_drive_catalog_name][0]
+                         except IndexError:
+                             pass
+                         if config_drive_catalog_id:
+                             self.logger.debug('delete_vminstance(): Found a config drive catalog {} matching '
+                                               'vapp_name"{}". Deleting it.'.format(config_drive_catalog_id, vapp_name))
+                             self.delete_image(config_drive_catalog_id)
+                         return vm__vim_uuid
+         except Exception:
+             self.logger.debug(traceback.format_exc())
+             raise vimconn.vimconnException("delete_vminstance(): Failed delete vm instance {}".format(vm__vim_uuid))
+     def refresh_vms_status(self, vm_list):
+         """Get the status of the virtual machines and their interfaces/ports
+            Params: the list of VM identifiers
+            Returns a dictionary with:
+                 vm_id:          #VIM id of this Virtual Machine
+                     status:     #Mandatory. Text with one of:
+                                 #  DELETED (not found at vim)
+                                 #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...)
+                                 #  OTHER (Vim reported other status not understood)
+                                 #  ERROR (VIM indicates an ERROR status)
+                                 #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running),
+                                 #  CREATING (on building process), ERROR
+                                 #  ACTIVE:NoMgmtIP (Active but none of its interfaces has an IP address)
+                                 #
+                     error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                     interfaces:
+                      -  vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                         mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                         vim_net_id:       #network id where this interface is connected
+                         vim_interface_id: #interface/port VIM id
+                         ip_address:       #null, or text with IPv4, IPv6 address
+         """
+         self.logger.debug("Client requesting refresh vm status for {} ".format(vm_list))
+         org, vdc = self.get_vdc_details()
+         if vdc is None:
+             raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+         vms_dict = {}
+         nsx_edge_list = []
+         for vmuuid in vm_list:
+             vapp_name = self.get_namebyvappid(vmuuid)
+             if vapp_name is not None:
+                 try:
+                     vm_pci_details = self.get_vm_pci_details(vmuuid)
+                     vdc_obj = VDC(self.client, href=vdc.get('href'))
+                     vapp_resource = vdc_obj.get_vapp(vapp_name)
+                     the_vapp = VApp(self.client, resource=vapp_resource)
+                     vm_details = {}
+                     for vm in the_vapp.get_all_vms():
+                         headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
+                                    'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                         response = self.perform_request(req_type='GET',
+                                                         url=vm.get('href'),
+                                                         headers=headers)
+                         if response.status_code != 200:
+                             self.logger.error("refresh_vms_status : REST call {} failed reason : {}"\
+                                                             "status code : {}".format(vm.get('href'),
+                                                                                     response.content,
+                                                                                response.status_code))
+                             raise vimconn.vimconnException("refresh_vms_status : Failed to get "\
+                                                                          "VM details")
+                         xmlroot = XmlElementTree.fromstring(response.content)
+                         # response.content is bytes under Python 3; use response.text for str regexes
+                         result = response.text.replace("\n", " ")
+                         hdd_match = re.search(r'vcloud:capacity="(\d+)"\svcloud:storageProfileOverrideVmDefault=', result)
+                         if hdd_match:
+                             hdd_mb = hdd_match.group(1)
+                             vm_details['hdd_mb'] = int(hdd_mb) if hdd_mb else None
+                         cpus_match = re.search(r'<rasd:Description>Number of Virtual CPUs</.*?>(\d+)</rasd:VirtualQuantity>', result)
+                         if cpus_match:
+                             cpus = cpus_match.group(1)
+                             vm_details['cpus'] = int(cpus) if cpus else None
+                         memory_match = re.search(r'<rasd:Description>Memory Size</.*?>(\d+)</rasd:VirtualQuantity>', result)
+                         memory_mb = memory_match.group(1) if memory_match else None
+                         vm_details['memory_mb'] = int(memory_mb) if memory_mb else None
+                         vm_details['status'] = vcdStatusCode2manoFormat[int(xmlroot.get('status'))]
+                         vm_details['id'] = xmlroot.get('id')
+                         vm_details['name'] = xmlroot.get('name')
+                         vm_info = [vm_details]
+                         if vm_pci_details:
+                             vm_info[0].update(vm_pci_details)
+                         vm_dict = {'status': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                    'error_msg': vcdStatusCode2manoFormat[int(vapp_resource.get('status'))],
+                                    'vim_info': yaml.safe_dump(vm_info), 'interfaces': []}
+                         # get networks
+                         vm_ip = None
+                         vm_mac = None
+                         networks = re.findall('<NetworkConnection needsCustomization=.*?</NetworkConnection>',result)
+                         for network in networks:
+                             mac_s = re.search('<MACAddress>(.*?)</MACAddress>',network)
+                             vm_mac = mac_s.group(1) if mac_s else None
+                             ip_s = re.search('<IpAddress>(.*?)</IpAddress>',network)
+                             vm_ip = ip_s.group(1) if ip_s else None
+                             if vm_ip is None:
+                                 if not nsx_edge_list:
+                                     nsx_edge_list = self.get_edge_details()
+                                     if nsx_edge_list is None:
+                                         raise vimconn.vimconnException("refresh_vms_status:"\
+                                                                        "Failed to get edge details from NSX Manager")
+                                 if vm_mac is not None:
+                                     vm_ip = self.get_ipaddr_from_NSXedge(nsx_edge_list, vm_mac)
+                             net_s = re.search('network="(.*?)"',network)
+                             network_name = net_s.group(1) if net_s else None
+                             vm_net_id = self.get_network_id_by_name(network_name)
+                             interface = {"mac_address": vm_mac,
+                                          "vim_net_id": vm_net_id,
+                                          "vim_interface_id": vm_net_id,
+                                          "ip_address": vm_ip}
+                             vm_dict["interfaces"].append(interface)
+                     # add a vm to vm dict
+                     vms_dict.setdefault(vmuuid, vm_dict)
+                     self.logger.debug("refresh_vms_status : vm info {}".format(vm_dict))
+                 except Exception as exp:
+                     self.logger.debug("Error in response {}".format(exp))
+                     self.logger.debug(traceback.format_exc())
+         return vms_dict
+     def get_edge_details(self):
+         """Get the NSX edge list from NSX Manager
+            Returns list of NSX edges
+         """
+         edge_list = []
+         rheaders = {'Content-Type': 'application/xml'}
+         nsx_api_url = '/api/4.0/edges'
+         self.logger.debug("Get edge details from NSX Manager {} {}".format(self.nsx_manager, nsx_api_url))
+         try:
+             resp = requests.get(self.nsx_manager + nsx_api_url,
+                                 auth=(self.nsx_user, self.nsx_password),
+                                 verify=False, headers=rheaders)
+             if resp.status_code == requests.codes.ok:
+                 paged_Edge_List = XmlElementTree.fromstring(resp.text)
+                 for edge_pages in paged_Edge_List:
+                     if edge_pages.tag == 'edgePage':
+                         for edge_summary in edge_pages:
+                             if edge_summary.tag == 'pagingInfo':
+                                 for element in edge_summary:
+                                     if element.tag == 'totalCount' and element.text == '0':
+                                         raise vimconn.vimconnException("get_edge_details: No NSX edges details found: {}"
+                                                                        .format(self.nsx_manager))
+                             if edge_summary.tag == 'edgeSummary':
+                                 for element in edge_summary:
+                                     if element.tag == 'id':
+                                         edge_list.append(element.text)
+                     else:
+                         raise vimconn.vimconnException("get_edge_details: No NSX edge details found: {}"
+                                                        .format(self.nsx_manager))
+                 if not edge_list:
+                     raise vimconn.vimconnException("get_edge_details: "\
+                                                    "No NSX edge details found: {}"
+                                                    .format(self.nsx_manager))
+                 else:
+                     self.logger.debug("get_edge_details: Found NSX edges {}".format(edge_list))
+                     return edge_list
+             else:
+                 self.logger.debug("get_edge_details: "
+                                   "Failed to get NSX edge details from NSX Manager: {}"
+                                   .format(resp.content))
+                 return None
+         except Exception as exp:
+             self.logger.debug("get_edge_details: "\
+                               "Failed to get NSX edge details from NSX Manager: {}"
+                               .format(exp))
+             raise vimconn.vimconnException("get_edge_details: "\
+                                            "Failed to get NSX edge details from NSX Manager: {}"
+                                            .format(exp))
+     def get_ipaddr_from_NSXedge(self, nsx_edges, mac_address):
+         """Get IP address details from NSX edges, using the MAC address
+            PARAMS: nsx_edges : List of NSX edges
+                    mac_address : Find IP address corresponding to this MAC address
+            Returns: IP address corresponding to the provided MAC address
+         """
+         ip_addr = None
+         rheaders = {'Content-Type': 'application/xml'}
+         self.logger.debug("get_ipaddr_from_NSXedge: Finding IP addr from NSX edge")
+         try:
+             for edge in nsx_edges:
+                 nsx_api_url = '/api/4.0/edges/' + edge + '/dhcp/leaseInfo'
+                 resp = requests.get(self.nsx_manager + nsx_api_url,
+                                     auth=(self.nsx_user, self.nsx_password),
+                                     verify=False, headers=rheaders)
+                 if resp.status_code == requests.codes.ok:
+                     dhcp_leases = XmlElementTree.fromstring(resp.text)
+                     for child in dhcp_leases:
+                         if child.tag == 'dhcpLeaseInfo':
+                             dhcpLeaseInfo = child
+                             for leaseInfo in dhcpLeaseInfo:
+                                 edge_mac_addr = None
+                                 for elem in leaseInfo:
+                                     if elem.tag == 'macAddress':
+                                         edge_mac_addr = elem.text
+                                     if elem.tag == 'ipAddress':
+                                         ip_addr = elem.text
+                                 if edge_mac_addr is not None:
+                                     if edge_mac_addr == mac_address:
+                                         self.logger.debug("Found ip addr {} for mac {} at NSX edge {}"
+                                                           .format(ip_addr, mac_address,edge))
+                                         return ip_addr
+                 else:
+                     self.logger.debug("get_ipaddr_from_NSXedge: "\
+                                       "Error occurred while getting DHCP lease info from NSX Manager: {}"
+                                       .format(resp.content))
+             self.logger.debug("get_ipaddr_from_NSXedge: No IP addr found in any NSX edge")
+             return None
+         except XmlElementTree.ParseError as Err:
+             self.logger.debug("ParseError in response from NSX Manager {}".format(Err), exc_info=True)
+     def action_vminstance(self, vm__vim_uuid=None, action_dict=None, created_items={}):
+         """Send and action over a VM instance from VIM
+         Returns the vm_id if the action was successfully sent to the VIM"""
+         self.logger.debug("Received action for vm {} and action dict {}".format(vm__vim_uuid, action_dict))
+         if vm__vim_uuid is None or action_dict is None:
+             raise vimconn.vimconnException("Invalid request. VM id or action is None.")
+         org, vdc = self.get_vdc_details()
+         if vdc is None:
+             raise vimconn.vimconnException("Failed to get a reference of VDC for a tenant {}".format(self.tenant_name))
+         vapp_name = self.get_namebyvappid(vm__vim_uuid)
+         if vapp_name is None:
+             self.logger.debug("action_vminstance(): Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+             raise vimconn.vimconnException("Failed to get vm by given {} vm uuid".format(vm__vim_uuid))
+         else:
+             self.logger.info("Action_vminstance vApp {} and UUID {}".format(vapp_name, vm__vim_uuid))
+         try:
+             vdc_obj = VDC(self.client, href=vdc.get('href'))
+             vapp_resource = vdc_obj.get_vapp(vapp_name)
+             vapp = VApp(self.client, resource=vapp_resource)
+             if "start" in action_dict:
+                 self.logger.info("action_vminstance: Power on vApp: {}".format(vapp_name))
+                 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
+                 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+                 self.instance_actions_result("start", result, vapp_name)
+             elif "rebuild" in action_dict:
+                 self.logger.info("action_vminstance: Rebuild vApp: {}".format(vapp_name))
+                 rebuild_task = vapp.deploy(power_on=True)
+                 result = self.client.get_task_monitor().wait_for_success(task=rebuild_task)
+                 self.instance_actions_result("rebuild", result, vapp_name)
+             elif "pause" in action_dict:
+                 self.logger.info("action_vminstance: pause vApp: {}".format(vapp_name))
+                 pause_task = vapp.undeploy(action='suspend')
+                 result = self.client.get_task_monitor().wait_for_success(task=pause_task)
+                 self.instance_actions_result("pause", result, vapp_name)
+             elif "resume" in action_dict:
+                 self.logger.info("action_vminstance: resume vApp: {}".format(vapp_name))
+                 poweron_task = self.power_on_vapp(vm__vim_uuid, vapp_name)
+                 result = self.client.get_task_monitor().wait_for_success(task=poweron_task)
+                 self.instance_actions_result("resume", result, vapp_name)
+             elif "shutoff" in action_dict or "shutdown" in action_dict:
+                 action_name, value = list(action_dict.items())[0]
+                 self.logger.info("action_vminstance: {} vApp: {}".format(action_name, vapp_name))
+                 shutdown_task = vapp.shutdown()
+                 result = self.client.get_task_monitor().wait_for_success(task=shutdown_task)
+                 if action_name == "shutdown":
+                     self.instance_actions_result("shutdown", result, vapp_name)
+                 else:
+                     self.instance_actions_result("shutoff", result, vapp_name)
+             elif "forceOff" in action_dict:
+                 result = vapp.undeploy(action='powerOff')
+                 self.instance_actions_result("forceOff", result, vapp_name)
+             elif "reboot" in action_dict:
+                 self.logger.info("action_vminstance: reboot vApp: {}".format(vapp_name))
+                 reboot_task = vapp.reboot()
+                 self.client.get_task_monitor().wait_for_success(task=reboot_task)
+             else:
+                 raise vimconn.vimconnException("action_vminstance: Invalid action {} or action is None.".format(action_dict))
+             return vm__vim_uuid
+         except Exception as exp:
+             self.logger.debug("action_vminstance: Failed with Exception {}".format(exp))
+             raise vimconn.vimconnException("action_vminstance: Failed with Exception {}".format(exp))
+     def instance_actions_result(self, action, result, vapp_name):
+         if result.get('status') == 'success':
+             self.logger.info("action_vminstance: Sucessfully {} the vApp: {}".format(action, vapp_name))
+         else:
+             self.logger.error("action_vminstance: Failed to {} vApp: {}".format(action, vapp_name))
+     def get_vminstance_console(self, vm_id, console_type="novnc"):
+         """
+         Get a console for the virtual machine
+         Params:
+             vm_id: uuid of the VM
+             console_type, can be:
+                 "novnc" (by default), "xvpvnc" for VNC types,
+                 "rdp-html5" for RDP types, "spice-html5" for SPICE types
+         Returns dict with the console parameters:
+                 protocol: ssh, ftp, http, https, ...
+                 server:   usually ip address
+                 port:     the http, ssh, ... port
+                 suffix:   extra text, e.g. the http path and query string
+         """
+         console_dict = {}
+         if console_type is None or console_type == 'novnc':
+             url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireMksTicket".format(self.url, vm_id)
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='POST',
+                                             url=url_rest_call,
+                                             headers=headers)
+             if response.status_code == 403:
+                 response = self.retry_rest('GET', url_rest_call)
+             if response.status_code != 200:
+                 self.logger.error("REST call {} failed reason : {}"\
+                                   "status code : {}".format(url_rest_call,
+                                                          response.content,
+                                                     response.status_code))
+                 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+                                                                      "VM Mks ticket details")
+             s = re.search("<Host>(.*?)</Host>",response.content)
+             console_dict['server'] = s.group(1) if s else None
+             s1 = re.search("<Port>(\d+)</Port>",response.content)
+             console_dict['port'] = s1.group(1) if s1 else None
+             url_rest_call = "{}/api/vApp/vm-{}/screen/action/acquireTicket".format(self.url, vm_id)
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='POST',
+                                             url=url_rest_call,
+                                             headers=headers)
+             if response.status_code == 403:
+                 response = self.retry_rest('GET', url_rest_call)
+             if response.status_code != 200:
+                 self.logger.error("REST call {} failed reason : {}"\
+                                   "status code : {}".format(url_rest_call,
+                                                          response.content,
+                                                     response.status_code))
+                 raise vimconn.vimconnException("get_vminstance_console : Failed to get "\
+                                                                      "VM console details")
+             s = re.search(">.*?/(vm-\d+.*)</",response.content)
+             console_dict['suffix'] = s.group(1) if s else None
+             console_dict['protocol'] = "https"
+         return console_dict
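+     # Illustrative result (not part of the original code; values are hypothetical):
+     #     {'protocol': 'https', 'server': '10.0.0.5', 'port': '443',
+     #      'suffix': 'vm-1234/...'}
+     # which a client would assemble as https://10.0.0.5:443/<suffix>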
+     # NOT USED METHODS in current version
+     def host_vim2gui(self, host, server_dict):
+         """Transform host dictionary from VIM format to GUI format,
+         and append to the server_dict
+         """
+         raise vimconn.vimconnNotImplemented("Should have implemented this")
+     def get_hosts_info(self):
+         """Get the information of deployed hosts
+         Returns the hosts content"""
+         raise vimconn.vimconnNotImplemented("Should have implemented this")
+     def get_hosts(self, vim_tenant):
+         """Get the hosts and deployed instances
+         Returns the hosts content"""
+         raise vimconn.vimconnNotImplemented("Should have implemented this")
+     def get_processor_rankings(self):
+         """Get the processor rankings in the VIM database"""
+         raise vimconn.vimconnNotImplemented("Should have implemented this")
+     def new_host(self, host_data):
+         """Adds a new host to VIM
+         Returns status code of the VIM response"""
+         raise vimconn.vimconnNotImplemented("Should have implemented this")
+     def new_external_port(self, port_data):
+         """Adds an external port to VIM
+         Returns the port identifier"""
+         raise vimconn.vimconnNotImplemented("Should have implemented this")
+     def new_external_network(self, net_name, net_type):
+         """Adds an external network to VIM (shared)
+         Returns the network identifier"""
+         raise vimconn.vimconnNotImplemented("Should have implemented this")
+     def connect_port_network(self, port_id, network_id, admin=False):
+         """Connects an external port to a network
+         Returns status code of the VIM response"""
+         raise vimconn.vimconnNotImplemented("Should have implemented this")
+     def new_vminstancefromJSON(self, vm_data):
+         """Adds a VM instance to VIM
+         Returns the instance identifier"""
+         raise vimconn.vimconnNotImplemented("Should have implemented this")
+     def get_network_name_by_id(self, network_uuid=None):
+         """Method gets vcloud director network named based on supplied uuid.
+         Args:
+             network_uuid: network_id
+         Returns:
+             The return network name.
+         """
+         if not network_uuid:
+             return None
+         try:
+             org_dict = self.get_org(self.org_uuid)
+             if 'networks' in org_dict:
+                 org_network_dict = org_dict['networks']
+                 for net_uuid in org_network_dict:
+                     if net_uuid == network_uuid:
+                         return org_network_dict[net_uuid]
+         except Exception:
+             self.logger.debug("Exception in get_network_name_by_id")
+             self.logger.debug(traceback.format_exc())
+         return None
+     def get_network_id_by_name(self, network_name=None):
+         """Method gets vcloud director network uuid based on supplied name.
+         Args:
+             network_name: network_name
+         Returns:
+             The return network uuid.
+             network_uuid: network_id
+         """
 -            data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
 -                            <Description>Openmano created</Description>
 -                                    <Configuration>
 -                                        <IpScopes>
 -                                            <IpScope>
 -                                                <IsInherited>{1:s}</IsInherited>
 -                                                <Gateway>{2:s}</Gateway>
 -                                                <Netmask>{3:s}</Netmask>
 -                                                <Dns1>{4:s}</Dns1>{5:s}
 -                                                <IsEnabled>{6:s}</IsEnabled>
 -                                                <IpRanges>
 -                                                    <IpRange>
 -                                                        <StartAddress>{7:s}</StartAddress>
 -                                                        <EndAddress>{8:s}</EndAddress>
 -                                                    </IpRange>
 -                                                </IpRanges>
 -                                            </IpScope>
 -                                        </IpScopes>
 -                                        <FenceMode>{9:s}</FenceMode>
 -                                    </Configuration>
 -                                    <IsShared>{10:s}</IsShared>
 -                        </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
 -                                                    subnet_address, dns1, dns2_text, dhcp_enabled,
 -                                                    dhcp_start_address, dhcp_end_address,
 -                                                    fence_mode, isshared)
+         if not network_name:
+             self.logger.debug("get_network_id_by_name() : Network name is empty")
+             return None
+         try:
+             org_dict = self.get_org(self.org_uuid)
+             if org_dict and 'networks' in org_dict:
+                 org_network_dict = org_dict['networks']
+                 for net_uuid, net_name in org_network_dict.items():
+                     if net_name == network_name:
+                         return net_uuid
+         except KeyError as exp:
+             self.logger.debug("get_network_id_by_name() : KeyError- {} ".format(exp))
+         return None
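+     # Illustrative round trip between the two lookups (not part of the original
+     # code; the network name is hypothetical):
+     #     net_uuid = self.get_network_id_by_name('mgmt-net')
+     #     assert self.get_network_name_by_id(net_uuid) == 'mgmt-net'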
++    def get_physical_network_by_name(self, physical_network_name):
++        '''
++        Method returns the UUID of the physical network whose name is passed
++        Args:
++            physical_network_name: physical network name
++        Returns:
++            UUID of physical_network_name
++        '''
++        try:
++            client_as_admin = self.connect_as_admin()
++            if not client_as_admin:
++                raise vimconn.vimconnConnectionException("Failed to connect vCD.")
++            url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
++            vm_list_rest_call = ''.join(url_list)
++
++            if client_as_admin._session:
++                headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
++                           'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
++
++                response = self.perform_request(req_type='GET',
++                                                url=vm_list_rest_call,
++                                                headers=headers)
++
++                provider_network = None
++                available_network = None
++                add_vdc_rest_url = None
++
++                if response.status_code != requests.codes.ok:
++                    self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
++                                                                                              response.status_code))
++                    return None
++                else:
++                    try:
++                        vm_list_xmlroot = XmlElementTree.fromstring(response.content)
++                        for child in vm_list_xmlroot:
++
++                            if child.tag.split("}")[1] == 'ProviderVdcReference':
++                                provider_network = child.attrib.get('href')
++                                # application/vnd.vmware.admin.providervdc+xml
++                            if child.tag.split("}")[1] == 'Link':
++                                if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
++                                        and child.attrib.get('rel') == 'add':
++                                    add_vdc_rest_url = child.attrib.get('href')
++                    except Exception:
++                        self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
++                        self.logger.debug("Response body {}".format(response.content))
++                        return None
++
++                # find the pvdc-provided available network
++                response = self.perform_request(req_type='GET',
++                                                url=provider_network,
++                                                headers=headers)
++
++                if response.status_code != requests.codes.ok:
++                    self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
++                                                                                              response.status_code))
++                    return None
++
++                try:
++                    vm_list_xmlroot = XmlElementTree.fromstring(response.content)
++                    for child in vm_list_xmlroot.iter():
++                        if child.tag.split("}")[1] == 'AvailableNetworks':
++                            for networks in child.iter():
++                                if networks.attrib.get('href') is not None and networks.attrib.get('name') is not None:
++                                    if networks.attrib.get('name') == physical_network_name:
++                                        network_url = networks.attrib.get('href')
++                                        available_network = network_url[network_url.rindex('/')+1:]
++                                        break
++                except Exception as e:
++                    self.logger.debug("Failed to parse AvailableNetworks: {}".format(e))
++                    return None
++
++            return available_network
++        except Exception as e:
++            self.logger.error("Error while getting physical network: {}".format(e))
++
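++    # Illustrative usage (not part of the original code; the name is hypothetical):
++    #     net_uuid = self.get_physical_network_by_name('physnet-ext')
++    #     # -> UUID string if found among the pVDC AvailableNetworks, else None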
+     def list_org_action(self):
+         """
+         Method leverages vCloud Director and queries the organizations available
+         to the current user
+             Returns:
+                 The XML response
+         """
+         url_list = [self.url, '/api/org']
+         vm_list_rest_call = ''.join(url_list)
+         if self.client._session:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='GET',
+                                             url=vm_list_rest_call,
+                                             headers=headers)
+             if response.status_code == 403:
+                 response = self.retry_rest('GET', vm_list_rest_call)
+             if response.status_code == requests.codes.ok:
+                 return response.content
+         return None
+     def get_org_action(self, org_uuid=None):
+         """
+         Method leverages vCloud Director and retrieves the available object for an organization.
+         Args:
+             org_uuid - vCD organization uuid
+             self.client - is active connection.
+             Returns:
+                 The XML response
+         """
+         if org_uuid is None:
+             return None
+         url_list = [self.url, '/api/org/', org_uuid]
+         vm_list_rest_call = ''.join(url_list)
+         if self.client._session:
+             headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             #response = requests.get(vm_list_rest_call, headers=headers, verify=False)
+             response = self.perform_request(req_type='GET',
+                                             url=vm_list_rest_call,
+                                             headers=headers)
+             if response.status_code == 403:
+                 response = self.retry_rest('GET', vm_list_rest_call)
+             if response.status_code == requests.codes.ok:
+                 return response.content
+         return None
+     def get_org(self, org_uuid=None):
+         """
+         Method retrieves the contents of an organization in vCloud Director
+         Args:
+             org_uuid - is an organization uuid.
+             Returns:
+                 A dictionary with the following keys
+                     "networks" - network list under the org
+                     "catalogs" - catalog list under the org
+                     "vdcs" - vdc list under the org
+         """
+         org_dict = {}
+         if org_uuid is None:
+             return org_dict
+         content = self.get_org_action(org_uuid=org_uuid)
+         try:
+             vdc_list = {}
+             network_list = {}
+             catalog_list = {}
+             vm_list_xmlroot = XmlElementTree.fromstring(content)
+             for child in vm_list_xmlroot:
+                 if child.attrib['type'] == 'application/vnd.vmware.vcloud.vdc+xml':
+                     vdc_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                     org_dict['vdcs'] = vdc_list
+                 if child.attrib['type'] == 'application/vnd.vmware.vcloud.orgNetwork+xml':
+                     network_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                     org_dict['networks'] = network_list
+                 if child.attrib['type'] == 'application/vnd.vmware.vcloud.catalog+xml':
+                     catalog_list[child.attrib['href'].split("/")[-1:][0]] = child.attrib['name']
+                     org_dict['catalogs'] = catalog_list
+         except:
+             pass
+         return org_dict
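The tag.split("}")[1] idiom above strips ElementTree's namespace qualifier; vCD responses qualify every tag with the vCloud namespace. A self-contained sketch of why it works:

    import xml.etree.ElementTree as XmlElementTree

    sample = '<root xmlns="http://www.vmware.com/vcloud/v1.5"><Org name="osm"/></root>'
    root = XmlElementTree.fromstring(sample)
    for child in root:
        # child.tag is '{http://www.vmware.com/vcloud/v1.5}Org'
        print(child.tag.split("}")[1], child.attrib.get('name'))  # -> Org osm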
+     def get_org_list(self):
+         """
+         Method retrieves the organizations available in vCloud Director
+             Returns:
+                 A dictionary keyed by organization UUID, with the organization name as value
+         """
+         org_dict = {}
+         content = self.list_org_action()
+         try:
+             vm_list_xmlroot = XmlElementTree.fromstring(content)
+             for vm_xml in vm_list_xmlroot:
+                 if vm_xml.tag.split("}")[1] == 'Org':
+                     org_uuid = vm_xml.attrib['href'].split('/')[-1:]
+                     org_dict[org_uuid[0]] = vm_xml.attrib['name']
+         except:
+             pass
+         return org_dict
+     def vms_view_action(self, vdc_name=None):
+         """ Method leverages the vCloud director vms query call
+         Args:
+             vdc_name - is a vdc name that will be used to query vms action
+             Returns:
+                 The XML response content, or None
+         """
+         vca = self.connect()
+         if vdc_name is None:
+             return None
+         url_list = [vca.host, '/api/vms/query']
+         vm_list_rest_call = ''.join(url_list)
+         if vca.vcloud_session and vca.vcloud_session.organization:
+             refs = [ref for ref in vca.vcloud_session.organization.Link if ref.name == vdc_name and
+                     ref.type_ == 'application/vnd.vmware.vcloud.vdc+xml']
+             if len(refs) == 1:
+                 response = Http.get(url=vm_list_rest_call,
+                                     headers=vca.vcloud_session.get_vcloud_headers(),
+                                     verify=vca.verify,
+                                     logger=vca.logger)
+                 if response.status_code == requests.codes.ok:
+                     return response.content
+         return None
+     def get_vapp_list(self, vdc_name=None):
+         """
+         Method retrieves the vApps deployed in vCloud director and returns a dictionary
+         containing all vApps deployed in the queried VDC.
+         The dictionary key is the vApp UUID
+         Args:
+             vdc_name - is a vdc name that will be used to query vms action
+             Returns:
+                 A dictionary keyed by vApp UUID
+         """
+         vapp_dict = {}
+         if vdc_name is None:
+             return vapp_dict
+         content = self.vms_view_action(vdc_name=vdc_name)
+         try:
+             vm_list_xmlroot = XmlElementTree.fromstring(content)
+             for vm_xml in vm_list_xmlroot:
+                 if vm_xml.tag.split("}")[1] == 'VMRecord':
+                     if vm_xml.attrib['isVAppTemplate'] == 'true':
+                         rawuuid = vm_xml.attrib['container'].split('/')[-1:]
+                         if 'vappTemplate-' in rawuuid[0]:
+                             # container is in the format vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
+                             # we strip the prefix and use the raw UUID as the key
+                             vapp_dict[rawuuid[0][13:]] = vm_xml.attrib
+         except:
+             pass
+         return vapp_dict
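The slice rawuuid[0][13:] above relies on len('vappTemplate-') being 13. A small sketch of the same prefix stripping, using a made-up href for illustration:

    # Hypothetical href, not taken from a real deployment.
    href = 'https://vcd.example.com/api/vAppTemplate/vappTemplate-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5'
    rawuuid = href.split('/')[-1]
    if rawuuid.startswith('vappTemplate-'):
        print(rawuuid[len('vappTemplate-'):])  # -> e63d40e7-4ff5-4c6d-851f-96c1e4da86a5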
+     def get_vm_list(self, vdc_name=None):
+         """
+         Method retrieves the VMs deployed in vCloud director and returns a dictionary
+         containing all VMs deployed in the queried VDC.
+         The dictionary key is the VM UUID
+         Args:
+             vdc_name - is a vdc name that will be used to query vms action
+             Returns:
+                 A dictionary keyed by VM UUID
+         """
+         vm_dict = {}
+         if vdc_name is None:
+             return vm_dict
+         content = self.vms_view_action(vdc_name=vdc_name)
+         try:
+             vm_list_xmlroot = XmlElementTree.fromstring(content)
+             for vm_xml in vm_list_xmlroot:
+                 if vm_xml.tag.split("}")[1] == 'VMRecord':
+                     if vm_xml.attrib['isVAppTemplate'] == 'false':
+                         rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+                         if 'vm-' in rawuuid[0]:
+                             # href is in the format vm-e63d40e7-4ff5-4c6d-851f-96c1e4da86a5;
+                             # we strip the prefix and use the raw UUID as the key
+                             vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+         except:
+             pass
+         return vm_dict
+     def get_vapp(self, vdc_name=None, vapp_name=None, isuuid=False):
+         """
+         Method retrieves a VM deployed in vCloud director and returns its attributes
+         as a dictionary keyed by VM UUID.
+         Args:
+             vdc_name - is a vdc name that will be used to query vms action
+             vapp_name - vApp name or container UUID, depending on isuuid
+             isuuid - True to match by UUID, False to match by name
+             Returns:
+                 A dictionary keyed by VM UUID
+         """
+         vm_dict = {}
+         vca = self.connect()
+         if not vca:
+             raise vimconn.vimconnConnectionException("self.connect() failed")
+         if vdc_name is None:
+             return vm_dict
+         content = self.vms_view_action(vdc_name=vdc_name)
+         try:
+             vm_list_xmlroot = XmlElementTree.fromstring(content)
+             for vm_xml in vm_list_xmlroot:
+                 if vm_xml.tag.split("}")[1] == 'VMRecord' and vm_xml.attrib['isVAppTemplate'] == 'false':
+                     # lookup done by UUID
+                     if isuuid:
+                         if vapp_name in vm_xml.attrib['container']:
+                             rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+                             if 'vm-' in rawuuid[0]:
+                                 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+                                 break
+                     # lookup done by Name
+                     else:
+                         if vapp_name in vm_xml.attrib['name']:
+                             rawuuid = vm_xml.attrib['href'].split('/')[-1:]
+                             if 'vm-' in rawuuid[0]:
+                                 vm_dict[rawuuid[0][3:]] = vm_xml.attrib
+                                 break
+         except:
+             pass
+         return vm_dict
+     def get_network_action(self, network_uuid=None):
+         """
+         Method leverages vCloud director and queries a network by its uuid
+         Args:
+             network_uuid - is a network uuid
+             Returns:
+                 The XML response content, or None
+         """
+         if network_uuid is None:
+             return None
+         url_list = [self.url, '/api/network/', network_uuid]
+         vm_list_rest_call = ''.join(url_list)
+         if self.client._session:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                      'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='GET',
+                                             url=vm_list_rest_call,
+                                             headers=headers)
+             #Retry login if session expired & retry sending request
+             if response.status_code == 403:
+                 response = self.retry_rest('GET', vm_list_rest_call)
+             if response.status_code == requests.codes.ok:
+                 return response.content
+         return None
+     def get_vcd_network(self, network_uuid=None):
+         """
+         Method retrieves available network from vCloud Director
+         Args:
+             network_uuid - is VCD network UUID
+         Each element is serialized as a key : value pair
+         The following keys are available for access, e.g. network_configuration['Gateway']
+         <Configuration>
+           <IpScopes>
+             <IpScope>
+                 <IsInherited>true</IsInherited>
+                 <Gateway>172.16.252.100</Gateway>
+                 <Netmask>255.255.255.0</Netmask>
+                 <Dns1>172.16.254.201</Dns1>
+                 <Dns2>172.16.254.202</Dns2>
+                 <DnsSuffix>vmwarelab.edu</DnsSuffix>
+                 <IsEnabled>true</IsEnabled>
+                 <IpRanges>
+                     <IpRange>
+                         <StartAddress>172.16.252.1</StartAddress>
+                         <EndAddress>172.16.252.99</EndAddress>
+                     </IpRange>
+                 </IpRanges>
+             </IpScope>
+         </IpScopes>
+         <FenceMode>bridged</FenceMode>
+         Returns:
+                 A dictionary with the network configuration
+         """
+         network_configuration = {}
+         if network_uuid is None:
+             return network_uuid
+         try:
+             content = self.get_network_action(network_uuid=network_uuid)
+             vm_list_xmlroot = XmlElementTree.fromstring(content)
+             network_configuration['status'] = vm_list_xmlroot.get("status")
+             network_configuration['name'] = vm_list_xmlroot.get("name")
+             network_configuration['uuid'] = vm_list_xmlroot.get("id").split(":")[3]
+             for child in vm_list_xmlroot:
+                 if child.tag.split("}")[1] == 'IsShared':
+                     network_configuration['isShared'] = child.text.strip()
+                 if child.tag.split("}")[1] == 'Configuration':
+                     for configuration in child.iter():
+                         tagKey = configuration.tag.split("}")[1].strip()
+                         if tagKey != "":
+                             network_configuration[tagKey] = configuration.text.strip()
+             return network_configuration
+         except Exception as exp:
+             self.logger.debug("get_vcd_network: Failed with Exception {}".format(exp))
+             raise vimconn.vimconnException("get_vcd_network: Failed with Exception {}".format(exp))
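get_vcd_network flattens every element under <Configuration> into one dictionary, so nested IpScope fields such as Gateway surface as top-level keys. A runnable sketch of that flattening on a trimmed-down document:

    import xml.etree.ElementTree as XmlElementTree

    xml_doc = ('<OrgVdcNetwork xmlns="http://www.vmware.com/vcloud/v1.5" name="n1">'
               '<Configuration><IpScopes><IpScope>'
               '<Gateway>172.16.252.100</Gateway><Netmask>255.255.255.0</Netmask>'
               '</IpScope></IpScopes><FenceMode>bridged</FenceMode></Configuration>'
               '</OrgVdcNetwork>')
    config = {}
    for child in XmlElementTree.fromstring(xml_doc):
        if child.tag.split("}")[1] == 'Configuration':
            for element in child.iter():
                key = element.tag.split("}")[1].strip()
                if key and element.text:
                    config[key] = element.text.strip()
    print(config['Gateway'], config['FenceMode'])  # -> 172.16.252.100 bridged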
+     def delete_network_action(self, network_uuid=None):
+         """
+         Method deletes the given network from vCloud director
+         Args:
+             network_uuid - is the uuid of the network the client wishes to delete
+             Returns:
+                 True if the delete request was accepted, otherwise False
+         """
+         client = self.connect_as_admin()
+         if not client:
+             raise vimconn.vimconnConnectionException("Failed to connect vCD as admin")
+         if network_uuid is None:
+             return False
+         url_list = [self.url, '/api/admin/network/', network_uuid]
+         vm_list_rest_call = ''.join(url_list)
+         if client._session:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                      'x-vcloud-authorization': client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='DELETE',
+                                             url=vm_list_rest_call,
+                                             headers=headers)
+             if response.status_code == 202:
+                 return True
+         return False
+     def create_network(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+                        ip_profile=None, isshared='true'):
+         """
+         Method creates a network in vCloud director
+         Args:
+             network_name - is the network name to be created.
+             net_type - can be 'bridge', 'data', 'ptp' or 'mgmt'.
+             ip_profile - is a dict containing the IP parameters of the network
+             isshared - is a boolean
+             parent_network_uuid - is the parent provider vdc network that will be used for mapping.
+             It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
+             Returns:
+                 The new network uuid, or None
+         """
+         new_network_name = [network_name, '-', str(uuid.uuid4())]
+         content = self.create_network_rest(network_name=''.join(new_network_name),
+                                            ip_profile=ip_profile,
+                                            net_type=net_type,
+                                            parent_network_uuid=parent_network_uuid,
+                                            isshared=isshared)
+         if content is None:
+             self.logger.debug("Failed create network {}.".format(network_name))
+             return None
+         try:
+             vm_list_xmlroot = XmlElementTree.fromstring(content)
+             vcd_uuid = vm_list_xmlroot.get('id').split(":")
+             if len(vcd_uuid) == 4:
+                 self.logger.info("Created new network name: {} uuid: {}".format(network_name, vcd_uuid[3]))
+                 return vcd_uuid[3]
+         except:
+             self.logger.debug("Failed create network {}".format(network_name))
+             return None
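The id attribute parsed above is a vCloud URN with exactly four colon-separated parts; index 3 is the bare UUID. Sketch:

    vcd_id = 'urn:vcloud:network:e63d40e7-4ff5-4c6d-851f-96c1e4da86a5'  # example URN
    parts = vcd_id.split(':')
    if len(parts) == 4:
        print(parts[3])  # -> e63d40e7-4ff5-4c6d-851f-96c1e4da86a5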
+     def create_network_rest(self, network_name=None, net_type='bridge', parent_network_uuid=None,
+                             ip_profile=None, isshared='true'):
+         """
+         Method creates a network in vCloud director (REST implementation)
+         Args:
+             network_name - is the network name to be created.
+             net_type - can be 'bridge', 'data', 'ptp' or 'mgmt'.
+             ip_profile - is a dict containing the IP parameters of the network
+             isshared - is a boolean
+             parent_network_uuid - is the parent provider vdc network that will be used for mapping.
+             It is an optional attribute; by default, if no parent network is indicated, the first available one is used.
+             Returns:
+                 The XML content of the create response, or None
+         """
+         client_as_admin = self.connect_as_admin()
+         if not client_as_admin:
+             raise vimconn.vimconnConnectionException("Failed to connect vCD.")
+         if network_name is None:
+             return None
+         url_list = [self.url, '/api/admin/vdc/', self.tenant_id]
+         vm_list_rest_call = ''.join(url_list)
+         if client_as_admin._session:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                      'x-vcloud-authorization': client_as_admin._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='GET',
+                                             url=vm_list_rest_call,
+                                             headers=headers)
+             provider_network = None
+             available_networks = None
+             add_vdc_rest_url = None
+             if response.status_code != requests.codes.ok:
+                 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+                                                                                           response.status_code))
+                 return None
+             else:
+                 try:
+                     vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+                     for child in vm_list_xmlroot:
++
+                         if child.tag.split("}")[1] == 'ProviderVdcReference':
+                             provider_network = child.attrib.get('href')
+                             # application/vnd.vmware.admin.providervdc+xml
+                         if child.tag.split("}")[1] == 'Link':
+                             if child.attrib.get('type') == 'application/vnd.vmware.vcloud.orgVdcNetwork+xml' \
+                                     and child.attrib.get('rel') == 'add':
+                                 add_vdc_rest_url = child.attrib.get('href')
+                 except:
+                     self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
+                     self.logger.debug("Response body: {}".format(response.content))
+                     return None
+             # find the PVDC-provided available network
+             response = self.perform_request(req_type='GET',
+                                             url=provider_network,
+                                             headers=headers)
++
+             if response.status_code != requests.codes.ok:
+                 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+                                                                                           response.status_code))
+                 return None
+             if parent_network_uuid is None:
+                 try:
+                     vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+                     for child in vm_list_xmlroot.iter():
+                         if child.tag.split("}")[1] == 'AvailableNetworks':
+                             for networks in child.iter():
+                                 # application/vnd.vmware.admin.network+xml
+                                 if networks.attrib.get('href') is not None:
+                                     available_networks = networks.attrib.get('href')
+                                     break
+                 except:
+                     return None
+             try:
+                 #Configure IP profile of the network
+                 ip_profile = ip_profile if ip_profile is not None else DEFAULT_IP_PROFILE
+                 if 'subnet_address' not in ip_profile or ip_profile['subnet_address'] is None:
+                     subnet_rand = random.randint(0, 255)
+                     ip_base = "192.168.{}.".format(subnet_rand)
+                     ip_profile['subnet_address'] = ip_base + "0/24"
+                 else:
+                     ip_base = ip_profile['subnet_address'].rsplit('.',1)[0] + '.'
+                 if 'gateway_address' not in ip_profile or ip_profile['gateway_address'] is None:
+                     ip_profile['gateway_address']=ip_base + "1"
+                 if 'dhcp_count' not in ip_profile or ip_profile['dhcp_count'] is None:
+                     ip_profile['dhcp_count']=DEFAULT_IP_PROFILE['dhcp_count']
+                 if 'dhcp_enabled' not in ip_profile or ip_profile['dhcp_enabled'] is None:
+                     ip_profile['dhcp_enabled']=DEFAULT_IP_PROFILE['dhcp_enabled']
+                 if 'dhcp_start_address' not in ip_profile or ip_profile['dhcp_start_address'] is None:
+                     ip_profile['dhcp_start_address']=ip_base + "3"
+                 if 'ip_version' not in ip_profile or ip_profile['ip_version'] is None:
+                     ip_profile['ip_version']=DEFAULT_IP_PROFILE['ip_version']
+                 if 'dns_address' not in ip_profile or ip_profile['dns_address'] is None:
+                     ip_profile['dns_address']=ip_base + "2"
+                 gateway_address=ip_profile['gateway_address']
+                 dhcp_count=int(ip_profile['dhcp_count'])
+                 subnet_address=self.convert_cidr_to_netmask(ip_profile['subnet_address'])
+                 if ip_profile['dhcp_enabled']:
+                     dhcp_enabled='true'
+                 else:
+                     dhcp_enabled='false'
+                 dhcp_start_address=ip_profile['dhcp_start_address']
+                 #derive dhcp_end_address from dhcp_start_address & dhcp_count
+                 end_ip_int = int(netaddr.IPAddress(dhcp_start_address))
+                 end_ip_int += dhcp_count - 1
+                 dhcp_end_address = str(netaddr.IPAddress(end_ip_int))
+                 ip_version=ip_profile['ip_version']
+                 dns_address=ip_profile['dns_address']
+             except KeyError as exp:
+                 self.logger.debug("Create Network REST: Key error {}".format(exp))
+                 raise vimconn.vimconnException("Create Network REST: Key error {}".format(exp))
+             # either use client provided UUID or search for a first available
+             #  if both are not defined we return none
+             if parent_network_uuid is not None:
+                 provider_network = None
+                 available_networks = None
+                 add_vdc_rest_url = None
+                 url_list = [self.url, '/api/admin/vdc/', self.tenant_id, '/networks']
+                 add_vdc_rest_url = ''.join(url_list)
+                 url_list = [self.url, '/api/admin/network/', parent_network_uuid]
+                 available_networks = ''.join(url_list)
+             #Creating all networks as Direct Org VDC type networks.
+             #Unused in case of Underlay (data/ptp) network interface.
+             fence_mode="isolated"
+             is_inherited='false'
+             dns_list = dns_address.split(";")
+             dns1 = dns_list[0]
+             dns2_text = ""
+             if len(dns_list) >= 2:
+                 dns2_text = "\n                                                <Dns2>{}</Dns2>\n".format(dns_list[1])
++            if net_type == "isolated":
++                fence_mode="isolated"
++                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
++                                <Description>Openmano created</Description>
++                                        <Configuration>
++                                            <IpScopes>
++                                                <IpScope>
++                                                    <IsInherited>{1:s}</IsInherited>
++                                                    <Gateway>{2:s}</Gateway>
++                                                    <Netmask>{3:s}</Netmask>
++                                                    <Dns1>{4:s}</Dns1>{5:s}
++                                                    <IsEnabled>{6:s}</IsEnabled>
++                                                    <IpRanges>
++                                                        <IpRange>
++                                                            <StartAddress>{7:s}</StartAddress>
++                                                            <EndAddress>{8:s}</EndAddress>
++                                                        </IpRange>
++                                                    </IpRanges>
++                                                </IpScope>
++                                            </IpScopes>
++                                            <FenceMode>{9:s}</FenceMode>
++                                        </Configuration>
++                                        <IsShared>{10:s}</IsShared>
++                            </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
++                                                        subnet_address, dns1, dns2_text, dhcp_enabled,
++                                                        dhcp_start_address, dhcp_end_address,
++                                                        fence_mode, isshared)
++            else:
++                fence_mode = "bridged"
++                data = """ <OrgVdcNetwork name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
++                        <Description>Openmano created</Description>
++                                <Configuration>
++                                    <IpScopes>
++                                        <IpScope>
++                                            <IsInherited>{1:s}</IsInherited>
++                                            <Gateway>{2:s}</Gateway>
++                                            <Netmask>{3:s}</Netmask>
++                                            <Dns1>{4:s}</Dns1>{5:s}
++                                            <IsEnabled>{6:s}</IsEnabled>
++                                            <IpRanges>
++                                                <IpRange>
++                                                    <StartAddress>{7:s}</StartAddress>
++                                                    <EndAddress>{8:s}</EndAddress>
++                                                </IpRange>
++                                            </IpRanges>
++                                        </IpScope>
++                                    </IpScopes>
++                                    <ParentNetwork href="{9:s}"/>
++                                    <FenceMode>{10:s}</FenceMode>
++                                </Configuration>
++                                <IsShared>{11:s}</IsShared>
++                    </OrgVdcNetwork> """.format(escape(network_name), is_inherited, gateway_address,
++                                                subnet_address, dns1, dns2_text, dhcp_enabled,
++                                                dhcp_start_address, dhcp_end_address, available_networks,
++                                                fence_mode, isshared)
+             headers['Content-Type'] = 'application/vnd.vmware.vcloud.orgVdcNetwork+xml'
+             try:
+                 response = self.perform_request(req_type='POST',
+                                            url=add_vdc_rest_url,
+                                            headers=headers,
+                                            data=data)
+                 if response.status_code != 201:
+                     self.logger.debug("Create Network POST REST API call failed. Return status code {}, Response content: {}"
+                                       .format(response.status_code,response.content))
+                 else:
+                     network_task = self.get_task_from_response(response.content)
+                     self.logger.debug("Create Network REST : Waiting for Network creation complete")
+                     time.sleep(5)
+                     result = self.client.get_task_monitor().wait_for_success(task=network_task)
+                     if result.get('status') == 'success':
+                         return response.content
+                     else:
+                         self.logger.debug("create_network_rest task failed. Network Create response : {}"
+                                           .format(response.content))
+             except Exception as exp:
+                 self.logger.debug("create_network_rest : Exception : {} ".format(exp))
+         return None
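The DHCP range arithmetic in create_network_rest converts the start address to an integer, adds dhcp_count - 1, and converts back. A standalone sketch with netaddr (already imported by this module):

    import netaddr

    dhcp_start_address = '192.168.10.3'
    dhcp_count = 50
    end_ip_int = int(netaddr.IPAddress(dhcp_start_address)) + dhcp_count - 1
    print(str(netaddr.IPAddress(end_ip_int)))  # -> 192.168.10.52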
+     def convert_cidr_to_netmask(self, cidr_ip=None):
+         """
+         Method converts a CIDR address to a dotted-decimal netmask
+         Args:
+             cidr_ip : CIDR IP address
+             Returns:
+                 netmask : the converted netmask
+         """
+         if cidr_ip is not None:
+             if '/' in cidr_ip:
+                 network, net_bits = cidr_ip.split('/')
+                 netmask = socket.inet_ntoa(struct.pack(">I", (0xffffffff << (32 - int(net_bits))) & 0xffffffff))
+             else:
+                 netmask = cidr_ip
+             return netmask
+         return None
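For reference, a /24 prefix packs to 0xffffff00, which inet_ntoa renders as 255.255.255.0. A usage sketch of the same conversion logic:

    import socket
    import struct

    def cidr_to_netmask(cidr_ip):
        # Mirrors convert_cidr_to_netmask above as a standalone function.
        if '/' not in cidr_ip:
            return cidr_ip
        _, net_bits = cidr_ip.split('/')
        mask = (0xffffffff << (32 - int(net_bits))) & 0xffffffff
        return socket.inet_ntoa(struct.pack(">I", mask))

    print(cidr_to_netmask('192.168.10.0/24'))  # -> 255.255.255.0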
+     def get_provider_rest(self, vca=None):
+         """
+         Method gets the provider vdc view from vCloud director
+         Args:
+             vca - is an active admin connection.
+             Returns:
+                 The XML content of the response, or None
+         """
+         url_list = [self.url, '/api/admin']
+         if vca:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='GET',
+                                             url=''.join(url_list),
+                                             headers=headers)
+             if response.status_code == requests.codes.ok:
+                 return response.content
+         return None
+     def create_vdc(self, vdc_name=None):
+         vdc_dict = {}
+         xml_content = self.create_vdc_from_tmpl_rest(vdc_name=vdc_name)
+         if xml_content is not None:
+             try:
+                 task_resp_xmlroot = XmlElementTree.fromstring(xml_content)
+                 for child in task_resp_xmlroot:
+                     if child.tag.split("}")[1] == 'Owner':
+                         vdc_id = child.attrib.get('href').split("/")[-1]
+                         vdc_dict[vdc_id] = task_resp_xmlroot.get('href')
+                         return vdc_dict
+             except:
+                 self.logger.debug("Response body {}".format(xml_content))
+         return None
+     def create_vdc_from_tmpl_rest(self, vdc_name=None):
+         """
+         Method creates a vdc in vCloud director based on a pre-defined VDC template.
+         Args:
+             vdc_name - name of the new vdc.
+             Returns:
+                 The XML content of the response, or None
+         """
+         # prerequisite: at least one vdc template should be available in vCD
+         self.logger.info("Creating new vdc {}".format(vdc_name))
+         vca = self.connect_as_admin()
+         if not vca:
+             raise vimconn.vimconnConnectionException("Failed to connect vCD")
+         if vdc_name is None:
+             return None
+         url_list = [self.url, '/api/vdcTemplates']
+         vm_list_rest_call = ''.join(url_list)
+         headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                     'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+         response = self.perform_request(req_type='GET',
+                                         url=vm_list_rest_call,
+                                         headers=headers)
+         # container url to a template
+         vdc_template_ref = None
+         try:
+             vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+             for child in vm_list_xmlroot:
+                 # application/vnd.vmware.admin.providervdc+xml
+                 # we need to find a template from which we instantiate the VDC
+                 if child.tag.split("}")[1] == 'VdcTemplate':
+                     if child.attrib.get('type') == 'application/vnd.vmware.admin.vdcTemplate+xml':
+                         vdc_template_ref = child.attrib.get('href')
+         except:
+             self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
+             self.logger.debug("Response body: {}".format(response.content))
+             return None
+         # if we didn't find the required pre-defined template we return None
+         if vdc_template_ref is None:
+             return None
+         try:
+             # instantiate vdc
+             url_list = [self.url, '/api/org/', self.org_uuid, '/action/instantiate']
+             vm_list_rest_call = ''.join(url_list)
+             data = """<InstantiateVdcTemplateParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5">
+                                         <Source href="{1:s}"></Source>
+                                         <Description>openmano</Description>
+                                         </InstantiateVdcTemplateParams>""".format(vdc_name, vdc_template_ref)
+             headers['Content-Type'] = 'application/vnd.vmware.vcloud.instantiateVdcTemplateParams+xml'
+             response = self.perform_request(req_type='POST',
+                                             url=vm_list_rest_call,
+                                             headers=headers,
+                                             data=data)
+             vdc_task = self.get_task_from_response(response.content)
+             self.client.get_task_monitor().wait_for_success(task=vdc_task)
+             # if all is OK we return the content, otherwise None by default
+             if 200 <= response.status_code < 300:
+                 return response.content
+             return None
+         except:
+             self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
+             self.logger.debug("Response body: {}".format(response.content))
+         return None
+     def create_vdc_rest(self, vdc_name=None):
+         """
+         Method creates a vdc in vCloud director
+         Args:
+             vdc_name - vdc name to be created
+             Returns:
+                 The response content, or None
+         """
+         self.logger.info("Creating new vdc {}".format(vdc_name))
+         vca = self.connect_as_admin()
+         if not vca:
+             raise vimconn.vimconnConnectionException("Failed to connect vCD")
+         if vdc_name is None:
+             return None
+         url_list = [self.url, '/api/admin/org/', self.org_uuid]
+         vm_list_rest_call = ''.join(url_list)
+         if vca._session:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                       'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='GET',
+                                             url=vm_list_rest_call,
+                                             headers=headers)
+             provider_vdc_ref = None
+             add_vdc_rest_url = None
+             available_networks = None
+             if response.status_code != requests.codes.ok:
+                 self.logger.debug("REST API call {} failed. Return status code {}".format(vm_list_rest_call,
+                                                                                           response.status_code))
+                 return None
+             else:
+                 try:
+                     vm_list_xmlroot = XmlElementTree.fromstring(response.content)
+                     for child in vm_list_xmlroot:
+                         # application/vnd.vmware.admin.providervdc+xml
+                         if child.tag.split("}")[1] == 'Link':
+                             if child.attrib.get('type') == 'application/vnd.vmware.admin.createVdcParams+xml' \
+                                     and child.attrib.get('rel') == 'add':
+                                 add_vdc_rest_url = child.attrib.get('href')
+                 except:
+                     self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
+                     self.logger.debug("Response body: {}".format(response.content))
+                     return None
+                 response = self.get_provider_rest(vca=vca)
+                 try:
+                     vm_list_xmlroot = XmlElementTree.fromstring(response)
+                     for child in vm_list_xmlroot:
+                         if child.tag.split("}")[1] == 'ProviderVdcReferences':
+                             for sub_child in child:
+                                 provider_vdc_ref = sub_child.attrib.get('href')
+                 except:
+                     self.logger.debug("Failed to parse response for REST API call {}".format(vm_list_rest_call))
+                     self.logger.debug("Response body: {}".format(response))
+                     return None
+                 if add_vdc_rest_url is not None and provider_vdc_ref is not None:
+                     data = """ <CreateVdcParams name="{0:s}" xmlns="http://www.vmware.com/vcloud/v1.5"><Description>{1:s}</Description>
+                             <AllocationModel>ReservationPool</AllocationModel>
+                             <ComputeCapacity><Cpu><Units>MHz</Units><Allocated>2048</Allocated><Limit>2048</Limit></Cpu>
+                             <Memory><Units>MB</Units><Allocated>2048</Allocated><Limit>2048</Limit></Memory>
+                             </ComputeCapacity><NicQuota>0</NicQuota><NetworkQuota>100</NetworkQuota>
+                             <VdcStorageProfile><Enabled>true</Enabled><Units>MB</Units><Limit>20480</Limit><Default>true</Default></VdcStorageProfile>
+                             <ProviderVdcReference
+                             name="Main Provider"
+                             href="{2:s}" />
+                     <UsesFastProvisioning>true</UsesFastProvisioning></CreateVdcParams>""".format(escape(vdc_name),
+                                                                                                   escape(vdc_name),
+                                                                                                   provider_vdc_ref)
+                     headers['Content-Type'] = 'application/vnd.vmware.admin.createVdcParams+xml'
+                     response = self.perform_request(req_type='POST',
+                                                     url=add_vdc_rest_url,
+                                                     headers=headers,
+                                                     data=data)
+                     # if all is OK we return the content, otherwise None by default
+                     if response.status_code == 201:
+                         return response.content
+         return None
+     def get_vapp_details_rest(self, vapp_uuid=None, need_admin_access=False):
+         """
+         Method retrieves vApp details from vCloud director
+         Args:
+             vapp_uuid - is the vapp identifier.
+             need_admin_access - True to query with an administrator connection
+             Returns:
+                 A dictionary with the parsed vApp details
+         """
+         parsed_respond = {}
+         vca = None
+         if need_admin_access:
+             vca = self.connect_as_admin()
+         else:
+             vca = self.client
+         if not vca:
+             raise vimconn.vimconnConnectionException("Failed to connect vCD")
+         if vapp_uuid is None:
+             return None
+         url_list = [self.url, '/api/vApp/vapp-', vapp_uuid]
+         get_vapp_restcall = ''.join(url_list)
+         if vca._session:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='GET',
+                                             url=get_vapp_restcall,
+                                             headers=headers)
+             if response.status_code == 403:
+                 if not need_admin_access:
+                     response = self.retry_rest('GET', get_vapp_restcall)
+             if response.status_code != requests.codes.ok:
+                 self.logger.debug("REST API call {} failed. Return status code {}".format(get_vapp_restcall,
+                                                                                           response.status_code))
+                 return parsed_respond
+             try:
+                 xmlroot_respond = XmlElementTree.fromstring(response.content)
+                 parsed_respond['ovfDescriptorUploaded'] = xmlroot_respond.attrib['ovfDescriptorUploaded']
+                 namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                               'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                               'vmw': 'http://www.vmware.com/schema/ovf',
+                               'vm': 'http://www.vmware.com/vcloud/v1.5',
+                               'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                               "vmext":"http://www.vmware.com/vcloud/extension/v1.5",
+                               "xmlns":"http://www.vmware.com/vcloud/v1.5"
+                              }
+                 created_section = xmlroot_respond.find('vm:DateCreated', namespaces)
+                 if created_section is not None:
+                     parsed_respond['created'] = created_section.text
+                 network_section = xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig', namespaces)
+                 if network_section is not None and 'networkName' in network_section.attrib:
+                     parsed_respond['networkname'] = network_section.attrib['networkName']
+                 ipscopes_section = \
+                     xmlroot_respond.find('vm:NetworkConfigSection/vm:NetworkConfig/vm:Configuration/vm:IpScopes',
+                                          namespaces)
+                 if ipscopes_section is not None:
+                     for ipscope in ipscopes_section:
+                         for scope in ipscope:
+                             tag_key = scope.tag.split("}")[1]
+                             if tag_key == 'IpRanges':
+                                 ip_ranges = list(scope)  # getchildren() was removed in Python 3.9
+                                 for ipblock in ip_ranges:
+                                     for block in ipblock:
+                                         parsed_respond[block.tag.split("}")[1]] = block.text
+                             else:
+                                 parsed_respond[tag_key] = scope.text
+                 # parse children section for other attrib
+                 children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                 if children_section is not None:
+                     parsed_respond['name'] = children_section.attrib['name']
+                     parsed_respond['nestedHypervisorEnabled'] = children_section.attrib['nestedHypervisorEnabled'] \
+                      if  "nestedHypervisorEnabled" in children_section.attrib else None
+                     parsed_respond['deployed'] = children_section.attrib['deployed']
+                     parsed_respond['status'] = children_section.attrib['status']
+                     parsed_respond['vmuuid'] = children_section.attrib['id'].split(":")[-1]
+                     network_adapter = children_section.find('vm:NetworkConnectionSection', namespaces)
+                     nic_list = []
+                     for adapters in network_adapter:
+                         adapter_key = adapters.tag.split("}")[1]
+                         if adapter_key == 'PrimaryNetworkConnectionIndex':
+                             parsed_respond['primarynetwork'] = adapters.text
+                         if adapter_key == 'NetworkConnection':
+                             vnic = {}
+                             if 'network' in adapters.attrib:
+                                 vnic['network'] = adapters.attrib['network']
+                             for adapter in adapters:
+                                 setting_key = adapter.tag.split("}")[1]
+                                 vnic[setting_key] = adapter.text
+                             nic_list.append(vnic)
+                     for link in children_section:
+                         if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
+                             if link.attrib['rel'] == 'screen:acquireTicket':
+                                 parsed_respond['acquireTicket'] = link.attrib
+                             if link.attrib['rel'] == 'screen:acquireMksTicket':
+                                 parsed_respond['acquireMksTicket'] = link.attrib
+                     parsed_respond['interfaces'] = nic_list
+                     vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                     if vCloud_extension_section is not None:
+                         vm_vcenter_info = {}
+                         vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                         vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                         if vmext is not None:
+                             vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                         parsed_respond["vm_vcenter_info"]= vm_vcenter_info
+                     virtual_hardware_section = children_section.find('ovf:VirtualHardwareSection', namespaces)
+                     vm_virtual_hardware_info = {}
+                     if virtual_hardware_section is not None:
+                         for item in virtual_hardware_section.iterfind('ovf:Item',namespaces):
+                             if item.find("rasd:Description",namespaces).text == "Hard disk":
+                                 disk_size = item.find("rasd:HostResource" ,namespaces
+                                                 ).attrib["{"+namespaces['vm']+"}capacity"]
+                                 vm_virtual_hardware_info["disk_size"]= disk_size
+                                 break
+                         for link in virtual_hardware_section:
+                             if link.tag.split("}")[1] == 'Link' and 'rel' in link.attrib:
+                                 if link.attrib['rel'] == 'edit' and link.attrib['href'].endswith("/disks"):
+                                     vm_virtual_hardware_info["disk_edit_href"] = link.attrib['href']
+                                     break
+                     parsed_respond["vm_virtual_hardware"]= vm_virtual_hardware_info
+             except Exception as exp:
+                 self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+         return parsed_respond
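The lookups above pass a prefix-to-URI map to ElementTree's find(), which is how the 'vm:' and 'ovf:' paths resolve. A minimal sketch:

    import xml.etree.ElementTree as XmlElementTree

    namespaces = {'vm': 'http://www.vmware.com/vcloud/v1.5'}
    doc = XmlElementTree.fromstring(
        '<VApp xmlns="http://www.vmware.com/vcloud/v1.5">'
        '<DateCreated>2019-11-23T15:11:15Z</DateCreated></VApp>')
    created = doc.find('vm:DateCreated', namespaces)
    if created is not None:
        print(created.text)  # -> 2019-11-23T15:11:15Z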
+     def acquire_console(self, vm_uuid=None):
+         if vm_uuid is None:
+             return None
+         if self.client._session:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             vm_dict = self.get_vapp_details_rest(vapp_uuid=vm_uuid)
+             console_dict = vm_dict['acquireTicket']
+             console_rest_call = console_dict['href']
+             response = self.perform_request(req_type='POST',
+                                             url=console_rest_call,
+                                             headers=headers)
+             if response.status_code == 403:
+                 response = self.retry_rest('POST', console_rest_call)
+             if response.status_code == requests.codes.ok:
+                 return response.content
+         return None
+     def modify_vm_disk(self, vapp_uuid, flavor_disk):
+         """
+         Method modifies the VM disk when the flavor disk size exceeds the current one
+         Args:
+             vapp_uuid - is the vapp identifier.
+             flavor_disk - disk size as specified in VNFD (flavor)
+             Returns:
+                 True on success or when no resize is needed, otherwise None
+         """
+         status = None
+         try:
+             #Flavor disk is in GB convert it into MB
+             flavor_disk = int(flavor_disk) * 1024
+             vm_details = self.get_vapp_details_rest(vapp_uuid)
+             if vm_details:
+                 vm_name = vm_details["name"]
+                 self.logger.info("VM: {} flavor_disk :{}".format(vm_name , flavor_disk))
+             if vm_details and "vm_virtual_hardware" in vm_details:
+                 vm_disk = int(vm_details["vm_virtual_hardware"]["disk_size"])
+                 disk_edit_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+                 self.logger.info("VM: {} VM_disk :{}".format(vm_name , vm_disk))
+                 if flavor_disk > vm_disk:
+                     status = self.modify_vm_disk_rest(disk_edit_href ,flavor_disk)
+                     self.logger.info("Modify disk of VM {} from {} to {} MB".format(vm_name,
+                                                          vm_disk,  flavor_disk ))
+                 else:
+                     status = True
+                     self.logger.info("No need to modify disk of VM {}".format(vm_name))
+             return status
+         except Exception as exp:
+             self.logger.info("Error occurred while modifying disk size {}".format(exp))
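The unit handling here matters: VNFD flavors express disk in GB while vCD reports MB, so the flavor value is scaled by 1024 before the comparison. Worked example:

    flavor_disk_gb = 40
    vm_disk_mb = 20480                      # 20 GB as reported by vCD
    flavor_disk_mb = int(flavor_disk_gb) * 1024
    print(flavor_disk_mb > vm_disk_mb)      # -> True, so a resize is triggered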
+     def modify_vm_disk_rest(self, disk_href , disk_size):
+         """
+         Method modifies the VM disk size via the vCD REST API
+         Args:
+             disk_href - vCD API URL to GET and PUT disk data
+             disk_size - disk size as specified in VNFD (flavor)
+             Returns:
+                 True on success, False on task failure, otherwise None
+         """
+         if disk_href is None or disk_size is None:
+             return None
+         if self.client._session:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='GET',
+                                             url=disk_href,
+                                             headers=headers)
+         if response.status_code == 403:
+             response = self.retry_rest('GET', disk_href)
+         if response.status_code != requests.codes.ok:
+             self.logger.debug("GET REST API call {} failed. Return status code {}".format(disk_href,
+                                                                             response.status_code))
+             return None
+         try:
+             lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+             namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+             namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+             for item in lxmlroot_respond.iterfind('xmlns:Item',namespaces):
+                 if item.find("rasd:Description",namespaces).text == "Hard disk":
+                     disk_item = item.find("rasd:HostResource" ,namespaces )
+                     if disk_item is not None:
+                         disk_item.attrib["{"+namespaces['xmlns']+"}capacity"] = str(disk_size)
+                         break
+             data = lxmlElementTree.tostring(lxmlroot_respond, encoding='utf8', method='xml',
+                                              xml_declaration=True)
+             #Send PUT request to modify disk size
+             headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+             response = self.perform_request(req_type='PUT',
+                                                 url=disk_href,
+                                                 headers=headers,
+                                                 data=data)
+             if response.status_code == 403:
+                 add_headers = {'Content-Type': headers['Content-Type']}
+                 response = self.retry_rest('PUT', disk_href, add_headers, data)
+             if response.status_code != 202:
+                 self.logger.debug("PUT REST API call {} failed. Return status code {}".format(disk_href,
+                                                                             response.status_code))
+             else:
+                 modify_disk_task = self.get_task_from_response(response.content)
+                 result = self.client.get_task_monitor().wait_for_success(task=modify_disk_task)
+                 if result.get('status') == 'success':
+                     return True
+                 else:
+                     return False
+             return None
+         except Exception as exp:
+             self.logger.info("Error occurred calling rest api for modifying disk size {}".format(exp))
+             return None
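The nsmap handling above is lxml-specific: the parsed document exposes its namespace prefixes, the default (None-keyed) entry is dropped because find() rejects it, and 'xmlns' is re-added as an explicit alias for the vCloud default namespace. Sketch:

    from lxml import etree as lxmlElementTree

    doc = lxmlElementTree.fromstring(
        b'<Item xmlns="http://www.vmware.com/vcloud/v1.5" '
        b'xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/'
        b'CIM_ResourceAllocationSettingData">'
        b'<rasd:Description>Hard disk</rasd:Description></Item>')
    namespaces = {prefix: uri for prefix, uri in doc.nsmap.items() if prefix}
    namespaces['xmlns'] = 'http://www.vmware.com/vcloud/v1.5'
    print(doc.find('rasd:Description', namespaces).text)  # -> Hard disk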
+     def add_pci_devices(self, vapp_uuid , pci_devices , vmname_andid):
+         """
+             Method to attach pci devices to a VM
+              Args:
+                 vapp_uuid - uuid of vApp/VM
+                 pci_devices - pci device information as specified in VNFD (flavor)
+             Returns:
+                 The status of the add pci device task, the vm object and the
+                 vcenter_conect object
+         """
+         vm_obj = None
+         self.logger.info("Add pci devices {} into vApp {}".format(pci_devices , vapp_uuid))
+         vcenter_conect, content = self.get_vcenter_content()
+         vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+         if vm_moref_id:
+             try:
+                 no_of_pci_devices = len(pci_devices)
+                 if no_of_pci_devices > 0:
+                     #Get VM and its host
+                     host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                     self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                     if host_obj and vm_obj:
+                         #get PCI devices from the host on which the vapp is currently installed
+                         available_pci_devices = self.get_pci_devices(host_obj, no_of_pci_devices)
+                         if available_pci_devices is None:
+                             #find other hosts with active pci devices
+                             new_host_obj , available_pci_devices = self.get_host_and_PCIdevices(
+                                                                 content,
+                                                                 no_of_pci_devices
+                                                                 )
+                             if new_host_obj is not None and available_pci_devices is not None and len(available_pci_devices)> 0:
+                                 #Migrate vm to the host where PCI devices are available
+                                 self.logger.info("Relocate VM {} on new host {}".format(vm_obj, new_host_obj))
+                                 task = self.relocate_vm(new_host_obj, vm_obj)
+                                 if task is not None:
+                                     result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                     self.logger.info("Migrate VM status: {}".format(result))
+                                     host_obj = new_host_obj
+                                 else:
+                                     self.logger.info("Failed to migrate VM {} to host {}".format(vmname_andid, new_host_obj))
+                                     raise vimconn.vimconnNotFoundException(
+                                     "Failed to migrate VM : {} to host {}".format(
+                                                     vmname_andid,
+                                                     new_host_obj)
+                                         )
+                         if host_obj is not None and available_pci_devices is not None and len(available_pci_devices)> 0:
+                             #Add PCI devices one by one
+                             for pci_device in available_pci_devices:
+                                 task = self.add_pci_to_vm(host_obj, vm_obj, pci_device)
+                                 if task:
+                                     status = self.wait_for_vcenter_task(task, vcenter_conect)
+                                     if status:
+                                         self.logger.info("Added PCI device {} to VM {}".format(pci_device, str(vm_obj)))
+                                 else:
+                                     self.logger.error("Fail to add PCI device {} to VM {}".format(pci_device, str(vm_obj)))
+                             return True, vm_obj, vcenter_conect
+                         else:
+                             self.logger.error("Currently there is no host with"\
+                                               " {} number of available PCI devices required for VM {}".format(
+                                                                             no_of_pci_devices,
+                                                                             vmname_andid)
+                                               )
+                             raise vimconn.vimconnNotFoundException(
+                                     "Currently there is no host with {} "\
+                                     "number of available PCI devices required for VM {}".format(
+                                                                             no_of_pci_devices,
+                                                                             vmname_andid))
+                 else:
+                     self.logger.debug("No information about PCI devices {}".format(pci_devices))
+             except vmodl.MethodFault as error:
+                 self.logger.error("Error occurred while adding PCI devices: {}".format(error))
+         return None, vm_obj, vcenter_conect
+     def get_vm_obj(self, content, mob_id):
+         """
+             Method to get the vSphere VM object associated with a given moref ID
+              Args:
+                 content - vCenter content object
+                 mob_id - moref ID of the VM
+             Returns:
+                     host object and VM object
+         """
+         vm_obj = None
+         host_obj = None
+         try:
+             container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                         [vim.VirtualMachine], True
+                                                         )
+             for vm in container.view:
+                 mobID = vm._GetMoId()
+                 if mobID == mob_id:
+                     vm_obj = vm
+                     host_obj = vm_obj.runtime.host
+                     break
+         except Exception as exp:
+             self.logger.error("Error occurred while finding VM object : {}".format(exp))
+         return host_obj, vm_obj
+     def get_pci_devices(self, host, need_devices):
+         """
+             Method to get the details of pci passthrough devices on a given host
+              Args:
+                 host - vSphere host object
+                 need_devices - number of pci devices needed on host
+              Returns:
+                 list of available pci devices, or None if fewer than
+                 need_devices are free
+         """
+         all_devices = []
+         all_device_ids = []
+         used_devices_ids = []
+         try:
+             if host:
+                 pciPassthruInfo = host.config.pciPassthruInfo
+                 pciDevies = host.hardware.pciDevice
+             for pci_status in pciPassthruInfo:
+                 if pci_status.passthruActive:
+                     for device in pciDevies:
+                         if device.id == pci_status.id:
+                             all_device_ids.append(device.id)
+                             all_devices.append(device)
+             #check if devices are in use
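+             # a device counts as "in use" when a powered-on VM on this host
+             # already has a VirtualPCIPassthrough entry backed by the same id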
+             available_devices = list(all_devices)
+             for vm in host.vm:
+                 if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
+                     vm_devices = vm.config.hardware.device
+                     for device in vm_devices:
+                         if type(device) is vim.vm.device.VirtualPCIPassthrough:
+                             if device.backing.id in all_device_ids:
+                                 for use_device in list(available_devices):
+                                     if use_device.id == device.backing.id:
+                                         available_devices.remove(use_device)
+                                 used_devices_ids.append(device.backing.id)
+                                 self.logger.debug("Device {} from devices {} "\
+                                         "is in use".format(device.backing.id,
+                                                            device)
+                                             )
+             if len(available_devices) < need_devices:
+                 self.logger.debug("Host {} does not have {} active devices".format(host,
+                                                                             need_devices))
+                 self.logger.debug("found only {} devices {}".format(len(available_devices),
+                                                                     available_devices))
+                 return None
+             else:
+                 required_devices = available_devices[:need_devices]
+                 self.logger.info("Found {} PCI devices on host {} but only {} are required".format(
+                                                             len(available_devices),
+                                                             host,
+                                                             need_devices))
+                 self.logger.info("Returning {} devices as {}".format(need_devices,
+                                                                      required_devices))
+                 return required_devices
+         except Exception as exp:
+             self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host))
+         return None
+     def get_host_and_PCIdevices(self, content, need_devices):
+         """
+          Method to find a host that has enough available pci devices
+             Args:
+                 content - vCenter content object
+                 need_devices - number of pci devices needed on host
+             Returns:
+                  host object and list of its available pci devices
+         """
+         host_obj = None
+         pci_device_objs = None
+         try:
+             if content:
+                 container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                             [vim.HostSystem], True)
+                 for host in container.view:
+                     devices = self.get_pci_devices(host, need_devices)
+                     if devices:
+                         host_obj = host
+                         pci_device_objs = devices
+                         break
+         except Exception as exp:
+             self.logger.error("Error {} occurred while finding pci devices on host: {}".format(exp, host_obj))
+         return host_obj, pci_device_objs
+     def relocate_vm(self, dest_host, vm):
+         """
+          Method to relocate a VM to a new host
+             Args:
+                 dest_host - vSphere host object
+                 vm - vSphere VM object
+             Returns:
+                 task object
+         """
+         task = None
+         try:
+             relocate_spec = vim.vm.RelocateSpec(host=dest_host)
+             task = vm.Relocate(relocate_spec)
+             self.logger.info("Migrating {} to destination host {}".format(vm, dest_host))
+         except Exception as exp:
+             self.logger.error("Error occurred while relocating VM {} to new host {}: {}".format(
+                                                                             vm, dest_host, exp))
+         return task
+     def wait_for_vcenter_task(self, task, actionName='job', hideResult=False):
+         """
+         Waits and provides updates on a vSphere task
+         """
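+         # poll every two seconds until the task leaves the 'running' state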
+         while task.info.state == vim.TaskInfo.State.running:
+             time.sleep(2)
+         if task.info.state == vim.TaskInfo.State.success:
+             if task.info.result is not None and not hideResult:
+                 self.logger.info('{} completed successfully, result: {}'.format(
+                                                             actionName,
+                                                             task.info.result))
+             else:
+                 self.logger.info('Task {} completed successfully.'.format(actionName))
+         else:
+             self.logger.error('{} did not complete successfully: {} '.format(
+                                                             actionName,
+                                                             task.info.error)
+                               )
+         return task.info.result
+     def add_pci_to_vm(self, host_object, vm_object, host_pci_dev):
+         """
+          Method to add pci device in given VM
+             Args:
+                 host_object - vSphere host object
+                 vm_object - vSphere VM object
+                 host_pci_dev -  host_pci_dev must be one of the devices from the
+                                 host_object.hardware.pciDevice list
+                                 which is configured as a PCI passthrough device
+             Returns:
+                 task object
+         """
+         task = None
+         if vm_object and host_object and host_pci_dev:
+             try:
+                 # Add PCI device to VM
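+                 # QueryConfigTarget lists the PCI devices exposed for passthrough
+                 # to this VM; map each device id to the systemId required by the
+                 # backing spec built below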
+                 pci_passthroughs = vm_object.environmentBrowser.QueryConfigTarget(host=None).pciPassthrough
+                 systemid_by_pciid = {item.pciDevice.id: item.systemId for item in pci_passthroughs}
+                 if host_pci_dev.id not in systemid_by_pciid:
+                     self.logger.error("Device {} is not a passthrough device ".format(host_pci_dev))
+                     return None
+                 # take the low 16 bits of the device id as a bare hex string;
+                 # hex(...).lstrip('0x') would also strip leading zero digits
+                 deviceId = '{:x}'.format(host_pci_dev.deviceId % 2**16)
+                 backing = vim.VirtualPCIPassthroughDeviceBackingInfo(deviceId=deviceId,
+                                             id=host_pci_dev.id,
+                                             systemId=systemid_by_pciid[host_pci_dev.id],
+                                             vendorId=host_pci_dev.vendorId,
+                                             deviceName=host_pci_dev.deviceName)
+                 hba_object = vim.VirtualPCIPassthrough(key=-100, backing=backing)
+                 new_device_config = vim.VirtualDeviceConfigSpec(device=hba_object)
+                 new_device_config.operation = "add"
+                 vmConfigSpec = vim.vm.ConfigSpec()
+                 vmConfigSpec.deviceChange = [new_device_config]
+                 task = vm_object.ReconfigVM_Task(spec=vmConfigSpec)
+                 self.logger.info("Adding PCI device {} into VM {} from host {} ".format(
+                                                             host_pci_dev, vm_object, host_object)
+                                 )
+             except Exception as exp:
+                 self.logger.error("Error occurred while adding pci device {} to VM {}: {}".format(
+                                                                             host_pci_dev,
+                                                                             vm_object,
+                                                                             exp))
+         return task
+     def get_vm_vcenter_info(self):
+         """
+         Method to get the vCenter connection details configured for this VIM
+             Returns:
+                 dict with vCenter IP, port, user and password
+         """
+         vm_vcenter_info = {}
+         if self.vcenter_ip is not None:
+             vm_vcenter_info["vm_vcenter_ip"] = self.vcenter_ip
+         else:
+             raise vimconn.vimconnException(message="vCenter IP is not provided."\
+                                            " Please provide vCenter IP while attaching datacenter to tenant in --config")
+         if self.vcenter_port is not None:
+             vm_vcenter_info["vm_vcenter_port"] = self.vcenter_port
+         else:
+             raise vimconn.vimconnException(message="vCenter port is not provided."\
+                                            " Please provide vCenter port while attaching datacenter to tenant in --config")
+         if self.vcenter_user is not None:
+             vm_vcenter_info["vm_vcenter_user"] = self.vcenter_user
+         else:
+             raise vimconn.vimconnException(message="vCenter user is not provided."\
+                                            " Please provide vCenter user while attaching datacenter to tenant in --config")
+         if self.vcenter_password is not None:
+             vm_vcenter_info["vm_vcenter_password"] = self.vcenter_password
+         else:
+             raise vimconn.vimconnException(message="vCenter user password is not provided."\
+                                            " Please provide vCenter user password while attaching datacenter to tenant in --config")
+         return vm_vcenter_info
+     def get_vm_pci_details(self, vmuuid):
+         """
+             Method to get VM PCI device details from vCenter
+             Args:
+                 vmuuid - uuid of the VM
+             Returns:
+                 dict of PCI devices attached to the VM
+         """
+         vm_pci_devices_info = {}
+         try:
+             vcenter_conect, content = self.get_vcenter_content()
+             vm_moref_id = self.get_vm_moref_id(vmuuid)
+             if vm_moref_id:
+                 #Get VM and its host
+                 if content:
+                     host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                     if host_obj and vm_obj:
+                         vm_pci_devices_info["host_name"] = host_obj.name
+                         vm_pci_devices_info["host_ip"] = host_obj.config.network.vnic[0].spec.ip.ipAddress
+                         for device in vm_obj.config.hardware.device:
+                             if type(device) == vim.vm.device.VirtualPCIPassthrough:
+                                 device_details = {'device_id': device.backing.id,
+                                                   'pciSlotNumber': device.slotInfo.pciSlotNumber,
+                                                   }
+                                 vm_pci_devices_info[device.deviceInfo.label] = device_details
+                 else:
+                     self.logger.error("Cannot connect to vCenter while getting "\
+                                           "PCI devices information")
+                 return vm_pci_devices_info
+         except Exception as exp:
+             self.logger.error("Error occurred while getting VM information"\
+                              " for VM : {}".format(exp))
+             raise vimconn.vimconnException(message=exp)
+     def reserve_memory_for_all_vms(self, vapp, memory_mb):
+         """
+             Method to reserve memory for all VMs
+             Args:
+                 vapp - VApp
+                 memory_mb - Memory in MB
+             Returns:
+                 None
+         """
+         self.logger.info("Reserve memory for all VMs")
+         for vms in vapp.get_all_vms():
+             vm_id = vms.get('id').split(':')[-1]
+             url_rest_call = "{}/api/vApp/vm-{}/virtualHardwareSection/memory".format(self.url, vm_id)
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItem+xml'
+             response = self.perform_request(req_type='GET',
+                                             url=url_rest_call,
+                                             headers=headers)
+             if response.status_code == 403:
+                 response = self.retry_rest('GET', url_rest_call)
+             if response.status_code != 200:
+                 self.logger.error("REST call {} failed reason : {} "\
+                                   "status code : {}".format(url_rest_call,
+                                                             response.content,
+                                                             response.status_code))
+                 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to get "\
+                                                "memory")
+             # response.content is already bytes under Python 3; encode only if
+             # a str slipped through
+             bytexml = response.content
+             if isinstance(bytexml, str):
+                 bytexml = bytexml.encode('utf-8')
+             contentelem = lxmlElementTree.XML(bytexml)
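+             # lxml's nsmap carries no prefix for the default namespace; register
+             # it as 'xmlns' so the rasd/xmlns XPath lookups below resolve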
+             namespaces = {prefix:uri for prefix,uri in contentelem.nsmap.items() if prefix}
+             namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+             # Find the reservation element in the response
+             memelem_list = contentelem.findall(".//rasd:Reservation", namespaces)
+             for memelem in memelem_list:
+                 memelem.text = str(memory_mb)
+             newdata = lxmlElementTree.tostring(contentelem, pretty_print=True)
+             response = self.perform_request(req_type='PUT',
+                                             url=url_rest_call,
+                                             headers=headers,
+                                             data=newdata)
+             if response.status_code == 403:
+                 add_headers = {'Content-Type': headers['Content-Type']}
+                 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+             if response.status_code != 202:
+                 self.logger.error("REST call {} failed reason : {} "\
+                                   "status code : {} ".format(url_rest_call,
+                                   response.content,
+                                   response.status_code))
+                 raise vimconn.vimconnException("reserve_memory_for_all_vms : Failed to update "\
+                                                "virtual hardware memory section")
+             else:
+                 mem_task = self.get_task_from_response(response.content)
+                 result = self.client.get_task_monitor().wait_for_success(task=mem_task)
+                 if result.get('status') == 'success':
+                     self.logger.info("reserve_memory_for_all_vms(): VM {} succeeded "\
+                                       .format(vm_id))
+                 else:
+                     self.logger.error("reserve_memory_for_all_vms(): VM {} failed "\
+                                       .format(vm_id))
+     def connect_vapp_to_org_vdc_network(self, vapp_id, net_name):
+         """
+             Configure VApp network config with org vdc network
+             Args:
+                 vapp_id - UUID of the vApp
+                 net_name - name of the org VDC network to connect to
+             Returns:
+                 None
+         """
+         self.logger.info("Connecting vapp {} to org vdc network {}".
+                          format(vapp_id, net_name))
+         url_rest_call = "{}/api/vApp/vapp-{}/networkConfigSection/".format(self.url, vapp_id)
+         headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                    'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+         response = self.perform_request(req_type='GET',
+                                         url=url_rest_call,
+                                         headers=headers)
+         if response.status_code == 403:
+             response = self.retry_rest('GET', url_rest_call)
+         if response.status_code != 200:
+             self.logger.error("REST call {} failed reason : {} "\
+                               "status code : {}".format(url_rest_call,
+                                                         response.content,
+                                                         response.status_code))
+             raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to get "\
+                                            "network config section")
+         data = response.content
+         headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConfigSection+xml'
+         net_id = self.get_network_id_by_name(net_name)
+         if not net_id:
+             raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to find "\
+                                            "existing network")
+         # data (response.content) is bytes under Python 3; encode only if a
+         # str slipped through
+         bytexml = data if isinstance(data, bytes) else data.encode('utf-8')
+         newelem = lxmlElementTree.XML(bytexml)
+         namespaces = {prefix: uri for prefix, uri in newelem.nsmap.items() if prefix}
+         namespaces["xmlns"] = "http://www.vmware.com/vcloud/v1.5"
+         nwcfglist = newelem.findall(".//xmlns:NetworkConfig", namespaces)
+         # VCD 9.7 returns an incorrect parentnetwork element. Fix it before PUT operation
+         parentnetworklist = newelem.findall(".//xmlns:ParentNetwork", namespaces)
+         if parentnetworklist:
+             for pn in parentnetworklist:
+                 if "href" not in pn.keys():
+                     id_val = pn.get("id")
+                     href_val = "{}/api/network/{}".format(self.url, id_val)
+                     pn.set("href", href_val)
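+         # The new NetworkConfig bridges the vApp network directly onto the org
+         # VDC network (FenceMode 'bridged', i.e. no NAT or fencing)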
+         newstr = """<NetworkConfig networkName="{}">
+                   <Configuration>
+                        <ParentNetwork href="{}/api/network/{}"/>
+                        <FenceMode>bridged</FenceMode>
+                   </Configuration>
+               </NetworkConfig>
+            """.format(net_name, self.url, net_id)
+         newcfgelem = lxmlElementTree.fromstring(newstr)
+         if nwcfglist:
+             nwcfglist[0].addnext(newcfgelem)
+         newdata = lxmlElementTree.tostring(newelem, pretty_print=True)
+         response = self.perform_request(req_type='PUT',
+                                         url=url_rest_call,
+                                         headers=headers,
+                                         data=newdata)
+         if response.status_code == 403:
+             add_headers = {'Content-Type': headers['Content-Type']}
+             response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+         if response.status_code != 202:
+             self.logger.error("REST call {} failed reason : {} "\
+                               "status code : {} ".format(url_rest_call,
+                               response.content,
+                               response.status_code))
+             raise vimconn.vimconnException("connect_vapp_to_org_vdc_network : Failed to update "\
+                                            "network config section")
+         else:
+             vapp_task = self.get_task_from_response(response.content)
+             result = self.client.get_task_monitor().wait_for_success(task=vapp_task)
+             if result.get('status') == 'success':
+                 self.logger.info("connect_vapp_to_org_vdc_network(): Vapp {} connected to "\
+                                  "network {}".format(vapp_id, net_name))
+             else:
+                 self.logger.error("connect_vapp_to_org_vdc_network(): Vapp {} failed to "\
+                                   "connect to network {}".format(vapp_id, net_name))
+     def remove_primary_network_adapter_from_all_vms(self, vapp):
+         """
+             Method to remove the primary network adapter from all VMs in a vApp
+             Args:
+                 vapp - VApp
+             Returns:
+                 None
+         """
+         self.logger.info("Removing network adapter from all VMs")
+         for vms in vapp.get_all_vms():
+             vm_id = vms.get('id').split(':')[-1]
+             url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             response = self.perform_request(req_type='GET',
+                                             url=url_rest_call,
+                                             headers=headers)
+             if response.status_code == 403:
+                 response = self.retry_rest('GET', url_rest_call)
+             if response.status_code != 200:
+                 self.logger.error("REST call {} failed reason : {} "\
+                                   "status code : {}".format(url_rest_call,
+                                                             response.content,
+                                                             response.status_code))
+                 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to get "\
+                                                "network connection section")
+             data = response.content
+             if isinstance(data, bytes):
+                 # decode so the str-based split below works under Python 3
+                 data = data.decode('utf-8')
+             data = data.split('<Link rel="edit"')[0]
+             headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
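+             # PUT back a NetworkConnectionSection containing no
+             # NetworkConnection entries, which detaches the default adapter
+             # created from the template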
+             newdata = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                       <NetworkConnectionSection xmlns="http://www.vmware.com/vcloud/v1.5"
+                               xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+                               xmlns:vssd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData"
+                               xmlns:common="http://schemas.dmtf.org/wbem/wscim/1/common"
+                               xmlns:rasd="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData"
+                               xmlns:vmw="http://www.vmware.com/schema/ovf"
+                               xmlns:ovfenv="http://schemas.dmtf.org/ovf/environment/1"
+                               xmlns:vmext="http://www.vmware.com/vcloud/extension/v1.5"
+                               xmlns:ns9="http://www.vmware.com/vcloud/versions"
+                               href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml" ovf:required="false">
+                               <ovf:Info>Specifies the available VM network connections</ovf:Info>
+                              <PrimaryNetworkConnectionIndex>0</PrimaryNetworkConnectionIndex>
+                              <Link rel="edit" href="{url}" type="application/vnd.vmware.vcloud.networkConnectionSection+xml"/>
+                       </NetworkConnectionSection>""".format(url=url_rest_call)
+             response = self.perform_request(req_type='PUT',
+                                             url=url_rest_call,
+                                             headers=headers,
+                                             data=newdata)
+             if response.status_code == 403:
+                 add_headers = {'Content-Type': headers['Content-Type']}
+                 response = self.retry_rest('PUT', url_rest_call, add_headers, newdata)
+             if response.status_code != 202:
+                 self.logger.error("REST call {} failed reason : {} "\
+                                   "status code : {} ".format(url_rest_call,
+                                   response.content,
+                                   response.status_code))
+                 raise vimconn.vimconnException("remove_primary_network_adapter : Failed to update "\
+                                                "network connection section")
+             else:
+                 nic_task = self.get_task_from_response(response.content)
+                 result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                 if result.get('status') == 'success':
+                     self.logger.info("remove_primary_network_adapter(): removed primary "\
+                                       "network adapter from VM {}".format(vm_id))
+                 else:
+                     self.logger.error("remove_primary_network_adapter(): failed to remove "\
+                                       "primary network adapter from VM {}".format(vm_id))
+     def add_network_adapter_to_vms(self, vapp, network_name, primary_nic_index, nicIndex, net, nic_type=None):
+         """
+             Method to add a network adapter of the given type to all VMs in a vApp
+             Args:
+                 vapp - vApp object
+                 network_name - name of network
+                 primary_nic_index - int value for primary nic index
+                 nicIndex - int value for nic index
+                 net - dict for this interface from the VNFD (may carry
+                       floating_ip, ip_address and mac_address)
+                 nic_type - NIC adapter type (model) to attach to the VM
+             Returns:
+                 None
+         """
+         self.logger.info("Add network adapter to VM: network_name {} nicIndex {} nic_type {}".\
+                          format(network_name, nicIndex, nic_type))
+         try:
+             floating_ip = net.get('floating_ip', False)
+             # Stub for ip_address feature
+             ip_address = net.get('ip_address')
+             mac_address = net.get('mac_address')
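+             # vCloud IP allocation mode: POOL draws an address from the
+             # network's static IP pool (used here for floating IPs), MANUAL
+             # uses the fixed address from the VNFD, DHCP is the default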
+             if floating_ip:
+                 allocation_mode = "POOL"
+             elif ip_address:
+                 allocation_mode = "MANUAL"
+             else:
+                 allocation_mode = "DHCP"
+             if not nic_type:
+                 for vms in vapp.get_all_vms():
+                     vm_id = vms.get('id').split(':')[-1]
+                     url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+                     headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                     response = self.perform_request(req_type='GET',
+                                                     url=url_rest_call,
+                                                     headers=headers)
+                     if response.status_code == 403:
+                         response = self.retry_rest('GET', url_rest_call)
+                     if response.status_code != 200:
+                         self.logger.error("REST call {} failed reason : {} "\
+                                              "status code : {}".format(url_rest_call,
+                                                                     response.content,
+                                                                response.status_code))
+                         raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                          "network connection section")
+                     data = response.content
+                     if isinstance(data, bytes):
+                         # decode so the str-based manipulation below works under Python 3
+                         data = data.decode('utf-8')
+                     data = data.split('<Link rel="edit"')[0]
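+                     # Truncate at the edit <Link>; the NetworkConnection built
+                     # below is appended and the section re-closed by hand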
+                     if '<PrimaryNetworkConnectionIndex>' not in data:
+                         self.logger.debug("add_network_adapter PrimaryNIC not in data")
+                         item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                 <NetworkConnection network="{}">
+                                 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                 <IsConnected>true</IsConnected>
+                                 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                                          allocation_mode)
+                         # Stub for ip_address feature
+                         if ip_address:
+                             ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                             item = item.replace('</NetworkConnectionIndex>\n', '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+                         if mac_address:
+                             mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                             item = item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
+                         data = data.replace('</ovf:Info>\n', '</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
+                     else:
+                         self.logger.debug("add_network_adapter PrimaryNIC in data")
+                         new_item = """<NetworkConnection network="{}">
+                                     <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                     <IsConnected>true</IsConnected>
+                                     <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                     </NetworkConnection>""".format(network_name, nicIndex,
+                                                                           allocation_mode)
+                         # Stub for ip_address feature
+                         if ip_address:
+                             ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                             new_item = new_item.replace('</NetworkConnectionIndex>\n', '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+                         if mac_address:
+                             mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                             new_item = new_item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
+                         data = data + new_item + '</NetworkConnectionSection>'
+                     headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+                     response = self.perform_request(req_type='PUT',
+                                                     url=url_rest_call,
+                                                     headers=headers,
+                                                     data=data)
+                     if response.status_code == 403:
+                         add_headers = {'Content-Type': headers['Content-Type']}
+                         response = self.retry_rest('PUT', url_rest_call, add_headers, data)
+                     if response.status_code != 202:
+                         self.logger.error("REST call {} failed reason : {} "\
+                                             "status code : {} ".format(url_rest_call,
+                                                                     response.content,
+                                                                response.status_code))
+                         raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                             "network connection section")
+                     else:
+                         nic_task = self.get_task_from_response(response.content)
+                         result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                         if result.get('status') == 'success':
+                             self.logger.info("add_network_adapter_to_vms(): VM {} connected to "\
+                                                                "default NIC type".format(vm_id))
+                         else:
+                             self.logger.error("add_network_adapter_to_vms(): VM {} failed to "\
+                                                               "connect NIC type".format(vm_id))
+             else:
+                 for vms in vapp.get_all_vms():
+                     vm_id = vms.get('id').split(':')[-1]
+                     url_rest_call = "{}/api/vApp/vm-{}/networkConnectionSection/".format(self.url, vm_id)
+                     headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+                     response = self.perform_request(req_type='GET',
+                                                     url=url_rest_call,
+                                                     headers=headers)
+                     if response.status_code == 403:
+                         response = self.retry_rest('GET', url_rest_call)
+                     if response.status_code != 200:
+                         self.logger.error("REST call {} failed reason : {} "\
+                                             "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                         raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to get "\
+                                                                         "network connection section")
+                     data = response.content
+                     if isinstance(data, bytes):
+                         # decode so the str-based manipulation below works under Python 3
+                         data = data.decode('utf-8')
+                     data = data.split('<Link rel="edit"')[0]
+                     vcd_netadapter_type = nic_type
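+                     # vCloud expects the literal adapter type SRIOVETHERNETCARD
+                     # for SR-IOV / VF passthrough NICs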
+                     if nic_type in ['SR-IOV', 'VF']:
+                         vcd_netadapter_type = "SRIOVETHERNETCARD"
+                     if '<PrimaryNetworkConnectionIndex>' not in data:
+                         self.logger.debug("add_network_adapter PrimaryNIC not in data nic_type {}".format(nic_type))
+                         item = """<PrimaryNetworkConnectionIndex>{}</PrimaryNetworkConnectionIndex>
+                                 <NetworkConnection network="{}">
+                                 <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                 <IsConnected>true</IsConnected>
+                                 <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                 <NetworkAdapterType>{}</NetworkAdapterType>
+                                 </NetworkConnection>""".format(primary_nic_index, network_name, nicIndex,
+                                                                                allocation_mode, vcd_netadapter_type)
+                         # Stub for ip_address feature
+                         if ip_address:
+                             ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                             item = item.replace('</NetworkConnectionIndex>\n', '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+                         if mac_address:
+                             mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                             item = item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
+                         data = data.replace('</ovf:Info>\n', '</ovf:Info>\n{}\n</NetworkConnectionSection>'.format(item))
+                     else:
+                         self.logger.debug("add_network_adapter PrimaryNIC in data nic_type {}".format(nic_type))
+                         new_item = """<NetworkConnection network="{}">
+                                     <NetworkConnectionIndex>{}</NetworkConnectionIndex>
+                                     <IsConnected>true</IsConnected>
+                                     <IpAddressAllocationMode>{}</IpAddressAllocationMode>
+                                     <NetworkAdapterType>{}</NetworkAdapterType>
+                                     </NetworkConnection>""".format(network_name, nicIndex,
+                                                                 allocation_mode, vcd_netadapter_type)
+                         # Stub for ip_address feature
+                         if ip_address:
+                             ip_tag = '<IpAddress>{}</IpAddress>'.format(ip_address)
+                             new_item = new_item.replace('</NetworkConnectionIndex>\n', '</NetworkConnectionIndex>\n{}\n'.format(ip_tag))
+                         if mac_address:
+                             mac_tag = '<MACAddress>{}</MACAddress>'.format(mac_address)
+                             new_item = new_item.replace('</IsConnected>\n', '</IsConnected>\n{}\n'.format(mac_tag))
+                         data = data + new_item + '</NetworkConnectionSection>'
+                     headers['Content-Type'] = 'application/vnd.vmware.vcloud.networkConnectionSection+xml'
+                     response = self.perform_request(req_type='PUT',
+                                                     url=url_rest_call,
+                                                     headers=headers,
+                                                     data=data)
+                     if response.status_code == 403:
+                         add_headers = {'Content-Type': headers['Content-Type']}
+                         response = self.retry_rest('PUT', url_rest_call, add_headers, data)
+                     if response.status_code != 202:
+                         self.logger.error("REST call {} failed reason : {} "\
+                                             "status code : {}".format(url_rest_call,
+                                                                    response.content,
+                                                               response.status_code))
+                         raise vimconn.vimconnException("add_network_adapter_to_vms : Failed to update "\
+                                                                            "network connection section")
+                     else:
+                         nic_task = self.get_task_from_response(response.content)
+                         result = self.client.get_task_monitor().wait_for_success(task=nic_task)
+                         if result.get('status') == 'success':
+                             self.logger.info("add_network_adapter_to_vms(): VM {} "\
+                                                "connected to NIC type {}".format(vm_id, nic_type))
+                         else:
+                             self.logger.error("add_network_adapter_to_vms(): VM {} "\
+                                                "failed to connect NIC type {}".format(vm_id, nic_type))
+         except Exception as exp:
+             self.logger.error("add_network_adapter_to_vms() : exception occurred "\
+                                                "while adding network adapter: {}".format(exp))
+             raise vimconn.vimconnException(message=exp)
+     def set_numa_affinity(self, vmuuid, paired_threads_id):
+         """
+             Method to assign numa affinity in vm configuration parameters
+             Args:
+                 vmuuid - vm uuid
+                 paired_threads_id - one or more virtual processor
+                                     numbers
+             Returns:
+                 None; raises vimconnException on failure
+         """
+         try:
+             # initialise so the except handler below never hits unbound names
+             vm_obj = None
+             vm_moref_id = None
+             vcenter_conect, content = self.get_vcenter_content()
+             vm_moref_id = self.get_vm_moref_id(vmuuid)
+             host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+             if vm_obj:
+                 config_spec = vim.vm.ConfigSpec()
+                 config_spec.extraConfig = []
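+                 # 'numa.nodeAffinity' is the vSphere extra-config option that
+                 # constrains the VM's scheduling to the given NUMA node(s)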
+                 opt = vim.option.OptionValue()
+                 opt.key = 'numa.nodeAffinity'
+                 opt.value = str(paired_threads_id)
+                 config_spec.extraConfig.append(opt)
+                 task = vm_obj.ReconfigVM_Task(config_spec)
+                 if task:
+                     result = self.wait_for_vcenter_task(task, vcenter_conect)
+                     extra_config = vm_obj.config.extraConfig
+                     flag = False
+                     for opts in extra_config:
+                         if 'numa.nodeAffinity' in opts.key:
+                             flag = True
+                             self.logger.info("set_numa_affinity: Successfully assigned numa affinity "\
+                                                      "value {} for vm {}".format(opt.value, vm_obj))
+                         if flag:
+                             return
+             else:
+                 self.logger.error("set_numa_affinity: Failed to assign numa affinity")
+         except Exception as exp:
+             self.logger.error("set_numa_affinity : exception occurred while setting numa affinity "\
+                                                        "for VM {} : {}".format(vm_obj, exp))
+             raise vimconn.vimconnException("set_numa_affinity : Error {} failed to assign numa "\
+                                                                            "affinity".format(exp))
+     def cloud_init(self, vapp, cloud_config):
+         """
+         Method to inject ssh-key
+         vapp - vapp object
+         cloud_config a dictionary with:
+                 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                 'users': (optional) list of users to be inserted, each item is a dict with:
+                     'name': (mandatory) user name,
+                     'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                     or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                 'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                     'dest': (mandatory) string with the destination absolute path
+                     'encoding': (optional, by default text). Can be one of:
+                         'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                     'content' (mandatory): string with the content of the file
+                     'permissions': (optional) string with file permissions, typically octal notation '0644'
+                     'owner': (optional) file owner, string with the format 'owner:group'
+                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+         """
+         try:
+             if not isinstance(cloud_config, dict):
+                 raise Exception("cloud_init : parameter cloud_config is not a dictionary")
+             else:
+                 key_pairs = []
+                 userdata = []
+                 if "key-pairs" in cloud_config:
+                     key_pairs = cloud_config["key-pairs"]
+                 if "users" in cloud_config:
+                     userdata = cloud_config["users"]
+                 self.logger.debug("cloud_init : Guest OS customization started...")
+                 customize_script = self.format_script(key_pairs=key_pairs, users_list=userdata)
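+                 # escape ampersands so the script stays valid XML inside the
+                 # <CustomizationScript> element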
+                 customize_script = customize_script.replace("&","&amp;")
+                 self.guest_customization(vapp, customize_script)
+         except Exception as exp:
+             self.logger.error("cloud_init : exception occurred while injecting "\
+                               "ssh-key: {}".format(exp))
+             raise vimconn.vimconnException("cloud_init : Error {} failed to inject "\
+                                                                "ssh-key".format(exp))
+     def format_script(self, key_pairs=None, users_list=None):
+         # avoid mutable default arguments
+         key_pairs = key_pairs or []
+         users_list = users_list or []
+         bash_script = """#!/bin/sh
+         echo performing customization tasks with param $1 at `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+         if [ "$1" = "precustomization" ];then
+             echo performing precustomization tasks   on `date "+DATE: %Y-%m-%d - TIME: %H:%M:%S"` >> /root/customization.log
+         """
+         keys = "\n".join(key_pairs)
+         if keys:
+             keys_data = """
+             if [ ! -d /root/.ssh ];then
+                 mkdir /root/.ssh
+                 chown root:root /root/.ssh
+                 chmod 700 /root/.ssh
+                 touch /root/.ssh/authorized_keys
+                 chown root:root /root/.ssh/authorized_keys
+                 chmod 600 /root/.ssh/authorized_keys
+                 # make centos with selinux happy
+                 which restorecon && restorecon -Rv /root/.ssh
+             else
+                 touch /root/.ssh/authorized_keys
+                 chown root:root /root/.ssh/authorized_keys
+                 chmod 600 /root/.ssh/authorized_keys
+             fi
+             echo '{key}' >> /root/.ssh/authorized_keys
+             """.format(key=keys)
+             bash_script += keys_data
+         for user in users_list:
+             user_name = user.get('name')
+             if not user_name:
+                 # skip malformed entries that carry no user name
+                 continue
+             if 'key-pairs' in user:
+                 user_keys = "\n".join(user['key-pairs'])
+             else:
+                 user_keys = None
+             add_user_name = """
+                 useradd -d /home/{user_name} -m -g users -s /bin/bash {user_name}
+                 """.format(user_name=user_name)
+             bash_script += add_user_name
+             if user_keys:
+                 user_keys_data = """
+                 mkdir /home/{user_name}/.ssh
+                 chown {user_name}:{user_name} /home/{user_name}/.ssh
+                 chmod 700 /home/{user_name}/.ssh
+                 touch /home/{user_name}/.ssh/authorized_keys
+                 chown {user_name}:{user_name} /home/{user_name}/.ssh/authorized_keys
+                 chmod 600 /home/{user_name}/.ssh/authorized_keys
+                 # make centos with selinux happy
+                 which restorecon && restorecon -Rv /home/{user_name}/.ssh
+                 echo '{user_key}' >> /home/{user_name}/.ssh/authorized_keys
+                 """.format(user_name=user_name,user_key=user_keys)
+                 bash_script += user_keys_data
+         return bash_script + "\n\tfi"
+     def guest_customization(self, vapp, customize_script):
+         """
+         Method to customize guest os
+         vapp - Vapp object
+         customize_script - Customize script to be run at first boot of VM.
+         """
+         for vm in vapp.get_all_vms():
+             vm_id = vm.get('id').split(':')[-1]
+             vm_name = vm.get('name')
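+             # the name feeds <ComputerName>, which becomes the guest hostname;
+             # underscores are not valid in hostnames, so swap them for hyphens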
+             vm_name = vm_name.replace('_', '-')
+             vm_customization_url = "{}/api/vApp/vm-{}/guestCustomizationSection/".format(self.url, vm_id)
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+             headers['Content-Type'] = "application/vnd.vmware.vcloud.guestCustomizationSection+xml"
+             data = """<GuestCustomizationSection
+                            xmlns="http://www.vmware.com/vcloud/v1.5"
+                            xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
+                            ovf:required="false" href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml">
+                            <ovf:Info>Specifies Guest OS Customization Settings</ovf:Info>
+                            <Enabled>true</Enabled>
+                            <ChangeSid>false</ChangeSid>
+                            <VirtualMachineId>{}</VirtualMachineId>
+                            <JoinDomainEnabled>false</JoinDomainEnabled>
+                            <UseOrgSettings>false</UseOrgSettings>
+                            <AdminPasswordEnabled>false</AdminPasswordEnabled>
+                            <AdminPasswordAuto>true</AdminPasswordAuto>
+                            <AdminAutoLogonEnabled>false</AdminAutoLogonEnabled>
+                            <AdminAutoLogonCount>0</AdminAutoLogonCount>
+                            <ResetPasswordRequired>false</ResetPasswordRequired>
+                            <CustomizationScript>{}</CustomizationScript>
+                            <ComputerName>{}</ComputerName>
+                            <Link href="{}" type="application/vnd.vmware.vcloud.guestCustomizationSection+xml" rel="edit"/>
+                        </GuestCustomizationSection>
+                    """.format(vm_customization_url,
+                               vm_id,
+                               customize_script,
+                               vm_name,
+                               vm_customization_url)
+             response = self.perform_request(req_type='PUT',
+                                              url=vm_customization_url,
+                                              headers=headers,
+                                              data=data)
+             if response.status_code == 202:
+                 guest_task = self.get_task_from_response(response.content)
+                 self.client.get_task_monitor().wait_for_success(task=guest_task)
+                 self.logger.info("guest_customization : customized guest os task "\
+                                              "completed for VM {}".format(vm_name))
+             else:
+                 self.logger.error("guest_customization : task for customized guest os "\
+                                                     "failed for VM {}".format(vm_name))
+                 raise vimconn.vimconnException("guest_customization : failed to perform "\
+                                        "guest os customization on VM {}".format(vm_name))
+     def add_new_disk(self, vapp_uuid, disk_size):
+         """
+             Method to create an empty vm disk
+             Args:
+                 vapp_uuid - is vapp identifier.
+                 disk_size - size of disk to be created in GB
+             Returns:
+                 None
+         """
+         status = False
+         vm_details = None
+         try:
+             #Disk size in GB, convert it into MB
+             if disk_size is not None:
+                 disk_size_mb = int(disk_size) * 1024
+                 vm_details = self.get_vapp_details_rest(vapp_uuid)
+             if vm_details and "vm_virtual_hardware" in vm_details:
+                 self.logger.info("Adding disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+                 disk_href = vm_details["vm_virtual_hardware"]["disk_edit_href"]
+                 status = self.add_new_disk_rest(disk_href, disk_size_mb)
+         except Exception as exp:
+             msg = "Error occurred while creating new disk {}.".format(exp)
+             self.rollback_newvm(vapp_uuid, msg)
+         if status:
+             self.logger.info("Added new disk to VM: {} disk size:{}GB".format(vm_details["name"], disk_size))
+         else:
+             #If failed to add disk, delete VM
+             msg = "add_new_disk: Failed to add new disk to {}".format(vm_details["name"])
+             self.rollback_newvm(vapp_uuid, msg)
+     def add_new_disk_rest(self, disk_href, disk_size_mb):
+         """
+         Retrieves the vApp disk section & adds a new empty disk
+         Args:
+             disk_href: Disk section href to add disk
+             disk_size_mb: Disk size in MB
+             Returns: Status of add new disk task
+         """
+         status = False
+         if not self.client._session:
+             return status
+         headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                    'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+         response = self.perform_request(req_type='GET',
+                                         url=disk_href,
+                                         headers=headers)
+         if response.status_code == 403:
+             response = self.retry_rest('GET', disk_href)
+         if response.status_code != requests.codes.ok:
+             self.logger.error("add_new_disk_rest: GET REST API call {} failed. Return status code {}"
+                               .format(disk_href, response.status_code))
+             return status
+         try:
+             # Find bus type & max of instance IDs assigned to disks
+             lxmlroot_respond = lxmlElementTree.fromstring(response.content)
+             namespaces = {prefix: uri for prefix, uri in lxmlroot_respond.nsmap.items() if prefix}
+             namespaces["xmlns"]= "http://www.vmware.com/vcloud/v1.5"
+             instance_id = 0
+             for item in lxmlroot_respond.iterfind('xmlns:Item', namespaces):
+                 if item.find("rasd:Description", namespaces).text == "Hard disk":
+                     inst_id = int(item.find("rasd:InstanceID", namespaces).text)
+                     if inst_id > instance_id:
+                         instance_id = inst_id
+                         disk_item = item.find("rasd:HostResource", namespaces)
+                         bus_subtype = disk_item.attrib["{" + namespaces['xmlns'] + "}busSubType"]
+                         bus_type = disk_item.attrib["{" + namespaces['xmlns'] + "}busType"]
+             instance_id = instance_id + 1
+             new_item =   """<Item>
+                                 <rasd:Description>Hard disk</rasd:Description>
+                                 <rasd:ElementName>New disk</rasd:ElementName>
+                                 <rasd:HostResource
+                                     xmlns:vcloud="http://www.vmware.com/vcloud/v1.5"
+                                     vcloud:capacity="{}"
+                                     vcloud:busSubType="{}"
+                                     vcloud:busType="{}"></rasd:HostResource>
+                                 <rasd:InstanceID>{}</rasd:InstanceID>
+                                 <rasd:ResourceType>17</rasd:ResourceType>
+                             </Item>""".format(disk_size_mb, bus_subtype, bus_type, instance_id)
+             # use response.text (str) rather than response.content (bytes):
+             # str.replace with str arguments is required under python3
+             new_data = response.text
+             #Add new item at the bottom
+             new_data = new_data.replace('</Item>\n</RasdItemsList>', '</Item>\n{}\n</RasdItemsList>'.format(new_item))
+             # Send PUT request to modify virtual hardware section with new disk
+             headers['Content-Type'] = 'application/vnd.vmware.vcloud.rasdItemsList+xml; charset=ISO-8859-1'
+             response = self.perform_request(req_type='PUT',
+                                             url=disk_href,
+                                             data=new_data,
+                                             headers=headers)
+             if response.status_code == 403:
+                 add_headers = {'Content-Type': headers['Content-Type']}
+                 response = self.retry_rest('PUT', disk_href, add_headers, new_data)
+             if response.status_code != 202:
+                 self.logger.error("PUT REST API call {} failed. Return status code {}. Response Content:{}"
+                                   .format(disk_href, response.status_code, response.content))
+             else:
+                 add_disk_task = self.get_task_from_response(response.content)
+                 result = self.client.get_task_monitor().wait_for_success(task=add_disk_task)
+                 if result.get('status') == 'success':
+                     status = True
+                 else:
+                     self.logger.error("Add new disk REST task failed to add {} MB disk".format(disk_size_mb))
+         except Exception as exp:
+             self.logger.error("Error occurred calling rest api for creating new disk {}".format(exp))
+         return status
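+     # For reference, a sketch of what add_new_disk_rest() produces: the new <Item>
+     # is spliced in just before the closing </RasdItemsList> tag, so a 10 GB
+     # request (disk_size_mb=10240) leaves the Disks section ending with:
+     #
+     #     <Item>
+     #         <rasd:Description>Hard disk</rasd:Description>
+     #         <rasd:ElementName>New disk</rasd:ElementName>
+     #         <rasd:HostResource vcloud:capacity="10240" vcloud:busSubType="..."
+     #             vcloud:busType="..."></rasd:HostResource>
+     #         <rasd:InstanceID>3</rasd:InstanceID>
+     #         <rasd:ResourceType>17</rasd:ResourceType>
+     #     </Item>
+     #     </RasdItemsList>
+     #
+     # The InstanceID shown (3) is only an example: it is the highest instance ID
+     # found among the existing "Hard disk" items plus one.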
+     def add_existing_disk(self, catalogs=None, image_id=None, size=None, template_name=None, vapp_uuid=None):
+         """
+             Method to add existing disk to vm
+             Args :
+                 catalogs - List of VDC catalogs
+                 image_id - Catalog ID
+                 size - optional disk size in GB
+                 template_name - Name of template in catalog
+                 vapp_uuid - UUID of vApp
+             Returns:
+                 None
+         """
+         disk_info = None
+         vcenter_conect, content = self.get_vcenter_content()
+         #find moref-id of vm in image
+         catalog_vm_info = self.get_vapp_template_details(catalogs=catalogs,
+                                                          image_id=image_id,
+                                                         )
+         if catalog_vm_info and "vm_vcenter_info" in catalog_vm_info:
+             if "vm_moref_id" in catalog_vm_info["vm_vcenter_info"]:
+                 catalog_vm_moref_id = catalog_vm_info["vm_vcenter_info"].get("vm_moref_id", None)
+                 if catalog_vm_moref_id:
+                     self.logger.info("Moref_id of VM in catalog : {}" .format(catalog_vm_moref_id))
+                     host, catalog_vm_obj = self.get_vm_obj(content, catalog_vm_moref_id)
+                     if catalog_vm_obj:
+                         #find existing disk
+                         disk_info = self.find_disk(catalog_vm_obj)
+                     else:
+                         exp_msg = "No VM with image id {} found".format(image_id)
+                         self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+         else:
+             exp_msg = "No Image found with image ID {} ".format(image_id)
+             self.rollback_newvm(vapp_uuid, exp_msg, exp_type="NotFound")
+         if disk_info:
+             self.logger.info("Existing disk_info : {}".format(disk_info))
+             #get VM
+             vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+             host, vm_obj = self.get_vm_obj(content, vm_moref_id)
+             status = False
+             if vm_obj:
+                 status = self.add_disk(vcenter_conect=vcenter_conect,
+                                        vm=vm_obj,
+                                        disk_info=disk_info,
+                                        size=size,
+                                        vapp_uuid=vapp_uuid
+                                        )
+             if status:
+                 self.logger.info("Disk from image id {} added to {}".format(image_id,
+                                                                             vm_obj.config.name)
+                                  )
+         else:
+             msg = "No disk found with image id {} to add to vApp {}".format(
+                                                             image_id,
+                                                             vapp_uuid)
+             self.rollback_newvm(vapp_uuid, msg, exp_type="NotFound")
+     def find_disk(self, vm_obj):
+         """
+          Method to find details of existing disk in VM
+             Args :
+                 vm_obj - vCenter object of VM
+             Returns:
+                 disk_info : dict of disk details
+         """
+         disk_info = {}
+         if vm_obj:
+             try:
+                 devices = vm_obj.config.hardware.device
+                 for device in devices:
+                     if isinstance(device, vim.vm.device.VirtualDisk):
+                         if isinstance(device.backing,vim.vm.device.VirtualDisk.FlatVer2BackingInfo) and hasattr(device.backing, 'fileName'):
+                             disk_info["full_path"] = device.backing.fileName
+                             disk_info["datastore"] = device.backing.datastore
+                             disk_info["capacityKB"] = device.capacityInKB
+                             break
+             except Exception as exp:
+                 self.logger.error("find_disk() : exception occurred while "\
+                                   "getting existing disk details: {}".format(exp))
+         return disk_info
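+     # find_disk() returns a dict shaped as below; the values here are purely
+     # illustrative, not taken from a real vCenter:
+     #
+     #     {
+     #         "full_path": "[datastore1] template_vm/template_vm.vmdk",
+     #         "datastore": <vim.Datastore instance>,
+     #         "capacityKB": 10485760,    # 10 GB
+     #     }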
+     def add_disk(self, vcenter_conect=None, vm=None, size=None, vapp_uuid=None, disk_info={}):
+         """
+          Method to add existing disk in VM
+             Args :
+                 vcenter_conect - vCenter connection object
+                 vm - vCenter vm object
+                 size - optional disk size in GB
+                 vapp_uuid - UUID of vApp, used for rollback on failure
+                 disk_info : dict of disk details
+             Returns:
+                 status : status of add disk task
+         """
+         datastore = disk_info["datastore"] if "datastore" in disk_info else None
+         fullpath = disk_info["full_path"] if "full_path" in disk_info else None
+         capacityKB = disk_info["capacityKB"] if "capacityKB" in disk_info else None
+         if size is not None:
+             #Convert size from GB to KB
+             sizeKB = int(size) * 1024 * 1024
+             #compare size of existing disk and user-given size; assign whichever is greater
+             self.logger.info("Add Existing disk : sizeKB {} , capacityKB {}".format(
+                                                                     sizeKB, capacityKB))
+             if capacityKB is None or sizeKB > capacityKB:
+                 capacityKB = sizeKB
+         if datastore and fullpath and capacityKB:
+             try:
+                 spec = vim.vm.ConfigSpec()
+                 # get all disks on a VM, set unit_number to the next available
+                 unit_number = 0
+                 for dev in vm.config.hardware.device:
+                     if hasattr(dev.backing, 'fileName'):
+                         unit_number = int(dev.unitNumber) + 1
+                         # unit_number 7 reserved for scsi controller
+                         if unit_number == 7:
+                             unit_number += 1
+                     if isinstance(dev, vim.vm.device.VirtualDisk):
+                         #vim.vm.device.VirtualSCSIController
+                         controller_key = dev.controllerKey
+                 self.logger.info("Add Existing disk : unit number {} , controller key {}".format(
+                                                                     unit_number, controller_key))
+                 # add disk here
+                 dev_changes = []
+                 disk_spec = vim.vm.device.VirtualDeviceSpec()
+                 disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+                 disk_spec.device = vim.vm.device.VirtualDisk()
+                 disk_spec.device.backing = \
+                     vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
+                 disk_spec.device.backing.thinProvisioned = True
+                 disk_spec.device.backing.diskMode = 'persistent'
+                 disk_spec.device.backing.datastore  = datastore
+                 disk_spec.device.backing.fileName  = fullpath
+                 disk_spec.device.unitNumber = unit_number
+                 disk_spec.device.capacityInKB = capacityKB
+                 disk_spec.device.controllerKey = controller_key
+                 dev_changes.append(disk_spec)
+                 spec.deviceChange = dev_changes
+                 task = vm.ReconfigVM_Task(spec=spec)
+                 status = self.wait_for_vcenter_task(task, vcenter_conect)
+                 return status
+             except Exception as exp:
+                 exp_msg = "add_disk() : exception {} occurred while adding disk "\
+                           "{} to vm {}".format(exp,
+                                                fullpath,
+                                                vm.config.name)
+                 self.rollback_newvm(vapp_uuid, exp_msg)
+         else:
+             msg = "add_disk() : Can not add disk to VM with disk info {} ".format(disk_info)
+             self.rollback_newvm(vapp_uuid, msg)
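+     # Worked example of the size handling in add_disk(): size=10 (GB) gives
+     # sizeKB = 10 * 1024 * 1024 = 10485760; if the catalog disk reports
+     # capacityKB = 5242880 (5 GB), the larger value 10485760 wins and the disk
+     # is attached with 10 GB capacity.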
+     def get_vcenter_content(self):
+         """
+          Get the vsphere content object
+         """
+         try:
+             vm_vcenter_info = self.get_vm_vcenter_info()
+         except Exception as exp:
+             self.logger.error("Error occurred while getting vCenter information"\
+                              " for VM : {}".format(exp))
+             raise vimconn.vimconnException(message=exp)
+         context = None
+         if hasattr(ssl, '_create_unverified_context'):
+             context = ssl._create_unverified_context()
+         vcenter_conect = SmartConnect(
+                     host=vm_vcenter_info["vm_vcenter_ip"],
+                     user=vm_vcenter_info["vm_vcenter_user"],
+                     pwd=vm_vcenter_info["vm_vcenter_password"],
+                     port=int(vm_vcenter_info["vm_vcenter_port"]),
+                     sslContext=context
+                 )
+         atexit.register(Disconnect, vcenter_conect)
+         content = vcenter_conect.RetrieveContent()
+         return vcenter_conect, content
+     def get_vm_moref_id(self, vapp_uuid):
+         """
+         Get the moref_id of given VM
+         """
+         try:
+             vm_moref_id = None
+             if vapp_uuid:
+                 vm_details = self.get_vapp_details_rest(vapp_uuid, need_admin_access=True)
+                 if vm_details and "vm_vcenter_info" in vm_details:
+                     vm_moref_id = vm_details["vm_vcenter_info"].get("vm_moref_id", None)
+             return vm_moref_id
+         except Exception as exp:
+             self.logger.error("Error occurred while getting VM moref ID "\
+                              "for VM : {}".format(exp))
+             return None
+     def get_vapp_template_details(self, catalogs=None, image_id=None , template_name=None):
+         """
+             Method to get vApp template details
+                 Args :
+                     catalogs - list of VDC catalogs
+                     image_id - Catalog ID to find
+                     template_name : template name in catalog
+                 Returns:
+                     parsed_response : dict of vApp template details
+         """
+         parsed_response = {}
+         vca = self.connect_as_admin()
+         if not vca:
+             raise vimconn.vimconnConnectionException("Failed to connect to vCD")
+         try:
+             org, vdc = self.get_vdc_details()
+             catalog = self.get_catalog_obj(image_id, catalogs)
+             if catalog:
+                 items = org.get_catalog_item(catalog.get('name'), catalog.get('name'))
+                 catalog_items = [items.attrib]
+                 if len(catalog_items) == 1:
+                     headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                     response = self.perform_request(req_type='GET',
+                                                     url=catalog_items[0].get('href'),
+                                                     headers=headers)
+                     catalogItem = XmlElementTree.fromstring(response.content)
+                     entity = [child for child in catalogItem if child.get("type") == "application/vnd.vmware.vcloud.vAppTemplate+xml"][0]
+                     vapp_template_href = entity.get("href")
+                     #get vapp details and parse moref id
+                     namespaces = {"vssd":"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" ,
+                                   'ovf': 'http://schemas.dmtf.org/ovf/envelope/1',
+                                   'vmw': 'http://www.vmware.com/schema/ovf',
+                                   'vm': 'http://www.vmware.com/vcloud/v1.5',
+                                   'rasd':"http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData",
+                                   'vmext':"http://www.vmware.com/vcloud/extension/v1.5",
+                                   'xmlns':"http://www.vmware.com/vcloud/v1.5"
+                                 }
+                     if vca._session:
+                         response = self.perform_request(req_type='GET',
+                                                     url=vapp_template_href,
+                                                     headers=headers)
+                         if response.status_code != requests.codes.ok:
+                             self.logger.debug("REST API call {} failed. Return status code {}".format(
+                                                 vapp_template_href, response.status_code))
+                         else:
+                             xmlroot_respond = XmlElementTree.fromstring(response.content)
+                             children_section = xmlroot_respond.find('vm:Children/', namespaces)
+                             vCloud_extension_section = None
+                             if children_section is not None:
+                                 vCloud_extension_section = children_section.find('xmlns:VCloudExtension', namespaces)
+                             if vCloud_extension_section is not None:
+                                 vm_vcenter_info = {}
+                                 vim_info = vCloud_extension_section.find('vmext:VmVimInfo', namespaces)
+                                 vmext = vim_info.find('vmext:VmVimObjectRef', namespaces)
+                                 if vmext is not None:
+                                     vm_vcenter_info["vm_moref_id"] = vmext.find('vmext:MoRef', namespaces).text
+                                 parsed_response["vm_vcenter_info"]= vm_vcenter_info
+         except Exception as exp :
+             self.logger.info("Error occurred calling rest api for getting vApp details {}".format(exp))
+         return parsed_response
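+     # On success get_vapp_template_details() returns a dict such as the
+     # following (the moref value is a hypothetical example); an empty dict is
+     # returned when the template or its VCloudExtension section is not found:
+     #
+     #     {"vm_vcenter_info": {"vm_moref_id": "vm-1234"}}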
+     def rollback_newvm(self, vapp_uuid, msg, exp_type="Generic"):
+         """
+             Method to delete vApp
+                 Args :
+                     vapp_uuid - vApp UUID
+                     msg - Error message to be logged
+                     exp_type : Exception type
+                 Returns:
+                     None
+         """
+         if vapp_uuid:
+             status = self.delete_vminstance(vapp_uuid)
+         else:
+             msg = "No vApp ID"
+         self.logger.error(msg)
+         if exp_type == "Generic":
+             raise vimconn.vimconnException(msg)
+         elif exp_type == "NotFound":
+             raise vimconn.vimconnNotFoundException(message=msg)
+     def add_sriov(self, vapp_uuid, sriov_nets, vmname_andid):
+         """
+             Method to attach SRIOV adapters to VM
+              Args:
+                 vapp_uuid - uuid of vApp/VM
+                 sriov_nets - SRIOV devices information as specified in VNFD (flavor)
+                 vmname_andid - vmname
+             Returns:
+                 The status of add SRIOV adapter task, vm object and
+                 vcenter_conect object
+         """
+         vm_obj = None
+         vcenter_conect, content = self.get_vcenter_content()
+         vm_moref_id = self.get_vm_moref_id(vapp_uuid)
+         if vm_moref_id:
+             try:
+                 no_of_sriov_devices = len(sriov_nets)
+                 if no_of_sriov_devices > 0:
+                     #Get VM and its host
+                     host_obj, vm_obj = self.get_vm_obj(content, vm_moref_id)
+                     self.logger.info("VM {} is currently on host {}".format(vm_obj, host_obj))
+                     if host_obj and vm_obj:
+                         #get SRIOV devices from host on which vapp is currently installed
+                         available_sriov_devices = self.get_sriov_devices(host_obj,
+                                                                 no_of_sriov_devices,
+                                                                 )
+                         if len(available_sriov_devices) == 0:
+                             #find other hosts with active pci devices
+                             new_host_obj, available_sriov_devices = self.get_host_and_sriov_devices(
+                                                                 content,
+                                                                 no_of_sriov_devices,
+                                                                 )
+                             if new_host_obj is not None and len(available_sriov_devices) > 0:
+                                 #Migrate vm to the host where SRIOV devices are available
+                                 self.logger.info("Relocate VM {} on new host {}".format(vm_obj,
+                                                                                     new_host_obj))
+                                 task = self.relocate_vm(new_host_obj, vm_obj)
+                                 if task is not None:
+                                     result = self.wait_for_vcenter_task(task, vcenter_conect)
+                                     self.logger.info("Migrate VM status: {}".format(result))
+                                     host_obj = new_host_obj
+                                 else:
+                                     self.logger.info("Failed to migrate VM : {}".format(vmname_andid))
+                                     raise vimconn.vimconnNotFoundException(
+                                     "Failed to migrate VM : {} to host {}".format(
+                                                     vmname_andid,
+                                                     new_host_obj)
+                                         )
+                         if host_obj is not None and available_sriov_devices is not None and len(available_sriov_devices) > 0:
+                             #Add SRIOV devices one by one
+                             for sriov_net in sriov_nets:
+                                 network_name = sriov_net.get('net_id')
+                                 dvs_portgr_name = self.create_dvPort_group(network_name)
+                                 if sriov_net.get('type') == "VF" or sriov_net.get('type') == "SR-IOV":
+                                     #add vlan ID ,Modify portgroup for vlan ID
+                                     self.configure_vlanID(content, vcenter_conect, network_name)
+                                 task = self.add_sriov_to_vm(content,
+                                                             vm_obj,
+                                                             host_obj,
+                                                             network_name,
+                                                             available_sriov_devices[0]
+                                                             )
+                                 if task:
+                                     status = self.wait_for_vcenter_task(task, vcenter_conect)
+                                     if status:
+                                         self.logger.info("Added SRIOV {} to VM {}".format(
+                                                                         no_of_sriov_devices,
+                                                                         str(vm_obj)))
+                                 else:
+                                     self.logger.error("Failed to add SRIOV {} to VM {}".format(
+                                                                         no_of_sriov_devices,
+                                                                         str(vm_obj)))
+                                     raise vimconn.vimconnUnexpectedResponse(
+                                     "Failed to add SRIOV adapter in VM {}".format(str(vm_obj))
+                                         )
+                             return True, vm_obj, vcenter_conect
+                         else:
+                             self.logger.error("Currently there is no host with"\
+                                               " {} number of available SRIOV "\
+                                               "VFs required for VM {}".format(
+                                                                 no_of_sriov_devices,
+                                                                 vmname_andid)
+                                               )
+                             raise vimconn.vimconnNotFoundException(
+                                     "Currently there is no host with {} "\
+                                     "number of available SRIOV devices required for VM {}".format(
+                                                                             no_of_sriov_devices,
+                                                                             vmname_andid))
+                 else:
+                     self.logger.debug("No information about SRIOV devices {}".format(sriov_nets))
+             except vmodl.MethodFault as error:
+                 self.logger.error("Error occurred while adding SRIOV: {}".format(error))
+         return None, vm_obj, vcenter_conect
+     def get_sriov_devices(self, host, no_of_vfs):
+         """
+             Method to get the details of SRIOV devices on given host
+              Args:
+                 host - vSphere host object
+                 no_of_vfs - number of VFs needed on host
+              Returns:
+                 array of SRIOV devices
+         """
+         sriovInfo = []
+         if host:
+             for device in host.config.pciPassthruInfo:
+                 if isinstance(device,vim.host.SriovInfo) and device.sriovActive:
+                     if device.numVirtualFunction >= no_of_vfs:
+                         sriovInfo.append(device)
+                         break
+         return sriovInfo
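+     # Minimal usage sketch, assuming 'host' is a pyVmomi vim.HostSystem taken
+     # from a container view (see get_host_and_sriov_devices() below):
+     #
+     #     devices = self.get_sriov_devices(host, no_of_vfs=2)
+     #     if devices:
+     #         pf_id = devices[0].id   # physical function id, as used by add_sriov_to_vm()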
+     def get_host_and_sriov_devices(self, content, no_of_vfs):
+         """
+          Method to get the details of SRIOV devices on all hosts
+             Args:
+                 content - vSphere host object
+                 no_of_vfs - number of pci VFs needed on host
+             Returns:
+                  array of SRIOV devices and host object
+         """
+         host_obj = None
+         sriov_device_objs = None
+         try:
+             if content:
+                 container = content.viewManager.CreateContainerView(content.rootFolder,
+                                                             [vim.HostSystem], True)
+                 for host in container.view:
+                     devices = self.get_sriov_devices(host, no_of_vfs)
+                     if devices:
+                         host_obj = host
+                         sriov_device_objs = devices
+                         break
+         except Exception as exp:
+             self.logger.error("Error {} occurred while finding SRIOV devices on host: {}".format(exp, host_obj))
+         return host_obj, sriov_device_objs
+     def add_sriov_to_vm(self, content, vm_obj, host_obj, network_name, sriov_device):
+         """
+          Method to add SRIOV adapter to vm
+             Args:
+                 host_obj - vSphere host object
+                 vm_obj - vSphere vm object
+                 content - vCenter content object
+                 network_name - name of distributed virtual portgroup
+                 sriov_device - SRIOV device info
+             Returns:
+                  task object
+         """
+         devices = []
+         vnic_label = "sriov nic"
+         try:
+             dvs_portgr = self.get_dvport_group(network_name)
+             network_name = dvs_portgr.name
+             nic = vim.vm.device.VirtualDeviceSpec()
+             # VM device
+             nic.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
+             nic.device = vim.vm.device.VirtualSriovEthernetCard()
+             nic.device.addressType = 'assigned'
+             #nic.device.key = 13016
+             nic.device.deviceInfo = vim.Description()
+             nic.device.deviceInfo.label = vnic_label
+             nic.device.deviceInfo.summary = network_name
+             nic.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
+             nic.device.backing.network = self.get_obj(content, [vim.Network], network_name)
+             nic.device.backing.deviceName = network_name
+             nic.device.backing.useAutoDetect = False
+             nic.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
+             nic.device.connectable.startConnected = True
+             nic.device.connectable.allowGuestControl = True
+             nic.device.sriovBacking = vim.vm.device.VirtualSriovEthernetCard.SriovBackingInfo()
+             nic.device.sriovBacking.physicalFunctionBacking = vim.vm.device.VirtualPCIPassthrough.DeviceBackingInfo()
+             nic.device.sriovBacking.physicalFunctionBacking.id = sriov_device.id
+             devices.append(nic)
+             vmconf = vim.vm.ConfigSpec(deviceChange=devices)
+             task = vm_obj.ReconfigVM_Task(vmconf)
+             return task
+         except Exception as exp:
+             self.logger.error("Error {} occurred while adding SRIOV adapter in VM: {}".format(exp, vm_obj))
+             return None
+     def create_dvPort_group(self, network_name):
+         """
+          Method to create distributed virtual portgroup
+             Args:
+                 network_name - name of network/portgroup
+             Returns:
+                 portgroup key
+         """
+         try:
+             new_network_name = [network_name, '-', str(uuid.uuid4())]
+             network_name = ''.join(new_network_name)
+             vcenter_conect, content = self.get_vcenter_content()
+             dv_switch = self.get_obj(content, [vim.DistributedVirtualSwitch], self.dvs_name)
+             if dv_switch:
+                 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                 dv_pg_spec.name = network_name
+                 dv_pg_spec.type = vim.dvs.DistributedVirtualPortgroup.PortgroupType.earlyBinding
+                 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                 dv_pg_spec.defaultPortConfig.securityPolicy = vim.dvs.VmwareDistributedVirtualSwitch.SecurityPolicy()
+                 dv_pg_spec.defaultPortConfig.securityPolicy.allowPromiscuous = vim.BoolPolicy(value=False)
+                 dv_pg_spec.defaultPortConfig.securityPolicy.forgedTransmits = vim.BoolPolicy(value=False)
+                 dv_pg_spec.defaultPortConfig.securityPolicy.macChanges = vim.BoolPolicy(value=False)
+                 task = dv_switch.AddDVPortgroup_Task([dv_pg_spec])
+                 self.wait_for_vcenter_task(task, vcenter_conect)
+                 dvPort_group = self.get_obj(content, [vim.dvs.DistributedVirtualPortgroup], network_name)
+                 if dvPort_group:
+                     self.logger.info("Created distributed virtual port group: {}".format(dvPort_group))
+                     return dvPort_group.key
+             else:
+                 self.logger.debug("No distributed virtual switch found with name {}".format(self.dvs_name))
+         except Exception as exp:
+             self.logger.error("Error occurred while creating distributed virtual port group {}"\
+                              " : {}".format(network_name, exp))
+         return None
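+     # Usage sketch: the portgroup name is made unique by appending a uuid4, so a
+     # net_id such as "sriov_net" becomes e.g. "sriov_net-<uuid4>" (hypothetical):
+     #
+     #     pg_key = self.create_dvPort_group("sriov_net")
+     #     if pg_key is None:
+     #         # either no DVS named self.dvs_name exists, or AddDVPortgroup_Task failed
+     #         pass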
+     def reconfig_portgroup(self, content, dvPort_group_name, config_info={}):
+         """
+          Method to reconfigure distributed virtual portgroup
+             Args:
+                 dvPort_group_name - name of distributed virtual portgroup
+                 content - vCenter content object
+                 config_info - distributed virtual portgroup configuration
+             Returns:
+                 task object
+         """
+         try:
+             dvPort_group = self.get_dvport_group(dvPort_group_name)
+             if dvPort_group:
+                 dv_pg_spec = vim.dvs.DistributedVirtualPortgroup.ConfigSpec()
+                 dv_pg_spec.configVersion = dvPort_group.config.configVersion
+                 dv_pg_spec.defaultPortConfig = vim.dvs.VmwareDistributedVirtualSwitch.VmwarePortConfigPolicy()
+                 if "vlanID" in config_info:
+                     dv_pg_spec.defaultPortConfig.vlan = vim.dvs.VmwareDistributedVirtualSwitch.VlanIdSpec()
+                     dv_pg_spec.defaultPortConfig.vlan.vlanId = config_info.get('vlanID')
+                 task = dvPort_group.ReconfigureDVPortgroup_Task(spec=dv_pg_spec)
+                 return task
+             else:
+                 return None
+         except Exception as exp:
+             self.logger.error("Error occurred while reconfiguring distributed virtual port group {}"\
+                              " : {}".format(dvPort_group_name, exp))
+             return None
+     def destroy_dvport_group(self, dvPort_group_name):
+         """
+          Method to destroy distributed virtual portgroup
+             Args:
+                 dvPort_group_name - name of distributed virtual portgroup
+             Returns:
+                 True if portgroup successfully got deleted else False
+         """
+         vcenter_conect, content = self.get_vcenter_content()
+         try:
+             status = None
+             dvPort_group = self.get_dvport_group(dvPort_group_name)
+             if dvPort_group:
+                 task = dvPort_group.Destroy_Task()
+                 status = self.wait_for_vcenter_task(task, vcenter_conect)
+             return status
+         except vmodl.MethodFault as exp:
+             self.logger.error("Caught vmodl fault {} while deleting distributed virtual port group {}".format(
+                                                                     exp, dvPort_group_name))
+             return None
+     def get_dvport_group(self, dvPort_group_name):
+         """
+         Method to get distributed virtual portgroup
+             Args:
+                 dvPort_group_name - key of the distributed virtual portgroup
+             Returns:
+                 portgroup object
+         """
+         vcenter_conect, content = self.get_vcenter_content()
+         dvPort_group = None
+         try:
+             container = content.viewManager.CreateContainerView(content.rootFolder, [vim.dvs.DistributedVirtualPortgroup], True)
+             for item in container.view:
+                 if item.key == dvPort_group_name:
+                     dvPort_group = item
+                     break
+             return dvPort_group
+         except vmodl.MethodFault as exp:
+             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
+                                                                             exp, dvPort_group_name))
+             return None
+     def get_vlanID_from_dvs_portgr(self, dvPort_group_name):
+         """
+          Method to get distributed virtual portgroup vlanID
+             Args:
+                 dvPort_group_name - name of distributed virtual portgroup
+             Returns:
+                 vlan ID
+         """
+         vlanId = None
+         try:
+             dvPort_group = self.get_dvport_group(dvPort_group_name)
+             if dvPort_group:
+                 vlanId = dvPort_group.config.defaultPortConfig.vlan.vlanId
+         except vmodl.MethodFault as exp:
+             self.logger.error("Caught vmodl fault {} for distributed virtual port group {}".format(
+                                                                             exp, dvPort_group_name))
+         return vlanId
+     def configure_vlanID(self, content, vcenter_conect, dvPort_group_name):
+         """
+          Method to configure vlanID in distributed virtual portgroup
+             Args:
+                 dvPort_group_name - name of distributed virtual portgroup
+             Returns:
+                 None
+         """
+         vlanID = self.get_vlanID_from_dvs_portgr(dvPort_group_name)
+         if vlanID == 0:
+             #configure vlanID
+             vlanID = self.generate_vlanID(dvPort_group_name)
+             config = {"vlanID":vlanID}
+             task = self.reconfig_portgroup(content, dvPort_group_name,
+                                     config_info=config)
+             if task:
+                 status = self.wait_for_vcenter_task(task, vcenter_conect)
+                 if status:
+                     self.logger.info("Reconfigured Port group {} for vlan ID {}".format(
+                                                         dvPort_group_name, vlanID))
+             else:
+                 self.logger.error("Failed to reconfigure portgroup {} for vlan ID {}".format(
+                                         dvPort_group_name, vlanID))
+     def generate_vlanID(self, network_name):
+         """
+          Method to get unused vlanID
+             Args:
+                 network_name - name of network/portgroup
+             Returns:
+                 vlanID
+         """
+         vlan_id = None
+         used_ids = []
+         if self.config.get('vlanID_range') is None:
+             raise vimconn.vimconnConflictException("You must provide a 'vlanID_range' "\
+                         "in the config before creating an SRIOV network with a vlan tag")
+         if "used_vlanIDs" not in self.persistent_info:
+             self.persistent_info["used_vlanIDs"] = {}
+         else:
+             used_ids = list(self.persistent_info["used_vlanIDs"].values())
+         for vlanID_range in self.config.get('vlanID_range'):
+             start_vlanid, end_vlanid = vlanID_range.split("-")
+             # compare as integers; string comparison would mis-order e.g. "999" and "1000"
+             if int(start_vlanid) > int(end_vlanid):
+                 raise vimconn.vimconnConflictException("Invalid vlan ID range {}".format(
+                                                                         vlanID_range))
+             for id in range(int(start_vlanid), int(end_vlanid) + 1):
+                 if id not in used_ids:
+                     vlan_id = id
+                     self.persistent_info["used_vlanIDs"][network_name] = vlan_id
+                     return vlan_id
+         if vlan_id is None:
+             raise vimconn.vimconnConflictException("All Vlan IDs are in use")
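+     # generate_vlanID() expects the VIM 'config' to carry "start-end" strings,
+     # e.g. (illustrative values):
+     #
+     #     config:
+     #         vlanID_range: ['2000-2049', '2100-2149']
+     #
+     # With no IDs in use yet, the first call returns 2000 and records it in
+     # self.persistent_info["used_vlanIDs"] under the given network name.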
+     def get_obj(self, content, vimtype, name):
+         """
+          Get the vsphere object associated with a given text name
+         """
+         obj = None
+         container = content.viewManager.CreateContainerView(content.rootFolder, vimtype, True)
+         for item in container.view:
+             if item.name == name:
+                 obj = item
+                 break
+         return obj
+     def insert_media_to_vm(self, vapp, image_id):
+         """
+         Method to insert media CD-ROM (ISO image) from catalog to vm.
+         vapp - vapp object to get vm id
+         image_id - image id of the CD-ROM to be inserted into the vm
+         """
+         # create connection object
+         vca = self.connect()
+         try:
+             # fetching catalog details
+             rest_url = "{}/api/catalog/{}".format(self.url, image_id)
+             if vca._session:
+                 headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                 response = self.perform_request(req_type='GET',
+                                                 url=rest_url,
+                                                 headers=headers)
+             if response.status_code != 200:
+                 self.logger.error("REST call {} failed reason : {} "\
+                              "status code : {}".format(rest_url,
+                                                     response.content,
+                                                response.status_code))
+                 raise vimconn.vimconnException("insert_media_to_vm(): Failed to get "\
+                                                                     "catalog details")
+             # searching iso name and id
+             iso_name,media_id = self.get_media_details(vca, response.content)
+             if iso_name and media_id:
+                 data ="""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+                      <ns6:MediaInsertOrEjectParams
+                      xmlns="http://www.vmware.com/vcloud/versions" xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1" 
+                      xmlns:ns3="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_VirtualSystemSettingData" 
+                      xmlns:ns4="http://schemas.dmtf.org/wbem/wscim/1/common" 
+                      xmlns:ns5="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/CIM_ResourceAllocationSettingData" 
+                      xmlns:ns6="http://www.vmware.com/vcloud/v1.5" 
+                      xmlns:ns7="http://www.vmware.com/schema/ovf" 
+                      xmlns:ns8="http://schemas.dmtf.org/ovf/environment/1" 
+                      xmlns:ns9="http://www.vmware.com/vcloud/extension/v1.5">
+                      <ns6:Media
+                         type="application/vnd.vmware.vcloud.media+xml"
+                         name="{}"
+                         id="urn:vcloud:media:{}"
+                         href="https://{}/api/media/{}"/>
+                      </ns6:MediaInsertOrEjectParams>""".format(iso_name, media_id,
+                                                                 self.url,media_id)
+                 for vms in vapp.get_all_vms():
+                     vm_id = vms.get('id').split(':')[-1]
+                     headers['Content-Type'] = 'application/vnd.vmware.vcloud.mediaInsertOrEjectParams+xml'
+                     rest_url = "{}/api/vApp/vm-{}/media/action/insertMedia".format(self.url,vm_id)
+                     response = self.perform_request(req_type='POST',
+                                                        url=rest_url,
+                                                           data=data,
+                                                     headers=headers)
+                     if response.status_code != 202:
+                         error_msg = "insert_media_to_vm() : Failed to insert CD-ROM to vm. Reason {}. " \
+                                     "Status code {}".format(response.text, response.status_code)
+                         self.logger.error(error_msg)
+                         raise vimconn.vimconnException(error_msg)
+                     else:
+                         task = self.get_task_from_response(response.content)
+                         result = self.client.get_task_monitor().wait_for_success(task=task)
+                         if result.get('status') == 'success':
+                             self.logger.info("insert_media_to_vm(): Successfully inserted media ISO"\
+                                                                     " image to vm {}".format(vm_id))
+         except Exception as exp:
+             self.logger.error("insert_media_to_vm() : exception occurred "\
+                                             "while inserting media CD-ROM")
+             raise vimconn.vimconnException(message=exp)
+     def get_media_details(self, vca, content):
+         """
+         Method to get catalog item details
+         vca - connection object
+         content - Catalog details
+         Return - Media name, media id
+         """
+         cataloghref_list = []
+         try:
+             if content:
+                 vm_list_xmlroot = XmlElementTree.fromstring(content)
+                 for child in vm_list_xmlroot.iter():
+                     if 'CatalogItem' in child.tag:
+                         cataloghref_list.append(child.attrib.get('href'))
+                 if cataloghref_list:
+                     for href in cataloghref_list:
+                         if href:
+                             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                            'x-vcloud-authorization': vca._session.headers['x-vcloud-authorization']}
+                             response = self.perform_request(req_type='GET',
+                                                                   url=href,
+                                                            headers=headers)
+                             if response.status_code != 200:
+                                 self.logger.error("REST call {} failed reason : {} "\
+                                              "status code : {}".format(href,
+                                                            response.content,
+                                                       response.status_code))
+                                 raise vimconn.vimconnException("get_media_details : Failed to get "\
+                                                                          "catalog item details")
+                             list_xmlroot = XmlElementTree.fromstring(response.content)
+                             for child in list_xmlroot.iter():
+                                 if 'Entity' in child.tag:
+                                     if 'media' in child.attrib.get('href'):
+                                         name = child.attrib.get('name')
+                                         media_id = child.attrib.get('href').split('/').pop()
+                                         return name,media_id
+                             else:
+                                 self.logger.debug("Media name and id not found")
+                                 return False,False
+         except Exception as exp:
+             self.logger.error("get_media_details : exception occurred "\
+                                                "while getting media details")
+             raise vimconn.vimconnException(message=exp)
+     def retry_rest(self, method, url, add_headers=None, data=None):
+         """ Method to get Token & retry respective REST request
+             Args:
+                 method - HTTP method; one of 'GET', 'PUT', 'POST' or 'DELETE'
+                 url - request url to be used
+                 add_headers - Additional headers (optional)
+                 data - Request payload data to be passed in request
+             Returns:
+                 response - Response of request
+         """
+         response = None
+         #Get token
+         self.get_token()
+         if self.client._session:
+             headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                        'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+         if add_headers:
+             headers.update(add_headers)
+         if method == 'GET':
+             response = self.perform_request(req_type='GET',
+                                             url=url,
+                                             headers=headers)
+         elif method == 'PUT':
+             response = self.perform_request(req_type='PUT',
+                                             url=url,
+                                             headers=headers,
+                                             data=data)
+         elif method == 'POST':
+             response = self.perform_request(req_type='POST',
+                                             url=url,
+                                             headers=headers,
+                                             data=data)
+         elif method == 'DELETE':
+             response = self.perform_request(req_type='DELETE',
+                                             url=url,
+                                             headers=headers)
+         return response
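+     # retry_rest() is the fallback used throughout this connector when a request
+     # comes back 403 (expired token), e.g.:
+     #
+     #     response = self.perform_request(req_type='GET', url=disk_href, headers=headers)
+     #     if response.status_code == 403:
+     #         response = self.retry_rest('GET', disk_href)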
+     def get_token(self):
+         """ Generate a new token if expired
+             Sets:
+                 self.client - a client object that can later be used to connect to vCloud director as admin for VDC
+         """
+         try:
+             self.logger.debug("Generate token for vca {} as {} to datacenter {}.".format(self.org_name,
+                                                                                       self.user,
+                                                                                       self.org_name))
+             host = self.url
+             client = Client(host, verify_ssl_certs=False)
+             client.set_highest_supported_version()
+             client.set_credentials(BasicLoginCredentials(self.user, self.org_name, self.passwd))
+             # connection object
+             self.client = client
+         except Exception:
+             raise vimconn.vimconnConnectionException("Can't connect to a vCloud director org: "
+                                                      "{} as user: {}".format(self.org_name, self.user))
+         if not client:
+             raise vimconn.vimconnConnectionException("Failed while reconnecting vCD")
+     def get_vdc_details(self):
+         """ Get VDC details using pyVcloud Lib
+             Returns org and vdc object
+         """
+         vdc = None
+         try:
+             org = Org(self.client, resource=self.client.get_org())
+             vdc = org.get_vdc(self.tenant_name)
+         except Exception as e:
+             # pyvcloud not giving a specific exception, Refresh nevertheless
+             self.logger.debug("Received exception {}, refreshing token ".format(str(e)))
+         #Retry once, if failed by refreshing token
+         if vdc is None:
+             self.get_token()
+             org = Org(self.client, resource=self.client.get_org())
+             vdc = org.get_vdc(self.tenant_name)
+         return org, vdc
+     def perform_request(self, req_type, url, headers=None, data=None):
+         """Perform the POST/PUT/GET/DELETE request."""
+         #Log REST request details
+         self.log_request(req_type, url=url, headers=headers, data=data)
+         # perform request and return its result
+         if req_type == 'GET':
+             response = requests.get(url=url,
+                                 headers=headers,
+                                 verify=False)
+         elif req_type == 'PUT':
+             response = requests.put(url=url,
+                                 headers=headers,
+                                 data=data,
+                                 verify=False)
+         elif req_type == 'POST':
+             response = requests.post(url=url,
+                                  headers=headers,
+                                  data=data,
+                                  verify=False)
+         elif req_type == 'DELETE':
+             response = requests.delete(url=url,
+                                  headers=headers,
+                                  verify=False)
+         #Log the REST response
+         self.log_response(response)
+         return response
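+     # A typical GET through perform_request(); note that verify=False above means
+     # TLS certificates are not checked, which accepts the self-signed certificates
+     # commonly found on vCD endpoints:
+     #
+     #     headers = {'Accept': 'application/*+xml;version=' + API_VERSION,
+     #                'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+     #     response = self.perform_request(req_type='GET', url=rest_url, headers=headers)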
+     def log_request(self, req_type, url=None, headers=None, data=None):
+         """Logs REST request details"""
+         if req_type is not None:
+             self.logger.debug("Request type: {}".format(req_type))
+         if url is not None:
+             self.logger.debug("Request url: {}".format(url))
+         if headers is not None:
+             for header in headers:
+                 self.logger.debug("Request header: {}: {}".format(header, headers[header]))
+         if data is not None:
+             self.logger.debug("Request data: {}".format(data))
+     def log_response(self, response):
+         """Logs REST response details"""
+         self.logger.debug("Response status code: {} ".format(response.status_code))
+     def get_task_from_response(self, content):
+         """
+         content - API response content(response.content)
+         return task object
+         """
+         xmlroot = XmlElementTree.fromstring(content)
+         if xmlroot.tag.split('}')[1] == "Task":
+             return xmlroot
+         else:
+             for ele in xmlroot:
+                 if ele.tag.split("}")[1] == "Tasks":
+                     task = ele[0]
+                     break
+             return task
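+     # get_task_from_response() covers the two shapes vCD returns: either the root
+     # element is itself a <Task>, or the task is nested inside the entity, as in
+     # this sketch:
+     #
+     #     <VApp ...>
+     #         <Tasks>
+     #             <Task operation="..." status="running" .../>
+     #         </Tasks>
+     #     </VApp>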
+     def power_on_vapp(self,vapp_id, vapp_name):
+         """
+         vapp_id - vApp uuid
+         vapp_name - vApp name
+         return - Task object
+         """
+         headers = {'Accept':'application/*+xml;version=' + API_VERSION,
+                    'x-vcloud-authorization': self.client._session.headers['x-vcloud-authorization']}
+         poweron_href = "{}/api/vApp/vapp-{}/power/action/powerOn".format(self.url,
+                                                                           vapp_id)
+         response = self.perform_request(req_type='POST',
+                                        url=poweron_href,
+                                         headers=headers)
+         if response.status_code != 202:
+             self.logger.error("REST call {} failed reason : {} "\
+                          "status code : {} ".format(poweron_href,
+                                                 response.content,
+                                            response.status_code))
+             raise vimconn.vimconnException("power_on_vapp() : Failed to power on "\
+                                                       "vApp {}".format(vapp_name))
+         else:
+             poweron_task = self.get_task_from_response(response.content)
+             return poweron_task
index 0000000,6886f86..05a3ec1
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,25 +1,25 @@@
 -git+https://osm.etsi.org/gerrit/osm/RO.git@py3#egg=osm-ro&subdirectory=RO
+ ##
+ # Copyright VMware Inc.
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #    http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ # implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ ##
+ PyYAML
+ requests
+ netaddr
+ pyvcloud==19.1.1
+ pyvmomi
+ progressbar
+ prettytable
+ # TODO py3 genisoimage
++git+https://osm.etsi.org/gerrit/osm/RO.git#egg=osm-ro&subdirectory=RO
index 0000000,5d03b96..ab26ade
mode 000000,100755..100755
--- /dev/null
@@@ -1,0 -1,1632 +1,1632 @@@
 -    sql "ALTER TABLE instance_interfaces ADD COLUMN instance_wim_net_id VARCHAR(36) NULL AFTER instance_net_id, "\
 -        "ADD COLUMN model VARCHAR(12) NULL DEFAULT NULL AFTER type, "\"
+ #!/bin/bash
+ ##
+ # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+ #
+ #Upgrade/Downgrade openmano database preserving the content
+ #
+ DBUTILS="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+ DBUSER="mano"
+ DBPASS=""
+ DEFAULT_DBPASS="manopw"
+ DBHOST=""
+ DBPORT="3306"
+ DBNAME="mano_db"
+ QUIET_MODE=""
+ BACKUP_DIR=""
+ BACKUP_FILE=""
+ #TODO update it with the last database version
+ LAST_DB_VERSION=40
+ # Detect paths
+ MYSQL=$(which mysql)
+ AWK=$(which awk)
+ GREP=$(which grep)
+ function usage(){
+     echo -e "Usage: $0 OPTIONS [version]"
+     echo -e "  Upgrades/Downgrades openmano database preserving the content."\
+             "If [version] is not provided, it is upgraded to the last version"
+     echo -e "  OPTIONS"
+     echo -e "     -u USER  database user. '$DBUSER' by default. Prompts if DB access fails"
+     echo -e "     -p PASS  database password. If missing it tries without and '$DEFAULT_DBPASS' password before prompting"
+     echo -e "     -P PORT  database port. '$DBPORT' by default"
+     echo -e "     -h HOST  database host. 'localhost' by default"
+     echo -e "     -d NAME  database name. '$DBNAME' by default.  Prompts if DB access fails"
+     echo -e "     -b DIR   backup folder where to create rollback backup file"
+     echo -e "     -q --quiet: Do not prompt for credentials and exit if cannot access to database"
+     echo -e "     --help   shows this help"
+ }
+ while getopts ":u:p:b:P:h:d:q-:" o; do
+     case "${o}" in
+         u)
+             DBUSER="$OPTARG"
+             ;;
+         p)
+             DBPASS="$OPTARG"
+             ;;
+         P)
+             DBPORT="$OPTARG"
+             ;;
+         d)
+             DBNAME="$OPTARG"
+             ;;
+         h)
+             DBHOST="$OPTARG"
+             ;;
+         b)
+             BACKUP_DIR="$OPTARG"
+             ;;
+         q)
+             export QUIET_MODE=yes
+             ;;
+         -)
+             [ "${OPTARG}" == "help" ] && usage && exit 0
+             [ "${OPTARG}" == "quiet" ] && export QUIET_MODE=yes && continue
+             echo "Invalid option: '--$OPTARG'. Type --help for more information" >&2
+             exit 1
+             ;;
+         \?)
+             echo "Invalid option: '-$OPTARG'. Type --help for more information" >&2
+             exit 1
+             ;;
+         :)
+             echo "Option '-$OPTARG' requires an argument. Type --help for more information" >&2
+             exit 1
+             ;;
+         *)
+             usage >&2
+             exit 1
+             ;;
+     esac
+ done
+ shift $((OPTIND-1))
+ DB_VERSION=$1
+ if [ -n "$DB_VERSION" ] ; then
+     # check it is a number and an allowed one
+     [ "$DB_VERSION" -eq "$DB_VERSION" ] 2>/dev/null || 
+         ! echo "parameter 'version' requires an integer value" >&2 || exit 1
+     if [ "$DB_VERSION" -lt 0 ] || [ "$DB_VERSION" -gt "$LAST_DB_VERSION" ] ; then
+         echo "parameter 'version' requires a valid database version between '0' and '$LAST_DB_VERSION'"\
+              "If you need an upper version, get a newer version of this script '$0'" >&2
+         exit 1
+     fi
+ else
+     DB_VERSION="$LAST_DB_VERSION"
+ fi
+ # Creating temporary file
+ TEMPFILE="$(mktemp -q --tmpdir "migratemanodb.XXXXXX")"
+ trap 'rm -f "$TEMPFILE"' EXIT
+ chmod 0600 "$TEMPFILE"
+ DEF_EXTRA_FILE_PARAM="--defaults-extra-file=$TEMPFILE"
+ echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
+ # Check and ask for database user password
+ FIRST_TRY="yes"
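+ # Connectivity check loop: if no password was given, the first failure retries
+ # silently with DEFAULT_DBPASS; later failures prompt for the database name and
+ # credentials unless --quiet was set.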
+ while ! DB_ERROR=`mysql "$DEF_EXTRA_FILE_PARAM" $DBNAME -e "quit" 2>&1 >/dev/null`
+ do
+     # if password is not provided, try silently with $DEFAULT_DBPASS before exit or prompt for credentials
+     [[ -n "$FIRST_TRY" ]] && [[ -z "$DBPASS" ]] && DBPASS="$DEFAULT_DBPASS" &&
+         echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE" &&
+         continue
+     echo "$DB_ERROR"
+     [[ -n "$QUIET_MODE" ]] && echo -e "Invalid database credentials!!!" >&2 && exit 1
+     echo -e "Provide database name and credentials (Ctrl+c to abort):"
+     read -e -p "    mysql database name($DBNAME): " KK
+     [ -n "$KK" ] && DBNAME="$KK"
+     read -e -p "    mysql user($DBUSER): " KK
+     [ -n "$KK" ] && DBUSER="$KK"
+     read -e -s -p "    mysql password: " DBPASS
+     echo -e "[client]\n user='${DBUSER}'\n password='$DBPASS'\n host='$DBHOST'\n port='$DBPORT'" > "$TEMPFILE"
+     FIRST_TRY=""
+     echo
+ done
+ DBCMD="mysql $DEF_EXTRA_FILE_PARAM $DBNAME"
+ #echo DBCMD $DBCMD
+ # check that the database looks like an openmano database
+ if ! echo -e "show create table vnfs;\nshow create table scenarios" | $DBCMD >/dev/null 2>&1
+ then
+     echo "    database $DBNAME does not seem to be an openmano database" >&2
+     exit 1;
+ fi
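+ # NOTE: the upgrade_to_N/downgrade_from_N functions below execute their
+ # statements through the sql() helper (defined later in this script), which
+ # pipes each statement into $DBCMD and aborts the migration on error.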
+ #GET DATABASE TARGET VERSION
+ #DB_VERSION=0
+ #[ $OPENMANO_VER_NUM -ge 2002 ] && DB_VERSION=1   #0.2.2 =>  1
+ #[ $OPENMANO_VER_NUM -ge 2005 ] && DB_VERSION=2   #0.2.5 =>  2
+ #[ $OPENMANO_VER_NUM -ge 3003 ] && DB_VERSION=3   #0.3.3 =>  3
+ #[ $OPENMANO_VER_NUM -ge 3005 ] && DB_VERSION=4   #0.3.5 =>  4
+ #[ $OPENMANO_VER_NUM -ge 4001 ] && DB_VERSION=5   #0.4.1 =>  5
+ #[ $OPENMANO_VER_NUM -ge 4002 ] && DB_VERSION=6   #0.4.2 =>  6
+ #[ $OPENMANO_VER_NUM -ge 4003 ] && DB_VERSION=7   #0.4.3 =>  7
+ #[ $OPENMANO_VER_NUM -ge 4032 ] && DB_VERSION=8   #0.4.32=>  8
+ #[ $OPENMANO_VER_NUM -ge 4033 ] && DB_VERSION=9   #0.4.33=>  9
+ #[ $OPENMANO_VER_NUM -ge 4036 ] && DB_VERSION=10  #0.4.36=>  10
+ #[ $OPENMANO_VER_NUM -ge 4043 ] && DB_VERSION=11  #0.4.43=>  11
+ #[ $OPENMANO_VER_NUM -ge 4046 ] && DB_VERSION=12  #0.4.46=>  12
+ #[ $OPENMANO_VER_NUM -ge 4047 ] && DB_VERSION=13  #0.4.47=>  13
+ #[ $OPENMANO_VER_NUM -ge 4057 ] && DB_VERSION=14  #0.4.57=>  14
+ #[ $OPENMANO_VER_NUM -ge 4059 ] && DB_VERSION=15  #0.4.59=>  15
+ #[ $OPENMANO_VER_NUM -ge 5002 ] && DB_VERSION=16  #0.5.2 =>  16
+ #[ $OPENMANO_VER_NUM -ge 5003 ] && DB_VERSION=17  #0.5.3 =>  17
+ #[ $OPENMANO_VER_NUM -ge 5004 ] && DB_VERSION=18  #0.5.4 =>  18
+ #[ $OPENMANO_VER_NUM -ge 5005 ] && DB_VERSION=19  #0.5.5 =>  19
+ #[ $OPENMANO_VER_NUM -ge 5009 ] && DB_VERSION=20  #0.5.9 =>  20
+ #[ $OPENMANO_VER_NUM -ge 5015 ] && DB_VERSION=21  #0.5.15 =>  21
+ #[ $OPENMANO_VER_NUM -ge 5016 ] && DB_VERSION=22  #0.5.16 =>  22
+ #[ $OPENMANO_VER_NUM -ge 5020 ] && DB_VERSION=23  #0.5.20 =>  23
+ #[ $OPENMANO_VER_NUM -ge 5021 ] && DB_VERSION=24  #0.5.21 =>  24
+ #[ $OPENMANO_VER_NUM -ge 5022 ] && DB_VERSION=25  #0.5.22 =>  25
+ #[ $OPENMANO_VER_NUM -ge 5024 ] && DB_VERSION=26  #0.5.24 =>  26
+ #[ $OPENMANO_VER_NUM -ge 5025 ] && DB_VERSION=27  #0.5.25 =>  27
+ #[ $OPENMANO_VER_NUM -ge 5052 ] && DB_VERSION=28  #0.5.52 =>  28
+ #[ $OPENMANO_VER_NUM -ge 5059 ] && DB_VERSION=29  #0.5.59 =>  29
+ #[ $OPENMANO_VER_NUM -ge 5060 ] && DB_VERSION=30  #0.5.60 =>  30
+ #[ $OPENMANO_VER_NUM -ge 5061 ] && DB_VERSION=31  #0.5.61 =>  31
+ #[ $OPENMANO_VER_NUM -ge 5070 ] && DB_VERSION=32  #0.5.70 =>  32
+ #[ $OPENMANO_VER_NUM -ge 5082 ] && DB_VERSION=33  #0.5.82 =>  33
+ #[ $OPENMANO_VER_NUM -ge 6000 ] && DB_VERSION=34  #0.6.00 =>  34
+ #[ $OPENMANO_VER_NUM -ge 6001 ] && DB_VERSION=35  #0.6.01 =>  35
+ #[ $OPENMANO_VER_NUM -ge 6003 ] && DB_VERSION=36  #0.6.03 =>  36
+ #[ $OPENMANO_VER_NUM -ge 6009 ] && DB_VERSION=37  #0.6.09 =>  37
+ #[ $OPENMANO_VER_NUM -ge 6011 ] && DB_VERSION=38  #0.6.11 =>  38
+ #[ $OPENMANO_VER_NUM -ge 6020 ] && DB_VERSION=39  #0.6.20 =>  39
+ #[ $OPENMANO_VER_NUM -ge 6000004 ] && DB_VERSION=40  #6.0.4 =>  40
+ #TODO ... put next versions here
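+ # Each schema version N has a paired upgrade_to_N/downgrade_from_N function;
+ # the driver loop further below calls them in sequence until the version
+ # recorded in schema_version matches the requested DB_VERSION.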
+ function upgrade_to_1(){
+     # echo "    upgrade database from version 0.0 to version 0.1"
+     echo "      CREATE TABLE \`schema_version\`"
+     sql "CREATE TABLE \`schema_version\` (
+       \`version_int\` INT NOT NULL COMMENT 'version as a number. Must not contain gaps',
+       \`version\` VARCHAR(20) NOT NULL COMMENT 'version as a text',
+       \`openmano_ver\` VARCHAR(20) NOT NULL COMMENT 'openmano version',
+       \`comments\` VARCHAR(2000) NULL COMMENT 'changes to database',
+       \`date\` DATE NULL,
+       PRIMARY KEY (\`version_int\`)
+       )
+       COMMENT='database schema control version'
+       COLLATE='utf8_general_ci'
+       ENGINE=InnoDB;"
+     sql "INSERT INTO \`schema_version\` (\`version_int\`, \`version\`, \`openmano_ver\`, \`comments\`, \`date\`)
+        VALUES (1, '0.1', '0.2.2', 'insert schema_version', '2015-05-08');"
+ }
+ function downgrade_from_1(){
+     # echo "    downgrade database from version 0.1 to version 0.0"
+     echo "      DROP TABLE IF EXISTS \`schema_version\`"
+     sql "DROP TABLE IF EXISTS \`schema_version\`;"
+ }
+ function upgrade_to_2(){
+     # echo "    upgrade database from version 0.1 to version 0.2"
+     echo "      Add columns user/passwd to table 'vim_tenants'"
+     sql "ALTER TABLE vim_tenants ADD COLUMN user VARCHAR(36) NULL COMMENT 'Credentials for vim' AFTER created,
+       ADD COLUMN passwd VARCHAR(50) NULL COMMENT 'Credentials for vim' AFTER user;"
+     echo "      Add table 'images' and 'datacenters_images'"
+     sql "CREATE TABLE images (
+       uuid VARCHAR(36) NOT NULL,
+       name VARCHAR(50) NOT NULL,
+       location VARCHAR(200) NOT NULL,
+       description VARCHAR(100) NULL,
+       metadata VARCHAR(400) NULL,
+       PRIMARY KEY (uuid),
+       UNIQUE INDEX location (location)  )
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     sql "CREATE TABLE datacenters_images (
+       id INT NOT NULL AUTO_INCREMENT,
+       image_id VARCHAR(36) NOT NULL,
+       datacenter_id VARCHAR(36) NOT NULL,
+       vim_id VARCHAR(36) NOT NULL,
+       PRIMARY KEY (id),
+       CONSTRAINT FK__images FOREIGN KEY (image_id) REFERENCES images (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+       CONSTRAINT FK__datacenters_i FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE  )
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     echo "      migrate data from table 'vms' into 'images'"
+     sql "INSERT INTO images (uuid, name, location) SELECT DISTINCT vim_image_id, vim_image_id, image_path FROM vms;"
+     sql "INSERT INTO datacenters_images (image_id, datacenter_id, vim_id)
+           SELECT DISTINCT vim_image_id, datacenters.uuid, vim_image_id FROM vms JOIN datacenters;"
+     echo "      Add table 'flavors' and 'datacenter_flavors'"
+     sql "CREATE TABLE flavors (
+       uuid VARCHAR(36) NOT NULL,
+       name VARCHAR(50) NOT NULL,
+       description VARCHAR(100) NULL,
+       disk SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
+       ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
+       vcpus SMALLINT(5) UNSIGNED NULL DEFAULT NULL,
+       extended VARCHAR(2000) NULL DEFAULT NULL COMMENT 'Extra description json format of needed resources and pinning, organized in sets per numa',
+       PRIMARY KEY (uuid)  )
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     sql "CREATE TABLE datacenters_flavors (
+       id INT NOT NULL AUTO_INCREMENT,
+       flavor_id VARCHAR(36) NOT NULL,
+       datacenter_id VARCHAR(36) NOT NULL,
+       vim_id VARCHAR(36) NOT NULL,
+       PRIMARY KEY (id),
+       CONSTRAINT FK__flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+       CONSTRAINT FK__datacenters_f FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE CASCADE ON DELETE CASCADE  )
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     echo "      migrate data from table 'vms' into 'flavors'"
+     sql "INSERT INTO flavors (uuid, name) SELECT DISTINCT vim_flavor_id, vim_flavor_id FROM vms;"
+     sql "INSERT INTO datacenters_flavors (flavor_id, datacenter_id, vim_id)
+           SELECT DISTINCT vim_flavor_id, datacenters.uuid, vim_flavor_id FROM vms JOIN datacenters;"
+     sql "ALTER TABLE vms ALTER vim_flavor_id DROP DEFAULT, ALTER vim_image_id DROP DEFAULT;
+           ALTER TABLE vms CHANGE COLUMN vim_flavor_id flavor_id VARCHAR(36) NOT NULL COMMENT 'Link to flavor table' AFTER vnf_id,
+           CHANGE COLUMN vim_image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER flavor_id, 
+           ADD CONSTRAINT FK_vms_images  FOREIGN KEY (image_id) REFERENCES  images (uuid),
+           ADD CONSTRAINT FK_vms_flavors FOREIGN KEY (flavor_id) REFERENCES flavors (uuid);"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (2, '0.2', '0.2.5', 'new tables images,flavors', '2015-07-13');"
+ }
+ function downgrade_from_2(){
+     # echo "    downgrade database from version 0.2 to version 0.1"
+     echo "       migrate back data from 'datacenters_images' 'datacenters_flavors' into 'vms'"
+     sql "ALTER TABLE vms ALTER image_id DROP DEFAULT, ALTER flavor_id DROP DEFAULT;
+           ALTER TABLE vms CHANGE COLUMN flavor_id vim_flavor_id VARCHAR(36) NOT NULL COMMENT 'Flavor ID in the VIM DB' AFTER vnf_id,
+           CHANGE COLUMN image_id vim_image_id VARCHAR(36) NOT NULL COMMENT 'Image ID in the VIM DB' AFTER vim_flavor_id,
+           DROP FOREIGN KEY FK_vms_flavors, DROP INDEX FK_vms_flavors,
+           DROP FOREIGN KEY FK_vms_images, DROP INDEX FK_vms_images;"
+ #    echo "UPDATE v SET v.vim_image_id=di.vim_id
+ #          FROM  vms as v INNER JOIN images as i ON v.vim_image_id=i.uuid 
+ #          INNER JOIN datacenters_images as di ON i.uuid=di.image_id;"
+     echo "      Delete columns 'user/passwd' from 'vim_tenants'"
+     sql "ALTER TABLE vim_tenants DROP COLUMN user, DROP COLUMN passwd; "
+     echo "        delete tables 'datacenter_images', 'images'"
+     sql "DROP TABLE IF EXISTS \`datacenters_images\`;"
+     sql "DROP TABLE IF EXISTS \`images\`;"
+     echo "        delete tables 'datacenter_flavors', 'flavors'"
+     sql "DROP TABLE IF EXISTS \`datacenters_flavors\`;"
+     sql "DROP TABLE IF EXISTS \`flavors\`;"
+     sql "DELETE FROM schema_version WHERE version_int='2';"
+ }
+ function upgrade_to_3(){
+     # echo "    upgrade database from version 0.2 to version 0.3"
+     echo "      Change table 'logs', 'uuids"
+     sql "ALTER TABLE logs CHANGE COLUMN related related VARCHAR(36) NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
+     sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at VARCHAR(36) NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
+     echo "      Add column created to table 'datacenters_images' and 'datacenters_flavors'"
+     for table in datacenters_images datacenters_flavors
+     do
+         sql "ALTER TABLE $table ADD COLUMN created ENUM('true','false') NOT NULL DEFAULT 'false' 
+             COMMENT 'Indicates if it has been created by openmano, or already existed' AFTER vim_id;"
+     done
+     sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(2000) NULL DEFAULT NULL AFTER description;"
+     echo "      Allow null to column 'vim_interface_id' in 'instance_interfaces'"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
+     echo "      Add column config to table 'datacenters'"
+     sql "ALTER TABLE datacenters ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL COMMENT 'extra config information in json' AFTER vim_url_admin;
+       "
+     echo "      Add column datacenter_id to table 'vim_tenants'"
+     sql "ALTER TABLE vim_tenants ADD COLUMN datacenter_id VARCHAR(36) NULL COMMENT 'Datacenter of this tenant' AFTER uuid,
+       DROP INDEX name, DROP INDEX vim_tenant_id;"
+     sql "ALTER TABLE vim_tenants CHANGE COLUMN name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL COMMENT 'tenant name at VIM' AFTER datacenter_id,
+       CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
+     echo "UPDATE vim_tenants as vt LEFT JOIN tenants_datacenters as td ON vt.uuid=td.vim_tenant_id
+       SET vt.datacenter_id=td.datacenter_id;"
+     sql "DELETE FROM vim_tenants WHERE datacenter_id is NULL;"
+     sql "ALTER TABLE vim_tenants ALTER datacenter_id DROP DEFAULT;
+       ALTER TABLE vim_tenants
+       CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL COMMENT 'Datacenter of this tenant' AFTER uuid;"
+     sql "ALTER TABLE vim_tenants ADD CONSTRAINT FK_vim_tenants_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid)
+       ON UPDATE CASCADE ON DELETE CASCADE;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (3, '0.3', '0.3.3', 'alter vim_tenant tables', '2015-07-28');"
+ }
+ function downgrade_from_3(){
+     # echo "    downgrade database from version 0.3 to version 0.2"
+     echo "      Change back table 'logs', 'uuids'"
+     sql "ALTER TABLE logs CHANGE COLUMN related related ENUM('nfvo_tenants','datacenters','vim_tenants','tenants_datacenters','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NOT NULL COMMENT 'Relevant element for the log' AFTER nfvo_tenant_id;"
+     sql "ALTER TABLE uuids CHANGE COLUMN used_at used_at ENUM('nfvo_tenants','datacenters','vim_tenants','vnfs','vms','interfaces','nets','scenarios','sce_vnfs','sce_interfaces','sce_nets','instance_scenarios','instance_vnfs','instance_vms','instance_nets','instance_interfaces') NULL DEFAULT NULL COMMENT 'Table that uses this UUID' AFTER created_at;"
+     echo "      Delete column created from table 'datacenters_images' and 'datacenters_flavors'"
+     for table in datacenters_images datacenters_flavors
+     do
+         sql "ALTER TABLE $table DROP COLUMN created;"
+     done
+     sql "ALTER TABLE images CHANGE COLUMN metadata metadata VARCHAR(400) NULL DEFAULT NULL AFTER description;"
+     echo "      Deny back null to column 'vim_interface_id' in 'instance_interfaces'"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NOT NULL COMMENT 'vim identity for that interface' AFTER interface_id; "
+     echo "       Delete column config to table 'datacenters'"
+     sql "ALTER TABLE datacenters DROP COLUMN config;"
+     echo "       Delete column datacenter_id to table 'vim_tenants'"
+     sql "ALTER TABLE vim_tenants DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_vim_tenants_datacenters;"
+     sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name name VARCHAR(36) NULL DEFAULT NULL COMMENT '' AFTER uuid"
+     sql "ALTER TABLE vim_tenants ALTER name DROP DEFAULT;"
+     sql "ALTER TABLE vim_tenants CHANGE COLUMN name name VARCHAR(36) NOT NULL AFTER uuid" || ! echo "Warning changing column name at vim_tenants!"
+     sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX name (name);" || ! echo "Warning add unique index name at vim_tenants!"
+     sql "ALTER TABLE vim_tenants ALTER vim_tenant_id DROP DEFAULT;"
+     sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NOT NULL COMMENT 'Tenant ID in the VIM DB' AFTER name;" ||
+         ! echo "Warning changing column vim_tenant_id at vim_tenants!"
+     sql "ALTER TABLE vim_tenants ADD UNIQUE INDEX vim_tenant_id (vim_tenant_id);" ||
+         ! echo "Warning add unique index vim_tenant_id at vim_tenants!"
+     sql "DELETE FROM schema_version WHERE version_int='3';"
+ }
+ function upgrade_to_4(){
+     # echo "    upgrade database from version 0.3 to version 0.4"
+     echo "      Enlarge graph field at tables 'sce_vnfs', 'sce_nets'"
+     for table in sce_vnfs sce_nets
+     do
+         sql "ALTER TABLE $table CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
+     done
+     sql "ALTER TABLE datacenters CHANGE COLUMN type type VARCHAR(36) NOT NULL DEFAULT 'openvim' AFTER description;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (4, '0.4', '0.3.5', 'enlarge graph field at sce_vnfs/nets', '2015-10-20');"
+ }
+ function downgrade_from_4(){
+     # echo "    downgrade database from version 0.4 to version 0.3"
+     echo "      Shorten back graph field at tables 'sce_vnfs', 'sce_nets'"
+     for table in sce_vnfs sce_nets
+     do
+         sql "ALTER TABLE $table CHANGE COLUMN graph graph VARCHAR(2000) NULL DEFAULT NULL AFTER modified_at;"
+     done
+     sql "ALTER TABLE datacenters CHANGE COLUMN type type ENUM('openvim','openstack') NOT NULL DEFAULT 'openvim' AFTER description;"
+     sql "DELETE FROM schema_version WHERE version_int='4';"
+ }
+ function upgrade_to_5(){
+     # echo "    upgrade database from version 0.4 to version 0.5"
+     echo "      Add 'mac' field for bridge interfaces in table 'interfaces'"
+     sql "ALTER TABLE interfaces ADD COLUMN mac CHAR(18) NULL DEFAULT NULL AFTER model;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (5, '0.5', '0.4.1', 'Add mac address for bridge interfaces', '2015-12-14');"
+ }
+ function downgrade_from_5(){
+     # echo "    downgrade database from version 0.5 to version 0.4"
+     echo "      Remove 'mac' field for bridge interfaces in table 'interfaces'"
+     sql "ALTER TABLE interfaces DROP COLUMN mac;"
+     sql "DELETE FROM schema_version WHERE version_int='5';"
+ }
+ function upgrade_to_6(){
+     # echo "    upgrade database from version 0.5 to version 0.6"
+     echo "      Add 'descriptor' field text to 'vnfd', 'scenarios'"
+     sql "ALTER TABLE vnfs ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the VNF' AFTER class;"
+     sql "ALTER TABLE scenarios ADD COLUMN descriptor TEXT NULL DEFAULT NULL COMMENT 'Original text descriptor used for create the scenario' AFTER modified_at;"
+     echo "      Add 'last_error', 'vim_info' to 'instance_vms', 'instance_nets'"
+     sql "ALTER TABLE instance_vms  ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
+     sql "ALTER TABLE instance_vms  ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
+     sql "ALTER TABLE instance_vms  CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD' AFTER vim_vm_id;"
+     sql "ALTER TABLE instance_nets ADD COLUMN error_msg VARCHAR(1024) NULL DEFAULT NULL AFTER status;"
+     sql "ALTER TABLE instance_nets ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER error_msg;"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR','INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
+     echo "      Add 'mac_address', 'ip_address', 'vim_info' to 'instance_interfaces'"
+     sql "ALTER TABLE instance_interfaces ADD COLUMN mac_address VARCHAR(32) NULL DEFAULT NULL AFTER vim_interface_id, ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac_address, ADD COLUMN vim_info TEXT NULL DEFAULT NULL AFTER ip_address;"
+     echo "      Add 'sce_vnf_id','datacenter_id','vim_tenant_id' field to 'instance_vnfs'"
+     sql "ALTER TABLE instance_vnfs ADD COLUMN sce_vnf_id VARCHAR(36) NULL DEFAULT NULL AFTER vnf_id, ADD CONSTRAINT FK_instance_vnfs_sce_vnfs FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+     sql "ALTER TABLE instance_vnfs ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_vnf_id, ADD CONSTRAINT FK_instance_vnfs_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+     sql "ALTER TABLE instance_vnfs ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_vnfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+     echo "      Add 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field to 'instance_nets'"
+     sql "ALTER TABLE instance_nets ADD COLUMN sce_net_id VARCHAR(36) NULL DEFAULT NULL AFTER instance_scenario_id, ADD CONSTRAINT FK_instance_nets_sce_nets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+     sql "ALTER TABLE instance_nets ADD COLUMN net_id VARCHAR(36) NULL DEFAULT NULL AFTER sce_net_id, ADD CONSTRAINT FK_instance_nets_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+     sql "ALTER TABLE instance_nets ADD COLUMN vim_tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER net_id, ADD CONSTRAINT FK_instance_nets_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+     sql "ALTER TABLE instance_nets ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER vim_tenant_id, ADD CONSTRAINT FK_instance_nets_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid) ON UPDATE RESTRICT ON DELETE RESTRICT;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (6, '0.6', '0.4.2', 'Adding VIM status info', '2015-12-22');"
+ }
+ function downgrade_from_6(){
+     # echo "    downgrade database from version 0.6 to version 0.5"
+     echo "      Remove 'descriptor' field from 'vnfd', 'scenarios' tables"
+     sql "ALTER TABLE vnfs      DROP COLUMN descriptor;"
+     sql "ALTER TABLE scenarios DROP COLUMN descriptor;"
+     echo "      Remove 'last_error', 'vim_info' from 'instance_vms', 'instance_nets'"
+     sql "ALTER TABLE instance_vms  DROP COLUMN error_msg, DROP COLUMN vim_info;"
+     sql "ALTER TABLE instance_vms  CHANGE COLUMN status status ENUM('ACTIVE','PAUSED','INACTIVE','CREATING','ERROR','DELETING') NOT NULL DEFAULT 'CREATING' AFTER vim_vm_id;"
+     sql "ALTER TABLE instance_nets DROP COLUMN error_msg, DROP COLUMN vim_info;"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR') NOT NULL DEFAULT 'BUILD' AFTER instance_scenario_id;"
+     echo "      Remove 'mac_address', 'ip_address', 'vim_info' from 'instance_interfaces'"
+     sql "ALTER TABLE instance_interfaces DROP COLUMN mac_address, DROP COLUMN ip_address, DROP COLUMN vim_info;"
+     echo "      Remove 'sce_vnf_id','datacenter_id','vim_tenant_id' field from 'instance_vnfs'"
+     sql "ALTER TABLE instance_vnfs DROP COLUMN sce_vnf_id, DROP FOREIGN KEY FK_instance_vnfs_sce_vnfs;"
+     sql "ALTER TABLE instance_vnfs DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_vnfs_vim_tenants;"
+     sql "ALTER TABLE instance_vnfs DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_vnfs_datacenters;"
+     echo "      Remove 'sce_net_id','net_id','datacenter_id','vim_tenant_id' field from 'instance_nets'"
+     sql "ALTER TABLE instance_nets DROP COLUMN sce_net_id, DROP FOREIGN KEY FK_instance_nets_sce_nets;"
+     sql "ALTER TABLE instance_nets DROP COLUMN net_id, DROP FOREIGN KEY FK_instance_nets_nets;"
+     sql "ALTER TABLE instance_nets DROP COLUMN vim_tenant_id, DROP FOREIGN KEY FK_instance_nets_vim_tenants;"
+     sql "ALTER TABLE instance_nets DROP COLUMN datacenter_id, DROP FOREIGN KEY FK_instance_nets_datacenters;"
+     sql "DELETE FROM schema_version WHERE version_int='6';"
+ }
+ function upgrade_to_7(){
+     # echo "    upgrade database from version 0.6 to version 0.7"
+     echo "      Change created_at, modified_at from timestamp to unix float at all database"
+     for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
+     do
+          echo -en "        $table               \r"
+          sql "ALTER TABLE $table ADD COLUMN created_at_ DOUBLE NOT NULL after created_at;"
+          echo "UPDATE $table SET created_at_=unix_timestamp(created_at);"
+          sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at DOUBLE NOT NULL;"
+          [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at DOUBLE NULL DEFAULT NULL;"
+     done
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (7, '0.7', '0.4.3', 'Changing created_at time at database', '2016-01-25');"
+ }
+ function downgrade_from_7(){
+     # echo "    downgrade database from version 0.7 to version 0.6"
+     echo "      Change back created_at, modified_at from unix float to timestamp at all database"
+     for table in datacenters datacenter_nets instance_nets instance_scenarios instance_vms instance_vnfs interfaces nets nfvo_tenants scenarios sce_interfaces sce_nets sce_vnfs tenants_datacenters vim_tenants vms vnfs uuids
+     do
+          echo -en "        $table               \r"
+          sql "ALTER TABLE $table ADD COLUMN created_at_ TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP after created_at;"
+          echo "UPDATE $table SET created_at_=from_unixtime(created_at);"
+          sql "ALTER TABLE $table DROP COLUMN created_at, CHANGE COLUMN created_at_ created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP;"
+          [[ $table == uuids ]] || sql "ALTER TABLE $table CHANGE COLUMN modified_at modified_at TIMESTAMP NULL DEFAULT NULL;"
+     done
+     echo "      Remove 'descriptor' field from 'vnfd', 'scenarios' tables"
+     sql "DELETE FROM schema_version WHERE version_int='7';"
+ }
+ function upgrade_to_8(){
+     # echo "    upgrade database from version 0.7 to version 0.8"
+     echo "      Change enalarge name, description to 255 at all database"
+     for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
+     do
+          echo -en "        $table               \r"
+          sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR(255) NOT NULL;"
+          sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(255) NULL DEFAULT NULL;"
+     done
+     echo -en "        interfaces           \r"
+     sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(255) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(255) NULL DEFAULT NULL;"
+     sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL;"
+     echo -en "        vim_tenants          \r"
+     sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(64) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(64) NULL DEFAULT NULL;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (8, '0.8', '0.4.32', 'Enlarging name at database', '2016-02-01');"
+ }
+ function downgrade_from_8(){
+     # echo "    downgrade database from version 0.8 to version 0.7"
+     echo "      Change back name,description to shorter length at all database"
+     for table in datacenters datacenter_nets flavors images instance_scenarios nets nfvo_tenants scenarios sce_nets sce_vnfs vms vnfs
+     do
+          name_length=50
+          [[ $table == flavors ]] || [[ $table == images ]] || name_length=36 
+          echo -en "        $table               \r"
+          sql "ALTER TABLE $table CHANGE COLUMN name name VARCHAR($name_length) NOT NULL;"
+          sql "ALTER TABLE $table CHANGE COLUMN description description VARCHAR(100) NULL DEFAULT NULL;"
+     done
+     echo -en "        interfaces           \r"
+     sql "ALTER TABLE interfaces CHANGE COLUMN internal_name internal_name VARCHAR(25) NOT NULL, CHANGE COLUMN external_name external_name VARCHAR(25) NULL DEFAULT NULL;"
+     echo -en "        vim_tenants          \r"
+     sql "ALTER TABLE vim_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(36) NULL DEFAULT NULL;"
+     sql "ALTER TABLE vim_tenants CHANGE COLUMN user user VARCHAR(36) NULL DEFAULT NULL, CHANGE COLUMN passwd passwd VARCHAR(50) NULL DEFAULT NULL;"
+     sql "DELETE FROM schema_version WHERE version_int='8';"
+ }
+ function upgrade_to_9(){
+     # echo "    upgrade database from version 0.8 to version 0.9"
+     echo "      Add more status to 'instance_vms'"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (9, '0.9', '0.4.33', 'Add ACTIVE:NoMgmtIP to instance_vms table', '2016-02-05');"
+ }
+ function downgrade_from_9(){
+     # echo "    downgrade database from version 0.9 to version 0.8"
+     echo "      Add more status to 'instance_vms'"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
+     sql "DELETE FROM schema_version WHERE version_int='9';"
+ }
+ function upgrade_to_10(){
+     # echo "    upgrade database from version 0.9 to version 0.10"
+     echo "      add tenant to 'vnfs'"
+     sql "ALTER TABLE vnfs ADD COLUMN tenant_id VARCHAR(36) NULL DEFAULT NULL AFTER name, ADD CONSTRAINT FK_vnfs_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE SET NULL, CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'false' AFTER physical, DROP INDEX name, DROP INDEX path, DROP COLUMN path;"
+     sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
+     sql "ALTER TABLE scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
+     sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
+     sql "ALTER TABLE instance_scenarios CHANGE COLUMN nfvo_tenant_id tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid);"
+     echo "      rename 'vim_tenants' table to 'datacenter_tenants'"
+     echo "RENAME TABLE vim_tenants TO datacenter_tenants;"
+     for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
+     do
+         NULL="NOT NULL"
+         [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
+         sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_vim_tenants;"
+         sql "ALTER TABLE ${table} ALTER vim_tenant_id DROP DEFAULT;"
+         sql "ALTER TABLE ${table} CHANGE COLUMN vim_tenant_id datacenter_tenant_id VARCHAR(36)  ${NULL} AFTER datacenter_id, ADD CONSTRAINT FK_${table}_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid); "
+     done    
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (10, '0.10', '0.4.36', 'tenant management of vnfs,scenarios', '2016-03-08');"
+ }
+ function downgrade_from_10(){
+     # echo "    downgrade database from version 0.10 to version 0.9"
+     echo "      remove tenant from 'vnfs'"
+     sql "ALTER TABLE vnfs DROP COLUMN tenant_id, DROP FOREIGN KEY FK_vnfs_nfvo_tenants, ADD UNIQUE INDEX name (name), ADD COLUMN path VARCHAR(100) NULL DEFAULT NULL COMMENT 'Path where the YAML descriptor of the VNF can be found. NULL if it is a physical network function.' AFTER name, ADD UNIQUE INDEX path (path), CHANGE COLUMN public public ENUM('true','false') NOT NULL DEFAULT 'true' AFTER physical;"
+     sql "ALTER TABLE scenarios DROP FOREIGN KEY FK_scenarios_nfvo_tenants;"
+     sql "ALTER TABLE scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
+     sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_nfvo_tenants;"
+     sql "ALTER TABLE instance_scenarios CHANGE COLUMN tenant_id nfvo_tenant_id VARCHAR(36) NULL DEFAULT NULL after name, ADD CONSTRAINT FK_instance_scenarios_nfvo_tenants FOREIGN KEY (nfvo_tenant_id) REFERENCES nfvo_tenants (uuid);"
+     echo "      rename back 'datacenter_tenants' table to 'vim_tenants'"
+     echo "RENAME TABLE datacenter_tenants TO vim_tenants;"
+     for table in tenants_datacenters instance_scenarios instance_vnfs instance_nets
+     do
+         sql "ALTER TABLE ${table} DROP FOREIGN KEY FK_${table}_datacenter_tenants;"
+         NULL="NOT NULL"
+         [[ $table == instance_vnfs ]] && NULL="NULL DEFAULT NULL"
+         sql "ALTER TABLE ${table} ALTER datacenter_tenant_id DROP DEFAULT;"
+         sql "ALTER TABLE ${table} CHANGE COLUMN datacenter_tenant_id vim_tenant_id VARCHAR(36) $NULL AFTER datacenter_id, ADD CONSTRAINT FK_${table}_vim_tenants FOREIGN KEY (vim_tenant_id) REFERENCES vim_tenants (uuid); "
+     done    
+     sql "DELETE FROM schema_version WHERE version_int='10';"
+ }
+ function upgrade_to_11(){
+     # echo "    upgrade database from version 0.10 to version 0.11"
+     echo "      remove unique name at 'scenarios', 'instance_scenarios'"
+     sql "ALTER TABLE scenarios DROP INDEX name;"
+     sql "ALTER TABLE instance_scenarios DROP INDEX name;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (11, '0.11', '0.4.43', 'remove unique name at scenarios,instance_scenarios', '2016-07-18');"
+ }
+ function downgrade_from_11(){
+     # echo "    downgrade database from version 0.11 to version 0.10"
+     echo "      add unique name at 'scenarios', 'instance_scenarios'"
+     sql "ALTER TABLE scenarios ADD UNIQUE INDEX name (name);"
+     sql "ALTER TABLE instance_scenarios ADD UNIQUE INDEX name (name);"
+     sql "DELETE FROM schema_version WHERE version_int='11';"
+ }
+ function upgrade_to_12(){
+     # echo "    upgrade database from version 0.11 to version 0.12"
+     echo "      create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to 'interfaces' and 'sce_interfaces'"
+     sql "CREATE TABLE IF NOT EXISTS ip_profiles (
+       id INT(11) NOT NULL AUTO_INCREMENT,
+       net_id VARCHAR(36) NULL DEFAULT NULL,
+       sce_net_id VARCHAR(36) NULL DEFAULT NULL,
+       instance_net_id VARCHAR(36) NULL DEFAULT NULL,
+       ip_version ENUM('IPv4','IPv6') NOT NULL DEFAULT 'IPv4',
+       subnet_address VARCHAR(64) NULL DEFAULT NULL,
+       gateway_address VARCHAR(64) NULL DEFAULT NULL,
+       dns_address VARCHAR(64) NULL DEFAULT NULL,
+       dhcp_enabled ENUM('true','false') NOT NULL DEFAULT 'true',
+       dhcp_start_address VARCHAR(64) NULL DEFAULT NULL,
+       dhcp_count INT(11) NULL DEFAULT NULL,
+       PRIMARY KEY (id),
+       CONSTRAINT FK_ipprofiles_nets FOREIGN KEY (net_id) REFERENCES nets (uuid) ON DELETE CASCADE,
+       CONSTRAINT FK_ipprofiles_scenets FOREIGN KEY (sce_net_id) REFERENCES sce_nets (uuid) ON DELETE CASCADE,
+       CONSTRAINT FK_ipprofiles_instancenets FOREIGN KEY (instance_net_id) REFERENCES instance_nets (uuid) ON DELETE CASCADE  )
+         COMMENT='Table containing the IP parameters of a network, either a net, a sce_net or an instance_net.'
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     sql "ALTER TABLE interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER mac;"
+     sql "ALTER TABLE sce_interfaces ADD COLUMN ip_address VARCHAR(64) NULL DEFAULT NULL AFTER interface_id;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (12, '0.12', '0.4.46', 'create ip_profiles table, with foreign keys to all nets tables, and add ip_address column to interfaces and sce_interfaces', '2016-08-29');"
+ }
+ function downgrade_from_12(){
+     # echo "    downgrade database from version 0.12 to version 0.11"
+     echo "      delete ip_profiles table, and remove ip_address column in 'interfaces' and 'sce_interfaces'"
+     sql "DROP TABLE IF EXISTS ip_profiles;"
+     sql "ALTER TABLE interfaces DROP COLUMN ip_address;"
+     sql "ALTER TABLE sce_interfaces DROP COLUMN ip_address;"
+     sql "DELETE FROM schema_version WHERE version_int='12';"
+ }
+ function upgrade_to_13(){
+     # echo "    upgrade database from version 0.12 to version 0.13"
+     echo "      add cloud_config at 'scenarios', 'instance_scenarios'"
+     sql "ALTER TABLE scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER descriptor;"
+     sql "ALTER TABLE instance_scenarios ADD COLUMN cloud_config MEDIUMTEXT NULL DEFAULT NULL AFTER modified_at;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (13, '0.13', '0.4.47', 'insert cloud-config at scenarios,instance_scenarios', '2016-08-30');"
+ }
+ function downgrade_from_13(){
+     # echo "    downgrade database from version 0.13 to version 0.12"
+     echo "      remove cloud_config at 'scenarios', 'instance_scenarios'"
+     sql "ALTER TABLE scenarios DROP COLUMN cloud_config;"
+     sql "ALTER TABLE instance_scenarios DROP COLUMN cloud_config;"
+     sql "DELETE FROM schema_version WHERE version_int='13';"
+ }
+ function upgrade_to_14(){
+     # echo "    upgrade database from version 0.13 to version 0.14"
+     echo "      remove unique index vim_net_id, instance_scenario_id at table 'instance_nets'"
+     sql "ALTER TABLE instance_nets DROP INDEX vim_net_id_instance_scenario_id;"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN external created ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'Created or already exists at VIM' AFTER multipoint;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (14, '0.14', '0.4.57', 'remove unique index vim_net_id, instance_scenario_id', '2016-09-26');"
+ }
+ function downgrade_from_14(){
+     # echo "    downgrade database from version 0.14 to version 0.13"
+     echo "      remove cloud_config at 'scenarios', 'instance_scenarios'"
+     sql "ALTER TABLE instance_nets ADD UNIQUE INDEX vim_net_id_instance_scenario_id (vim_net_id, instance_scenario_id);"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN created external ENUM('true','false') NOT NULL DEFAULT 'false' COMMENT 'If external, means that it already exists at VIM' AFTER multipoint;"
+     sql "DELETE FROM schema_version WHERE version_int='14';"
+ }
+ function upgrade_to_15(){
+     # echo "    upgrade database from version 0.14 to version 0.15"
+     echo "      add columns 'universal_name' and 'checksum' at table 'images', add unique index universal_name_checksum, and change location to allow NULL; change column 'image_path' in table 'vms' to allow NULL"
+     sql "ALTER TABLE images ADD COLUMN checksum VARCHAR(32) NULL DEFAULT NULL AFTER name;"
+     sql "ALTER TABLE images ALTER location DROP DEFAULT;"
+     sql "ALTER TABLE images ADD COLUMN universal_name VARCHAR(255) NULL AFTER name, CHANGE COLUMN location location VARCHAR(200) NULL AFTER checksum, ADD UNIQUE INDEX universal_name_checksum (universal_name, checksum);"
+     sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
+     sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (15, '0.15', '0.4.59', 'add columns universal_name and checksum at table images, add unique index universal_name_checksum, and change location to allow NULL; change column image_path in table vms to allow NULL', '2016-09-27');"
+ }
+ function downgrade_from_15(){
+     # echo "    downgrade database from version 0.15 to version 0.14"
+     echo "      remove columns 'universal_name' and 'checksum' from table 'images', remove index universal_name_checksum, change location NOT NULL; change column 'image_path' in table 'vms' to NOT NULL"
+     sql "ALTER TABLE images DROP INDEX universal_name_checksum;"
+     sql "ALTER TABLE images ALTER location DROP DEFAULT;"
+     sql "ALTER TABLE images CHANGE COLUMN location location VARCHAR(200) NOT NULL AFTER checksum;"
+     sql "ALTER TABLE images DROP COLUMN universal_name;"
+     sql "ALTER TABLE images DROP COLUMN checksum;"
+     sql "ALTER TABLE vms ALTER image_path DROP DEFAULT;"
+     sql "ALTER TABLE vms CHANGE COLUMN image_path image_path VARCHAR(100) NOT NULL COMMENT 'Path where the image of the VM is located' AFTER image_id;"
+     sql "DELETE FROM schema_version WHERE version_int='15';"
+ }
+ function upgrade_to_16(){
+     # echo "    upgrade database from version 0.15 to version 0.16"
+     echo "      add column 'config' at table 'datacenter_tenants', enlarge 'vim_tenant_name/id'"
+     sql "ALTER TABLE datacenter_tenants ADD COLUMN config VARCHAR(4000) NULL DEFAULT NULL AFTER passwd;"
+     sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(256) NULL DEFAULT NULL AFTER datacenter_id;"
+     sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(256) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (16, '0.16', '0.5.2', 'enlarge vim_tenant_name and id. New config at datacenter_tenants', '2016-10-11');"
+ }
+ function downgrade_from_16(){
+     # echo "    downgrade database from version 0.16 to version 0.15"
+     echo "      remove column 'config' at table 'datacenter_tenants', restoring lenght 'vim_tenant_name/id'"
+     sql "ALTER TABLE datacenter_tenants DROP COLUMN config;"
+     sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_name vim_tenant_name VARCHAR(64) NULL DEFAULT NULL AFTER datacenter_id;"
+     sql "ALTER TABLE datacenter_tenants CHANGE COLUMN vim_tenant_id vim_tenant_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'Tenant ID at VIM' AFTER vim_tenant_name;"
+     sql "DELETE FROM schema_version WHERE version_int='16';"
+ }
+ function upgrade_to_17(){
+     # echo "    upgrade database from version 0.16 to version 0.17"
+     echo "      add column 'extended' at table 'datacenter_flavors'"
+     sql "ALTER TABLE datacenters_flavors ADD extended varchar(2000) NULL COMMENT 'Extra description json format of additional devices';"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (17, '0.17', '0.5.3', 'Extra description json format of additional devices in datacenter_flavors', '2016-12-20');"
+ }
+ function downgrade_from_17(){
+     # echo "    downgrade database from version 0.17 to version 0.16"
+     echo "      remove column 'extended' from table 'datacenter_flavors'"
+     sql "ALTER TABLE datacenters_flavors DROP COLUMN extended;"
+     sql "DELETE FROM schema_version WHERE version_int='17';"
+ }
+ function upgrade_to_18(){
+     # echo "    upgrade database from version 0.17 to version 0.18"
+     echo "      add columns 'floating_ip' and 'port_security' at tables 'interfaces' and 'instance_interfaces'"
+     sql "ALTER TABLE interfaces ADD floating_ip BOOL DEFAULT 0 NOT NULL COMMENT 'Indicates if a floating_ip must be associated to this interface';"
+     sql "ALTER TABLE interfaces ADD port_security BOOL DEFAULT 1 NOT NULL COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled';"
+     sql "ALTER TABLE instance_interfaces ADD floating_ip BOOL DEFAULT 0 NOT NULL COMMENT 'Indicates if a floating_ip must be associated to this interface';"
+     sql "ALTER TABLE instance_interfaces ADD port_security BOOL DEFAULT 1 NOT NULL COMMENT 'Indicates if port security must be enabled or disabled. By default it is enabled';"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (18, '0.18', '0.5.4', 'Add columns \'floating_ip\' and \'port_security\' at tables \'interfaces\' and \'instance_interfaces\'', '2017-01-09');"
+ }
+ function downgrade_from_18(){
+     # echo "    downgrade database from version 0.18 to version 0.17"
+     echo "      remove columns 'floating_ip' and 'port_security' from tables 'interfaces' and 'instance_interfaces'"
+     sql "ALTER TABLE interfaces DROP COLUMN floating_ip;"
+     sql "ALTER TABLE interfaces DROP COLUMN port_security;"
+     sql "ALTER TABLE instance_interfaces DROP COLUMN floating_ip;"
+     sql "ALTER TABLE instance_interfaces DROP COLUMN port_security;"
+     sql "DELETE FROM schema_version WHERE version_int='18';"
+ }
+ function upgrade_to_19(){
+     # echo "    upgrade database from version 0.18 to version 0.19"
+     echo "      add column 'boot_data' at table 'vms'"
+     sql "ALTER TABLE vms ADD COLUMN boot_data TEXT NULL DEFAULT NULL AFTER image_path;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (19, '0.19', '0.5.5', 'Extra Boot-data content at VNFC (vms)', '2017-01-11');"
+ }
+ function downgrade_from_19(){
+     # echo "    downgrade database from version 0.19 to version 0.18"
+     echo "      remove column 'boot_data' from table 'vms'"
+     sql "ALTER TABLE vms DROP COLUMN boot_data;"
+     sql "DELETE FROM schema_version WHERE version_int='19';"
+ }
+ function upgrade_to_20(){
+     # echo "    upgrade database from version 0.19 to version 0.20"
+     echo "      add column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
+     sql "ALTER TABLE instance_nets ADD sdn_net_id varchar(36) DEFAULT NULL NULL COMMENT 'Network id in ovim';"
+     sql "ALTER TABLE instance_interfaces ADD sdn_port_id varchar(36) DEFAULT NULL NULL COMMENT 'Port id in ovim';"
+     sql "ALTER TABLE instance_interfaces ADD compute_node varchar(100) DEFAULT NULL NULL COMMENT 'Compute node id used to specify the SDN port mapping';"
+     sql "ALTER TABLE instance_interfaces ADD pci varchar(12) DEFAULT NULL NULL COMMENT 'PCI of the physical port in the host';"
+     sql "ALTER TABLE instance_interfaces ADD vlan SMALLINT UNSIGNED DEFAULT NULL NULL COMMENT 'VLAN tag used by the port';"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (20, '0.20', '0.5.9', 'Added columns to store dataplane connectivity info', '2017-03-13');"
+ }
+ function downgrade_from_20(){
+     # echo "    downgrade database from version 0.20 to version 0.19"
+     echo "      remove column 'sdn_net_id' at table 'instance_nets' and columns 'sdn_port_id', 'compute_node', 'pci' and 'vlan' to table 'instance_interfaces'"
+     sql "ALTER TABLE instance_nets DROP COLUMN sdn_net_id;"
+     sql "ALTER TABLE instance_interfaces DROP COLUMN vlan;"
+     sql "ALTER TABLE instance_interfaces DROP COLUMN pci;"
+     sql "ALTER TABLE instance_interfaces DROP COLUMN compute_node;"
+     sql "ALTER TABLE instance_interfaces DROP COLUMN sdn_port_id;"
+     sql "DELETE FROM schema_version WHERE version_int='20';"
+ }
+ function upgrade_to_21(){
+     # echo "    upgrade database from version 0.20 to version 0.21"
+     echo "      edit 'instance_nets' to allow instance_scenario_id=None"
+     sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NULL;"
+     echo "      enlarge column 'dns_address' at table 'ip_profiles'"
+     sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(255) DEFAULT NULL NULL "\
+          "comment 'dns ip list separated by semicolon';"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (21, '0.21', '0.5.15', 'Edit instance_nets to allow instance_scenario_id=None and enlarge column dns_address at table ip_profiles', '2017-06-02');"
+ }
+ function downgrade_from_21(){
+     # echo "    downgrade database from version 0.21 to version 0.20"
+     echo "      edit 'instance_nets' to disallow instance_scenario_id=None"
+     #Delete all lines with a instance_scenario_id=NULL in order to disable this option
+     sql "DELETE FROM instance_nets WHERE instance_scenario_id IS NULL;"
+     sql "ALTER TABLE instance_nets MODIFY COLUMN instance_scenario_id varchar(36) NOT NULL;"
+     echo "      shorten column 'dns_address' at table 'ip_profiles'"
+     sql "ALTER TABLE ip_profiles MODIFY dns_address varchar(64) DEFAULT NULL NULL;"
+     sql "DELETE FROM schema_version WHERE version_int='21';"
+ }
+ function upgrade_to_22(){
+     # echo "    upgrade database from version 0.21 to version 0.22"
+     echo "      Changed type of ram in 'flavors' from SMALLINT to MEDIUMINT"
+     sql "ALTER TABLE flavors CHANGE COLUMN ram ram MEDIUMINT(7) UNSIGNED NULL DEFAULT NULL AFTER disk;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (22, '0.22', '0.5.16', 'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-06-02');"
+ }
+ function downgrade_from_22(){
+     # echo "    downgrade database from version 0.22 to version 0.21"
+     echo "      Changed type of ram in 'flavors' from MEDIUMINT to SMALLINT"
+     sql "ALTER TABLE flavors CHANGE COLUMN ram ram SMALLINT(5) UNSIGNED NULL DEFAULT NULL AFTER disk;"
+     sql "DELETE FROM schema_version WHERE version_int='22';"
+ }
+ function upgrade_to_23(){
+     # echo "    upgrade database from version 0.22 to version 0.23"
+     echo "      add column 'availability_zone' at table 'vms'"
+     sql "ALTER TABLE vms ADD COLUMN availability_zone VARCHAR(255) NULL AFTER modified_at;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES (23, '0.23', '0.5.20',"\
+         "'Changed type of ram in flavors from SMALLINT to MEDIUMINT', '2017-08-29');"
+ }
+ function downgrade_from_23(){
+     # echo "    downgrade database from version 0.23 to version 0.22"
+     echo "      remove column 'availability_zone' from table 'vms'"
+     sql "ALTER TABLE vms DROP COLUMN availability_zone;"
+     sql "DELETE FROM schema_version WHERE version_int='23';"
+ }
+ function upgrade_to_24(){
+     # echo "    upgrade database from version 0.23 to version 0.24"
+     echo "      Add 'count' to table 'vms'"
+     sql "ALTER TABLE vms ADD COLUMN count SMALLINT NOT NULL DEFAULT '1' AFTER vnf_id;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (24, '0.24', '0.5.21', 'Added vnfd fields', '2017-08-29');"
+ }
+ function downgrade_from_24(){
+     # echo "    downgrade database from version 0.24 to version 0.23"
+     echo "      Remove 'count' from table 'vms'"
+     sql "ALTER TABLE vms DROP COLUMN count;"
+     sql "DELETE FROM schema_version WHERE version_int='24';"
+ }
+ function upgrade_to_25(){
+     # echo "    upgrade database from version 0.24 to version 0.25"
+     echo "      Add 'osm_id','short_name','vendor' to tables 'vnfs', 'scenarios'"
+     for table in vnfs scenarios; do
+         sql "ALTER TABLE $table ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid, "\
+              "ADD UNIQUE INDEX osm_id_tenant_id (osm_id, tenant_id), "\
+              "ADD COLUMN short_name VARCHAR(255) NULL AFTER name, "\
+              "ADD COLUMN vendor VARCHAR(255) NULL AFTER description;"
+     done
+     sql "ALTER TABLE vnfs ADD COLUMN mgmt_access VARCHAR(2000) NULL AFTER vendor;"
+     sql "ALTER TABLE vms ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+     sql "ALTER TABLE sce_vnfs ADD COLUMN member_vnf_index SMALLINT(6) NULL DEFAULT NULL AFTER uuid;"
+     echo "      Add 'security_group' to table 'ip_profiles'"
+     sql "ALTER TABLE ip_profiles ADD COLUMN security_group VARCHAR(255) NULL DEFAULT NULL AFTER dhcp_count;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (25, '0.25', '0.5.22', 'Added osm_id to vnfs,scenarios', '2017-09-01');"
+ }
+ function downgrade_from_25(){
+     # echo "    downgrade database from version 0.25 to version 0.24"
+     echo "      Remove 'osm_id','short_name','vendor' from tables 'vnfs', 'scenarios'"
+     for table in vnfs scenarios; do
+         sql "ALTER TABLE $table DROP INDEX  osm_id_tenant_id, DROP COLUMN osm_id, "\
+              "DROP COLUMN short_name, DROP COLUMN vendor;"
+     done
+     sql "ALTER TABLE vnfs DROP COLUMN mgmt_access;"
+     sql "ALTER TABLE vms DROP COLUMN osm_id;"
+     sql "ALTER TABLE sce_vnfs DROP COLUMN member_vnf_index;"
+     echo "      Remove 'security_group' from table 'ip_profiles'"
+     sql "ALTER TABLE ip_profiles DROP COLUMN security_group;"
+     sql "DELETE FROM schema_version WHERE version_int='25';"
+ }
+ function upgrade_to_26(){
+     echo "      Add name to table datacenter_tenants"
+     sql "ALTER TABLE datacenter_tenants ADD COLUMN name VARCHAR(255) NULL AFTER uuid;"
+     sql "UPDATE datacenter_tenants as dt join datacenters as d on dt.datacenter_id = d.uuid set dt.name=d.name;"
+     echo "      Add 'SCHEDULED' to 'status' at tables 'instance_nets', 'instance_vms'"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
+          "'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') "\
+          "NOT NULL DEFAULT 'BUILD';"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','INACTIVE','DOWN','BUILD','ERROR',"\
+          "'VIM_ERROR','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD';"
+     echo "      Enlarge pci at instance_interfaces to allow extended pci for SDN por mapping"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(50) NULL DEFAULT NULL COMMENT 'PCI of the "\
+         "physical port in the host' AFTER compute_node;"
+     for t in flavor image; do
+         echo "      Change 'datacenters_${t}s' to point to datacenter_tenant, add status, vim_info"
+         sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_vim_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
+             "datacenter_id, ADD COLUMN status ENUM('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','DELETED',"\
+             "'SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD' AFTER vim_id, ADD COLUMN vim_info "\
+             "TEXT NULL AFTER status;"
+         sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.datacenter_id=df.datacenter_id "\
+             "set df.datacenter_vim_id=dt.uuid;"
+         sql "DELETE FROM datacenters_${t}s WHERE datacenter_vim_id is NULL;"
+         sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_vim_id datacenter_vim_id VARCHAR(36) NOT NULL;"
+         sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK_datacenters_${t}s_datacenter_tenants FOREIGN KEY "\
+             "(datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE;"
+         sql "ALTER TABLE datacenters_${t}s DROP FOREIGN KEY FK__datacenters_${t:0:1};"
+         sql "ALTER TABLE datacenters_${t}s DROP COLUMN datacenter_id;"
+     done
+     echo "      Decoupling 'instance_interfaces' from scenarios/vnfs to allow scale actions"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT NULL;"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NULL DEFAULT NULL;"
+       sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
+       sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
+           "REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+     echo "      Decoupling 'instance_vms' from scenarios/vnfs to allow scale actions"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(128) NULL DEFAULT NULL;"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NULL DEFAULT NULL;"
+       sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
+       sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
+           "REFERENCES vms (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+     echo "      Decoupling 'instance_nets' from scenarios/vnfs to allow scale actions"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL;"
+     echo "      Decoupling 'instance_scenarios' from scenarios"
+     sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NULL DEFAULT NULL;"
+       sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
+       sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
+           "REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE SET NULL;"
+     echo "      Create table instance_actions, vim_actions"
+     sql "CREATE TABLE IF NOT EXISTS instance_actions (
+           uuid VARCHAR(36) NOT NULL,
+           tenant_id VARCHAR(36) NULL DEFAULT NULL,
+           instance_id VARCHAR(36) NULL DEFAULT NULL,
+           description VARCHAR(64) NULL DEFAULT NULL COMMENT 'CREATE, DELETE, SCALE OUT/IN, ...',
+           number_tasks SMALLINT(6) NOT NULL DEFAULT '1',
+           number_done SMALLINT(6) NOT NULL DEFAULT '0',
+           number_failed SMALLINT(6) NOT NULL DEFAULT '0',
+           created_at DOUBLE NOT NULL,
+           modified_at DOUBLE NULL DEFAULT NULL,
+           PRIMARY KEY (uuid),
+           INDEX FK_actions_tenants (tenant_id),
+           CONSTRAINT FK_actions_tenant FOREIGN KEY (tenant_id) REFERENCES nfvo_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+           COMMENT='Contains client actions over instances'
+           COLLATE='utf8_general_ci'
+           ENGINE=InnoDB;"
+     sql "CREATE TABLE IF NOT EXISTS vim_actions (
+           instance_action_id VARCHAR(36) NOT NULL,
+           task_index INT(6) NOT NULL,
+           datacenter_vim_id VARCHAR(36) NOT NULL,
+           vim_id VARCHAR(64) NULL DEFAULT NULL,
+           action VARCHAR(36) NOT NULL COMMENT 'CREATE,DELETE,START,STOP...',
+           item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored',
+           item_id VARCHAR(36) NULL DEFAULT NULL COMMENT 'uuid of the entry in the table',
+           status ENUM('SCHEDULED', 'BUILD', 'DONE', 'FAILED', 'SUPERSEDED') NOT NULL DEFAULT 'SCHEDULED',
+           extra TEXT NULL DEFAULT NULL COMMENT 'json with params:, depends_on: for the task',
+           error_msg VARCHAR(1024) NULL DEFAULT NULL,
+           created_at DOUBLE NOT NULL,
+           modified_at DOUBLE NULL DEFAULT NULL,
+           PRIMARY KEY (task_index, instance_action_id),
+           INDEX FK_actions_instance_actions (instance_action_id),
+           CONSTRAINT FK_actions_instance_actions FOREIGN KEY (instance_action_id) REFERENCES instance_actions (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+           INDEX FK_actions_vims (datacenter_vim_id),
+           CONSTRAINT FK_actions_vims FOREIGN KEY (datacenter_vim_id) REFERENCES datacenter_tenants (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+           COMMENT='Table with the individual VIM actions.'
+           COLLATE='utf8_general_ci'
+           ENGINE=InnoDB;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (26, '0.26', '0.5.23', 'Several changes', '2017-09-09');"
+ }
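+ # Illustration only (commented out, not part of the migration): how the two new
+ # tables are meant to be read together. '@instance' is a made-up placeholder for
+ # an instance_scenarios uuid.
+ #     SELECT ia.description, ia.number_done, ia.number_tasks, va.task_index, va.action, va.status
+ #         FROM instance_actions as ia
+ #         JOIN vim_actions as va ON va.instance_action_id = ia.uuid
+ #         WHERE ia.instance_id = @instance
+ #         ORDER BY va.task_index;
+ # Each client action (CREATE, DELETE, SCALE OUT/IN, ...) owns an ordered set of
+ # per-VIM tasks keyed by (instance_action_id, task_index).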
+ function downgrade_from_26(){
+     echo "      Remove name from table datacenter_tenants"
+     sql "ALTER TABLE datacenter_tenants DROP COLUMN name;"
+     echo "      Remove 'SCHEDULED' from the 'status' at tables 'instance_nets', 'instance_vms'"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN status status ENUM('ACTIVE:NoMgmtIP','ACTIVE','INACTIVE','BUILD',"\
+          "'ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED') NOT NULL DEFAULT 'BUILD';"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN status status ENUM('ACTIVE','DOWN','BUILD','ERROR','VIM_ERROR',"\
+          "'INACTIVE','DELETED') NOT NULL DEFAULT 'BUILD';"
+     echo "      Shorten back pci at instance_interfaces to allow extended pci for SDN por mapping"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN pci pci VARCHAR(12) NULL DEFAULT NULL COMMENT 'PCI of the "\
+         "physical port in the host' AFTER compute_node;"
+     for t in flavor image; do
+         echo "      Restore back 'datacenters_${t}s'"
+         sql "ALTER TABLE datacenters_${t}s ADD COLUMN datacenter_id VARCHAR(36) NULL DEFAULT NULL AFTER "\
+             "${t}_id, DROP COLUMN status, DROP COLUMN vim_info ;"
+         sql "UPDATE datacenters_${t}s as df left join datacenter_tenants as dt on dt.uuid=df.datacenter_vim_id set "\
+             "df.datacenter_id=dt.datacenter_id;"
+         sql "ALTER TABLE datacenters_${t}s CHANGE COLUMN datacenter_id datacenter_id VARCHAR(36) NOT NULL;"
+         sql "ALTER TABLE datacenters_${t}s ADD CONSTRAINT FK__datacenters_${t:0:1} FOREIGN KEY "\
+             "(datacenter_id) REFERENCES datacenters (uuid), DROP FOREIGN KEY FK_datacenters_${t}s_datacenter_tenants, "\
+             "DROP COLUMN datacenter_vim_id;"
+     done
+     echo "      Restore back 'instance_interfaces' coupling to scenarios/vnfs"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(36) NULL DEFAULT NULL;"
+       sql "ALTER TABLE instance_interfaces DROP FOREIGN KEY FK_instance_ids"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN interface_id interface_id VARCHAR(36) NOT NULL;"
+       sql "ALTER TABLE instance_interfaces ADD CONSTRAINT FK_instance_ids FOREIGN KEY (interface_id) "\
+           "REFERENCES interfaces (uuid);"
+     echo "      Restore back 'instance_vms' coupling to scenarios/vnfs"
+     echo "      Decoupling 'instance vms' from scenarios/vnfs to allow scale actions"
+     sql "UPDATE instance_vms SET vim_vm_id='' WHERE vim_vm_id is NULL;"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NOT NULL;"
+       sql "ALTER TABLE instance_vms DROP FOREIGN KEY FK_instance_vms_vms;"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN vm_id vm_id VARCHAR(36) NOT NULL;"
+       sql "ALTER TABLE instance_vms ADD CONSTRAINT FK_instance_vms_vms FOREIGN KEY (vm_id) "\
+           "REFERENCES vms (uuid);"
+     echo "      Restore back 'instance_nets' coupling to scenarios/vnfs"
+     sql "UPDATE instance_nets SET vim_net_id='' WHERE vim_net_id is NULL;"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL;"
+     echo "      Restore back  'instance_scenarios' coupling to scenarios"
+       sql "ALTER TABLE instance_scenarios DROP FOREIGN KEY FK_instance_scenarios_scenarios;"
+     sql "ALTER TABLE instance_scenarios CHANGE COLUMN scenario_id scenario_id VARCHAR(36) NOT NULL;"
+       sql "ALTER TABLE instance_scenarios ADD CONSTRAINT FK_instance_scenarios_scenarios FOREIGN KEY (scenario_id) "\
+           "REFERENCES scenarios (uuid);"
+     echo "      Delete table instance_actions"
+     sql "DROP TABLE IF EXISTS vim_actions"
+     sql "DROP TABLE IF EXISTS instance_actions"
+     sql "DELETE FROM schema_version WHERE version_int='26';"
+ }
+ function upgrade_to_27(){
+     echo "      Added 'encrypted_RO_priv_key','RO_pub_key' to table 'nfvo_tenants'"
+     sql "ALTER TABLE nfvo_tenants ADD COLUMN encrypted_RO_priv_key VARCHAR(2000) NULL AFTER description;"
+     sql "ALTER TABLE nfvo_tenants ADD COLUMN RO_pub_key VARCHAR(510) NULL AFTER encrypted_RO_priv_key;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (27, '0.27', '0.5.25', 'Added encrypted_RO_priv_key,RO_pub_key to table nfvo_tenants', '2017-09-29');"
+ }
+ function downgrade_from_27(){
+     echo "      Remove 'encrypted_RO_priv_key','RO_pub_key' from table 'nfvo_tenants'"
+     sql "ALTER TABLE nfvo_tenants DROP COLUMN encrypted_RO_priv_key;"
+     sql "ALTER TABLE nfvo_tenants DROP COLUMN RO_pub_key;"
+     sql "DELETE FROM schema_version WHERE version_int='27';"
+ }
+ function upgrade_to_28(){
+     echo "      [Adding necessary tables for VNFFG]"
+     echo "      Adding sce_vnffgs"
+     sql "CREATE TABLE IF NOT EXISTS sce_vnffgs (
+             uuid VARCHAR(36) NOT NULL,
+             tenant_id VARCHAR(36) NULL DEFAULT NULL,
+             name VARCHAR(255) NOT NULL,
+             description VARCHAR(255) NULL DEFAULT NULL,
+             vendor VARCHAR(255) NULL DEFAULT NULL,
+             scenario_id VARCHAR(36) NOT NULL,
+             created_at DOUBLE NOT NULL,
+             modified_at DOUBLE NULL DEFAULT NULL,
+         PRIMARY KEY (uuid),
+         INDEX FK_scenarios_sce_vnffg (scenario_id),
+         CONSTRAINT FK_scenarios_vnffg FOREIGN KEY (scenario_id) REFERENCES scenarios (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     echo "      Adding sce_rsps"
+     sql "CREATE TABLE IF NOT EXISTS sce_rsps (
+             uuid VARCHAR(36) NOT NULL,
+             tenant_id VARCHAR(36) NULL DEFAULT NULL,
+             name VARCHAR(255) NOT NULL,
+             sce_vnffg_id VARCHAR(36) NOT NULL,
+             created_at DOUBLE NOT NULL,
+             modified_at DOUBLE NULL DEFAULT NULL,
+         PRIMARY KEY (uuid),
+         INDEX FK_sce_vnffgs_rsp (sce_vnffg_id),
+         CONSTRAINT FK_sce_vnffgs_rsp FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     echo "      Adding sce_rsp_hops"
+     sql "CREATE TABLE IF NOT EXISTS sce_rsp_hops (
+             uuid VARCHAR(36) NOT NULL,
+             if_order INT DEFAULT 0 NOT NULL,
+             interface_id VARCHAR(36) NOT NULL,
+             sce_vnf_id VARCHAR(36) NOT NULL,
+             sce_rsp_id VARCHAR(36) NOT NULL,
+             created_at DOUBLE NOT NULL,
+             modified_at DOUBLE NULL DEFAULT NULL,
+         PRIMARY KEY (uuid),
+         INDEX FK_interfaces_rsp_hop (interface_id),
+         INDEX FK_sce_vnfs_rsp_hop (sce_vnf_id),
+         INDEX FK_sce_rsps_rsp_hop (sce_rsp_id),
+         CONSTRAINT FK_interfaces_rsp_hop FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+         CONSTRAINT FK_sce_vnfs_rsp_hop FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+         CONSTRAINT FK_sce_rsps_rsp_hop FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     echo "      Adding sce_classifiers"
+     sql "CREATE TABLE IF NOT EXISTS sce_classifiers (
+             uuid VARCHAR(36) NOT NULL,
+             tenant_id VARCHAR(36) NULL DEFAULT NULL,
+             name VARCHAR(255) NOT NULL,
+             sce_vnffg_id VARCHAR(36) NOT NULL,
+             sce_rsp_id VARCHAR(36) NOT NULL,
+             sce_vnf_id VARCHAR(36) NOT NULL,
+             interface_id VARCHAR(36) NOT NULL,
+             created_at DOUBLE NOT NULL,
+             modified_at DOUBLE NULL DEFAULT NULL,
+         PRIMARY KEY (uuid),
+         INDEX FK_sce_vnffgs_classifier (sce_vnffg_id),
+         INDEX FK_sce_rsps_classifier (sce_rsp_id),
+         INDEX FK_sce_vnfs_classifier (sce_vnf_id),
+         INDEX FK_interfaces_classifier (interface_id),
+         CONSTRAINT FK_sce_vnffgs_classifier FOREIGN KEY (sce_vnffg_id) REFERENCES sce_vnffgs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+         CONSTRAINT FK_sce_rsps_classifier FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+         CONSTRAINT FK_sce_vnfs_classifier FOREIGN KEY (sce_vnf_id) REFERENCES sce_vnfs (uuid) ON UPDATE CASCADE ON DELETE CASCADE,
+         CONSTRAINT FK_interfaces_classifier FOREIGN KEY (interface_id) REFERENCES interfaces (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     echo "      Adding sce_classifier_matches"
+     sql "CREATE TABLE IF NOT EXISTS sce_classifier_matches (
+             uuid VARCHAR(36) NOT NULL,
+             ip_proto VARCHAR(2) NOT NULL,
+             source_ip VARCHAR(16) NOT NULL,
+             destination_ip VARCHAR(16) NOT NULL,
+             source_port VARCHAR(5) NOT NULL,
+             destination_port VARCHAR(5) NOT NULL,
+             sce_classifier_id VARCHAR(36) NOT NULL,
+             created_at DOUBLE NOT NULL,
+             modified_at DOUBLE NULL DEFAULT NULL,
+         PRIMARY KEY (uuid),
+         INDEX FK_classifiers_classifier_match (sce_classifier_id),
+         CONSTRAINT FK_sce_classifiers_classifier_match FOREIGN KEY (sce_classifier_id) REFERENCES sce_classifiers (uuid) ON UPDATE CASCADE ON DELETE CASCADE)
+         COLLATE='utf8_general_ci'
+         ENGINE=InnoDB;"
+     echo "      [Adding necessary tables for VNFFG-SFC instance mapping]"
+     echo "      Adding instance_sfis"
+     sql "CREATE TABLE IF NOT EXISTS instance_sfis (
+           uuid varchar(36) NOT NULL,
+           instance_scenario_id varchar(36) NOT NULL,
+           vim_sfi_id varchar(36) DEFAULT NULL,
+           sce_rsp_hop_id varchar(36) DEFAULT NULL,
+           datacenter_id varchar(36) DEFAULT NULL,
+           datacenter_tenant_id varchar(36) DEFAULT NULL,
+           status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+           error_msg varchar(1024) DEFAULT NULL,
+           vim_info text,
+           created_at double NOT NULL,
+           modified_at double DEFAULT NULL,
+           PRIMARY KEY (uuid),
+       KEY FK_instance_sfis_instance_scenarios (instance_scenario_id),
+       KEY FK_instance_sfis_sce_rsp_hops (sce_rsp_hop_id),
+       KEY FK_instance_sfis_datacenters (datacenter_id),
+       KEY FK_instance_sfis_datacenter_tenants (datacenter_tenant_id),
+       CONSTRAINT FK_instance_sfis_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+       CONSTRAINT FK_instance_sfis_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+       CONSTRAINT FK_instance_sfis_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+       CONSTRAINT FK_instance_sfis_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+       COLLATE='utf8_general_ci'
+       ENGINE=InnoDB;"
+     echo "      Adding instance_sfs"
+     sql "CREATE TABLE IF NOT EXISTS instance_sfs (
+           uuid varchar(36) NOT NULL,
+           instance_scenario_id varchar(36) NOT NULL,
+           vim_sf_id varchar(36) DEFAULT NULL,
+           sce_rsp_hop_id varchar(36) DEFAULT NULL,
+           datacenter_id varchar(36) DEFAULT NULL,
+           datacenter_tenant_id varchar(36) DEFAULT NULL,
+           status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+           error_msg varchar(1024) DEFAULT NULL,
+           vim_info text,
+           created_at double NOT NULL,
+           modified_at double DEFAULT NULL,
+       PRIMARY KEY (uuid),
+       KEY FK_instance_sfs_instance_scenarios (instance_scenario_id),
+       KEY FK_instance_sfs_sce_rsp_hops (sce_rsp_hop_id),
+       KEY FK_instance_sfs_datacenters (datacenter_id),
+       KEY FK_instance_sfs_datacenter_tenants (datacenter_tenant_id),
+       CONSTRAINT FK_instance_sfs_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+       CONSTRAINT FK_instance_sfs_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+       CONSTRAINT FK_instance_sfs_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+       CONSTRAINT FK_instance_sfs_sce_rsp_hops FOREIGN KEY (sce_rsp_hop_id) REFERENCES sce_rsp_hops (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+       COLLATE='utf8_general_ci'
+       ENGINE=InnoDB;"
+     echo "      Adding instance_classifications"
+     sql "CREATE TABLE IF NOT EXISTS instance_classifications (
+           uuid varchar(36) NOT NULL,
+           instance_scenario_id varchar(36) NOT NULL,
+           vim_classification_id varchar(36) DEFAULT NULL,
+           sce_classifier_match_id varchar(36) DEFAULT NULL,
+           datacenter_id varchar(36) DEFAULT NULL,
+           datacenter_tenant_id varchar(36) DEFAULT NULL,
+           status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+           error_msg varchar(1024) DEFAULT NULL,
+           vim_info text,
+           created_at double NOT NULL,
+           modified_at double DEFAULT NULL,
+       PRIMARY KEY (uuid),
+       KEY FK_instance_classifications_instance_scenarios (instance_scenario_id),
+       KEY FK_instance_classifications_sce_classifier_matches (sce_classifier_match_id),
+       KEY FK_instance_classifications_datacenters (datacenter_id),
+       KEY FK_instance_classifications_datacenter_tenants (datacenter_tenant_id),
+       CONSTRAINT FK_instance_classifications_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+       CONSTRAINT FK_instance_classifications_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+       CONSTRAINT FK_instance_classifications_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+       CONSTRAINT FK_instance_classifications_sce_classifier_matches FOREIGN KEY (sce_classifier_match_id) REFERENCES sce_classifier_matches (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+       COLLATE='utf8_general_ci'
+       ENGINE=InnoDB;"
+     echo "      Adding instance_sfps"
+     sql "CREATE TABLE IF NOT EXISTS instance_sfps (
+           uuid varchar(36) NOT NULL,
+           instance_scenario_id varchar(36) NOT NULL,
+           vim_sfp_id varchar(36) DEFAULT NULL,
+           sce_rsp_id varchar(36) DEFAULT NULL,
+           datacenter_id varchar(36) DEFAULT NULL,
+           datacenter_tenant_id varchar(36) DEFAULT NULL,
+           status enum('ACTIVE','INACTIVE','BUILD','ERROR','VIM_ERROR','PAUSED','SUSPENDED','DELETED','SCHEDULED_CREATION','SCHEDULED_DELETION') NOT NULL DEFAULT 'BUILD',
+           error_msg varchar(1024) DEFAULT NULL,
+           vim_info text,
+           created_at double NOT NULL,
+           modified_at double DEFAULT NULL,
+       PRIMARY KEY (uuid),
+       KEY FK_instance_sfps_instance_scenarios (instance_scenario_id),
+       KEY FK_instance_sfps_sce_rsps (sce_rsp_id),
+       KEY FK_instance_sfps_datacenters (datacenter_id),
+       KEY FK_instance_sfps_datacenter_tenants (datacenter_tenant_id),
+       CONSTRAINT FK_instance_sfps_datacenter_tenants FOREIGN KEY (datacenter_tenant_id) REFERENCES datacenter_tenants (uuid),
+       CONSTRAINT FK_instance_sfps_datacenters FOREIGN KEY (datacenter_id) REFERENCES datacenters (uuid),
+       CONSTRAINT FK_instance_sfps_instance_scenarios FOREIGN KEY (instance_scenario_id) REFERENCES instance_scenarios (uuid) ON DELETE CASCADE ON UPDATE CASCADE,
+       CONSTRAINT FK_instance_sfps_sce_rsps FOREIGN KEY (sce_rsp_id) REFERENCES sce_rsps (uuid) ON DELETE SET NULL ON UPDATE CASCADE)
+       COLLATE='utf8_general_ci'
+       ENGINE=InnoDB;"
+     echo "      [Altering vim_actions table]"
+     sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces','instance_sfis','instance_sfs','instance_classifications','instance_sfps') NOT NULL COMMENT 'table where the item is stored'"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (28, '0.28', '0.5.28', 'Adding VNFFG-related tables', '2017-11-20');"
+ }
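+ # Illustration only (commented out): the sce_* tables model the VNFFG templates
+ # while the instance_* tables track their deployment state. For example, the hops
+ # of a rendered service path and their deployed service function instances can be
+ # joined as below; '@rsp' is a made-up placeholder for a sce_rsps uuid.
+ #     SELECT h.if_order, h.interface_id, i.vim_sfi_id, i.status
+ #         FROM sce_rsp_hops as h
+ #         LEFT JOIN instance_sfis as i ON i.sce_rsp_hop_id = h.uuid
+ #         WHERE h.sce_rsp_id = @rsp
+ #         ORDER BY h.if_order;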
+ function downgrade_from_28(){
+     echo "      [Undo adding the VNFFG tables]"
+     echo "      Dropping instance_sfps"
+     sql "DROP TABLE IF EXISTS instance_sfps;"
+     echo "      Dropping sce_classifications"
+     sql "DROP TABLE IF EXISTS instance_classifications;"
+     echo "      Dropping instance_sfs"
+     sql "DROP TABLE IF EXISTS instance_sfs;"
+     echo "      Dropping instance_sfis"
+     sql "DROP TABLE IF EXISTS instance_sfis;"
+     echo "      Dropping sce_classifier_matches"
+     echo "      [Undo adding the VNFFG-SFC instance mapping tables]"
+     sql "DROP TABLE IF EXISTS sce_classifier_matches;"
+     echo "      Dropping sce_classifiers"
+     sql "DROP TABLE IF EXISTS sce_classifiers;"
+     echo "      Dropping sce_rsp_hops"
+     sql "DROP TABLE IF EXISTS sce_rsp_hops;"
+     echo "      Dropping sce_rsps"
+     sql "DROP TABLE IF EXISTS sce_rsps;"
+     echo "      Dropping sce_vnffgs"
+     sql "DROP TABLE IF EXISTS sce_vnffgs;"
+     echo "      [Altering vim_actions table]"
+     sql "ALTER TABLE vim_actions MODIFY COLUMN item ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces') NOT NULL COMMENT 'table where the item is stored'"
+     sql "DELETE FROM schema_version WHERE version_int='28';"
+ }
+ function upgrade_to_29(){
+     echo "      Change 'member_vnf_index' from int to str at 'sce_vnfs'"
+     sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index VARCHAR(255) NULL DEFAULT NULL AFTER uuid;"
+     echo "      Add osm_id to 'nets's and 'sce_nets'"
+     sql "ALTER TABLE nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+     sql "ALTER TABLE sce_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (29, '0.29', '0.5.59', 'Change member_vnf_index to str accordingly to the model', '2018-04-11');"
+ }
+ function downgrade_from_29(){
+     echo "      Change back 'member_vnf_index' from str to int at 'sce_vnfs'"
+     sql "ALTER TABLE sce_vnfs CHANGE COLUMN member_vnf_index member_vnf_index SMALLINT NULL DEFAULT NULL AFTER uuid;"
+     echo "      Remove osm_id from 'nets's and 'sce_nets'"
+     sql "ALTER TABLE nets DROP COLUMN osm_id;"
+     sql "ALTER TABLE sce_nets DROP COLUMN osm_id;"
+     sql "DELETE FROM schema_version WHERE version_int='29';"
+ }
+ function upgrade_to_30(){
+     echo "      Add 'image_list' at 'vms' to allocate alternative images"
+     sql "ALTER TABLE vms ADD COLUMN image_list TEXT NULL COMMENT 'Alternative images' AFTER image_id;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (30, '0.30', '0.5.60', 'Add image_list to vms', '2018-04-24');"
+ }
+ function downgrade_from_30(){
+     echo "      Remove back 'image_list' from 'vms' to allocate alternative images"
+     sql "ALTER TABLE vms DROP COLUMN image_list;"
+     sql "DELETE FROM schema_version WHERE version_int='30';"
+ }
+ function upgrade_to_31(){
+     echo "      Add 'vim_network_name' at 'sce_nets'"
+     sql "ALTER TABLE sce_nets ADD COLUMN vim_network_name VARCHAR(255) NULL DEFAULT NULL AFTER description;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (31, '0.31', '0.5.61', 'Add vim_network_name to sce_nets', '2018-05-03');"
+ }
+ function downgrade_from_31(){
+     echo "      Remove back 'vim_network_name' from 'sce_nets'"
+     sql "ALTER TABLE sce_nets DROP COLUMN vim_network_name;"
+     sql "DELETE FROM schema_version WHERE version_int='31';"
+ }
+ function upgrade_to_32(){
+     echo "      Add 'vim_name' to 'instance_vms'"
+     sql "ALTER TABLE instance_vms ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_vm_id;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (32, '0.32', '0.5.70', 'Add vim_name to instance vms', '2018-06-28');"
+ }
+ function downgrade_from_32(){
+     echo "      Remove back 'vim_name' from 'instance_vms'"
+     sql "ALTER TABLE instance_vms DROP COLUMN vim_name;"
+     sql "DELETE FROM schema_version WHERE version_int='32';"
+ }
+ function upgrade_to_33(){
+     echo "      Add PDU information to 'vms'"
+     sql "ALTER TABLE vms ADD COLUMN pdu_type VARCHAR(255) NULL DEFAULT NULL AFTER osm_id;"
+     sql "ALTER TABLE instance_nets ADD COLUMN vim_name VARCHAR(255) NULL DEFAULT NULL AFTER vim_net_id;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (33, '0.33', '0.5.82', 'Add pdu information to vms', '2018-11-13');"
+ }
+ function downgrade_from_33(){
+     echo "      Remove back PDU information from 'vms'"
+     sql "ALTER TABLE vms DROP COLUMN pdu_type;"
+     sql "ALTER TABLE instance_nets DROP COLUMN vim_name;"
+     sql "DELETE FROM schema_version WHERE version_int='33';"
+ }
+ # Template functions, not wired to a version number (note they do not touch schema_version)
+ function upgrade_to_X(){
+     echo "      Change 'datacenter_nets'"
+     sql "ALTER TABLE datacenter_nets ADD COLUMN vim_tenant_id VARCHAR(36) NOT NULL AFTER datacenter_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id, vim_tenant_id);"
+ }
+ function downgrade_from_X(){
+     echo "      Change back 'datacenter_nets'"
+     sql "ALTER TABLE datacenter_nets DROP COLUMN vim_tenant_id, DROP INDEX name_datacenter_id, ADD UNIQUE INDEX name_datacenter_id (name, datacenter_id);"
+ }
+ function upgrade_to_34() {
+     echo "      Create databases required for WIM features"
+     script="$(find "${DBUTILS}/migrations/up" -iname "34*.sql" | tail -1)"
+     sql "source ${script}"
+ }
+ function downgrade_from_34() {
+     echo "      Drop databases required for WIM features"
+     script="$(find "${DBUTILS}/migrations/down" -iname "34*.sql" | tail -1)"
+     sql "source ${script}"
+ }
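+ # Note: versions 34/35 delegate to external SQL files instead of inline statements.
+ # The expected layout (file names below are hypothetical examples) is:
+ #     ${DBUTILS}/migrations/up/34_add_wim_tables.sql
+ #     ${DBUTILS}/migrations/down/34_remove_wim_tables.sql
+ # 'find ... | tail -1' keeps a single match, so exactly one "34*.sql" file per
+ # direction is assumed to exist.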
+ function upgrade_to_35(){
+     echo "      Create databases required for WIM features"
+     script="$(find "${DBUTILS}/migrations/up" -iname "35*.sql" | tail -1)"
+     sql "source ${script}"
+ }
+ function downgrade_from_35(){
+     echo "      Drop databases required for WIM features"
+     script="$(find "${DBUTILS}/migrations/down" -iname "35*.sql" | tail -1)"
+     sql "source ${script}"
+ }
+ function upgrade_to_36(){
+     echo "      Allow null for image_id at 'vms'"
+     sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
+     sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NULL COMMENT 'Link to image table' AFTER " \
+         "flavor_id;"
+     echo "      Enlarge config at 'wims' and 'wim_accounts'"
+     sql "ALTER TABLE wims CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER wim_url;"
+     sql "ALTER TABLE wim_accounts CHANGE COLUMN config config TEXT NULL DEFAULT NULL AFTER password;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) "\
+          "VALUES (36, '0.36', '0.6.03', 'Allow vm without image_id for PDUs', '2018-12-19');"
+ }
+ function downgrade_from_36(){
+     echo "      Force back not null for image_id at 'vms'"
+     sql "ALTER TABLE vms ALTER image_id DROP DEFAULT;"
+     sql "ALTER TABLE vms CHANGE COLUMN image_id image_id VARCHAR(36) NOT NULL COMMENT 'Link to image table' AFTER " \
+         "flavor_id;"
+     # For downgrade do not restore wims/wim_accounts config to varchar 4000
+     sql "DELETE FROM schema_version WHERE version_int='36';"
+ }
+ function upgrade_to_37(){
+     echo "      Adding the enum tags for SFC"
+     sql "ALTER TABLE vim_wim_actions " \
+         "MODIFY COLUMN item " \
+         "ENUM('datacenters_flavors','datacenter_images','instance_nets','instance_vms','instance_interfaces'," \
+             "'instance_sfis','instance_sfs','instance_classifications','instance_sfps','instance_wim_nets') " \
+         "NOT NULL COMMENT 'table where the item is stored';"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+          "VALUES (37, '0.37', '0.6.09', 'Adding the enum tags for SFC', '2019-02-07');"
+ }
+ function downgrade_from_37(){
+     echo "      Adding the enum tags for SFC isn't going to be reversed"
+     # It doesn't make sense to reverse to a bug state.
+     sql "DELETE FROM schema_version WHERE version_int='37';"
+ }
+ function upgrade_to_38(){
+     echo "      Change vim_wim_actions, add worker, related"
+     sql "ALTER TABLE vim_wim_actions ADD COLUMN worker VARCHAR(64) NULL AFTER task_index, " \
+           "ADD COLUMN related VARCHAR(36) NULL AFTER worker, " \
+           "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED','FINISHED') " \
+           "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
+       sql "UPDATE vim_wim_actions set related=item_id;"
+       echo "      Change DONE to FINISHED when DELETE has been completed"
+       sql "UPDATE vim_wim_actions as v1 join vim_wim_actions as v2 on (v1.action='CREATE' or v1.action='FIND') and " \
+           "v2.action='DELETE' and (v2.status='SUPERSEDED' or v2.status='DONE') and v1.item_id=v2.item_id " \
+         "SET v1.status='FINISHED', v2.status='FINISHED';"
+     echo "      Add osm_id to instance_nets"
+     sql "ALTER TABLE instance_nets ADD COLUMN osm_id VARCHAR(255) NULL AFTER uuid;"
+     echo "      Add related to instance_xxxx"
+     for table in instance_classifications instance_nets instance_sfis instance_sfps instance_sfs \
+         instance_vms
+     do
+         sql "ALTER TABLE $table ADD COLUMN related VARCHAR(36) NULL AFTER vim_info;"
+       sql "UPDATE $table set related=uuid;"
+     done
+     sql "ALTER TABLE instance_wim_nets ADD COLUMN related VARCHAR(36) NULL AFTER wim_info;"
+       sql "UPDATE instance_wim_nets set related=uuid;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+         "VALUES (38, '0.38', '0.6.11', 'Adding related to vim_wim_actions', '2019-03-07');"
+ }
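+ # Worked example (illustration only) of the FINISHED pairing above: given a
+ # CREATE task with status DONE and a later DELETE task with status DONE or
+ # SUPERSEDED for the same item_id, both rows end as FINISHED, marking the
+ # create/delete cycle of that item as closed while keeping its history.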
+ function downgrade_from_38(){
+     echo "      Change vim_wim_actions, delete worker, related"
+       sql "UPDATE vim_wim_actions SET status='DONE' WHERE status='FINISHED';"
+     sql "ALTER TABLE vim_wim_actions DROP COLUMN worker, DROP COLUMN related, " \
+           "CHANGE COLUMN status status ENUM('SCHEDULED','BUILD','DONE','FAILED','SUPERSEDED') " \
+           "NOT NULL DEFAULT 'SCHEDULED' AFTER item_id;"
+     echo "      Remove related from instance_xxxx"
+     for table in instance_classifications instance_nets instance_wim_nets instance_sfis instance_sfps instance_sfs \
+         instance_vms
+     do
+         sql "ALTER TABLE $table DROP COLUMN related;"
+     done
+     echo "      Remove osm_id from instance_nets"
+     sql "ALTER TABLE instance_nets DROP COLUMN osm_id;"
+     sql "DELETE FROM schema_version WHERE version_int='38';"
+ }
+ function upgrade_to_39(){
+     echo "      Enlarge vim_id to 300 at all places"
+     sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
+     sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(300) NOT NULL AFTER datacenter_vim_id;"
+     sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NOT NULL AFTER name;"
+     sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(300)" \
+         " NULL DEFAULT NULL AFTER instance_scenario_id;"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(300) NULL DEFAULT " \
+         " NULL AFTER interface_id;"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(300) NULL DEFAULT NULL" \
+         " AFTER osm_id;"
+     sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(300) NULL DEFAULT NULL" \
+         " AFTER instance_scenario_id;"
+     sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(300) NULL DEFAULT NULL" \
+         " AFTER instance_scenario_id;"
+     sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(300) NULL DEFAULT NULL" \
+         " AFTER instance_scenario_id;"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(300) NULL DEFAULT NULL" \
+         " AFTER instance_vnf_id, DROP INDEX vim_vm_id;"
+     sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(300) NULL DEFAULT NULL" \
+         " COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
+     sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(300) NULL DEFAULT NULL" \
+         " AFTER datacenter_vim_id;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+         "VALUES (39, '0.39', '0.6.20', 'Enlarge vim_id to 300 at all places', '2019-05-23');"
+ }
+ function downgrade_from_39(){
+     echo "      Set vim_id to original lenght at all places"
+     sql "ALTER TABLE datacenters_flavors CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
+     sql "ALTER TABLE datacenters_images CHANGE COLUMN vim_id vim_id VARCHAR(36) NOT NULL AFTER datacenter_vim_id;"
+     sql "ALTER TABLE datacenter_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(36) NOT NULL AFTER name;"
+     sql "ALTER TABLE instance_classifications CHANGE COLUMN vim_classification_id vim_classification_id VARCHAR(36)" \
+         " NULL DEFAULT NULL AFTER instance_scenario_id;"
+     sql "ALTER TABLE instance_interfaces CHANGE COLUMN vim_interface_id vim_interface_id VARCHAR(128) NULL DEFAULT " \
+         " NULL AFTER interface_id;"
+     sql "ALTER TABLE instance_nets CHANGE COLUMN vim_net_id vim_net_id VARCHAR(128) NULL DEFAULT NULL" \
+         " AFTER osm_id;"
+     sql "ALTER TABLE instance_sfis CHANGE COLUMN vim_sfi_id vim_sfi_id VARCHAR(36) NULL DEFAULT NULL" \
+         " AFTER instance_scenario_id;"
+     sql "ALTER TABLE instance_sfps CHANGE COLUMN vim_sfp_id vim_sfp_id VARCHAR(36) NULL DEFAULT NULL" \
+         " AFTER instance_scenario_id;"
+     sql "ALTER TABLE instance_sfs CHANGE COLUMN vim_sf_id vim_sf_id VARCHAR(36) NULL DEFAULT NULL" \
+         " AFTER instance_scenario_id;"
+     sql "ALTER TABLE instance_vms CHANGE COLUMN vim_vm_id vim_vm_id VARCHAR(36) NULL DEFAULT NULL" \
+         " AFTER instance_vnf_id, ADD UNIQUE INDEX vim_vm_id (vim_vm_id);"
+     sql "ALTER TABLE instance_wim_nets CHANGE COLUMN wim_internal_id wim_internal_id VARCHAR(128) NULL DEFAULT NULL" \
+         " COMMENT 'Internal ID used by the WIM to refer to the network' AFTER uuid;"
+     sql "ALTER TABLE vim_wim_actions CHANGE COLUMN vim_id vim_id VARCHAR(64) NULL DEFAULT NULL" \
+         " AFTER datacenter_vim_id;"
+     sql "DELETE FROM schema_version WHERE version_int='39';"
+ }
+ function upgrade_to_40(){
+     echo "      Adding instance_wim_net_id, created_at, modified_at at 'instance_interfaces'"
 -        " ADD COLUMN switch_dpid VARCHAR(64) NULL AFTER wan_service_endpoint_id," \
++    sql "ALTER TABLE instance_interfaces ADD COLUMN instance_wim_net_id VARCHAR(36) NULL AFTER instance_net_id, " \
++        "ADD COLUMN model VARCHAR(12) NULL DEFAULT NULL AFTER type, " \
+         "ADD COLUMN created_at DOUBLE NULL DEFAULT NULL AFTER vlan, " \
+         "ADD COLUMN modified_at DOUBLE NULL DEFAULT NULL AFTER created_at;"
+     echo "      Adding sdn to 'instance_wim_nets'"
+     sql "ALTER TABLE instance_wim_nets ADD COLUMN sdn ENUM('true','false') NOT NULL DEFAULT 'false' AFTER created;"
+     echo "      Change from created to sdn at 'wim_accounts'"
+     sql "ALTER TABLE wim_accounts CHANGE COLUMN created sdn ENUM('true','false') NOT NULL DEFAULT 'false' AFTER wim_id;"
+     echo "      Remove unique_datacenter_port_mapping at 'wim_port_mappings'"
+     sql "ALTER TABLE wim_port_mappings DROP INDEX unique_datacenter_port_mapping;"
+     echo "      change 'wim_port_mappings' pop_x to device_x, adding switch_dpid, switch_port"
+     sql "ALTER TABLE wim_port_mappings ALTER pop_switch_dpid DROP DEFAULT, ALTER pop_switch_port DROP DEFAULT;"
+     sql "ALTER TABLE wim_port_mappings CHANGE COLUMN pop_switch_dpid device_id VARCHAR(64) NULL AFTER datacenter_id," \
+         " CHANGE COLUMN pop_switch_port device_interface_id VARCHAR(64) NULL AFTER device_id, " \
+         " CHANGE COLUMN wan_service_endpoint_id service_endpoint_id VARCHAR(256) NOT NULL AFTER device_interface_id, " \
+         " CHANGE COLUMN wan_service_mapping_info service_mapping_info TEXT NULL AFTER service_endpoint_id, " \
 -    sql "ALTER TABLE wim_port_mappings ADD UNIQUE INDEX unique_datacenter_port_mapping(datacenter_id, pop_switch_dpid,
 -         pop_switch_port);"
++        " ADD COLUMN switch_dpid VARCHAR(64) NULL AFTER service_endpoint_id," \
+         " ADD COLUMN switch_port VARCHAR(64) NULL AFTER switch_dpid;"
+     echo "      remove unique name to 'datacenters'"
+     sql "ALTER TABLE datacenters DROP INDEX name;"
+     sql "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) " \
+         "VALUES (40, '0.40', '6.0.4', 'Chagnes to SDN ', '2019-10-23');"
+ }
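+ # Column rename map applied above to 'wim_port_mappings' (old -> new):
+ #     pop_switch_dpid          -> device_id
+ #     pop_switch_port          -> device_interface_id
+ #     wan_service_endpoint_id  -> service_endpoint_id
+ #     wan_service_mapping_info -> service_mapping_info
+ # plus two new columns, switch_dpid and switch_port; downgrade_from_40 below
+ # reverses this mapping.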
+ function downgrade_from_40(){
+     echo "      Removing instance_wim_net_id, created_at, modified_at from 'instance_interfaces'"
+     sql "ALTER TABLE instance_interfaces DROP COLUMN instance_wim_net_id, DROP COLUMN created_at, " \
+         "DROP COLUMN modified_at, DROP COLUMN model;"
+     echo "      Removing sdn from 'instance_wim_nets'"
+     sql "ALTER TABLE instance_wim_nets DROP COLUMN sdn;"
+     echo "      Change back from sdn to created at 'wim_accounts'"
+     sql "ALTER TABLE wim_accounts CHANGE COLUMN sdn created ENUM('true','false') NOT NULL DEFAULT 'false' AFTER wim_id;"
+     echo "      Restore back unique_datacenter_port_mapping at 'wim_port_mappings'"
+     echo "      change 'wim_port_mappings' device_x to pop_x, remove switch_dpid, switch_port"
+     sql "ALTER TABLE wim_port_mappings ALTER device_id DROP DEFAULT, ALTER device_interface_id DROP DEFAULT;"
+     sql "ALTER TABLE wim_port_mappings CHANGE COLUMN device_id pop_switch_dpid VARCHAR(64) NOT NULL AFTER " \
+         "datacenter_id,       CHANGE COLUMN device_interface_id pop_switch_port VARCHAR(64) NOT NULL AFTER pop_switch_dpid," \
+         " CHANGE COLUMN service_endpoint_id wan_service_endpoint_id VARCHAR(256) NOT NULL AFTER pop_switch_port, " \
+         " CHANGE COLUMN service_mapping_info wan_service_mapping_info TEXT NULL AFTER wan_service_endpoint_id, " \
+             " DROP COLUMN switch_dpid, DROP COLUMN switch_port;"
++    sql "ALTER TABLE wim_port_mappings ADD UNIQUE INDEX unique_datacenter_port_mapping(datacenter_id, " \
++        "pop_switch_dpid, pop_switch_port);"
+     echo "      add unique name to 'datacenters'"
+     sql "ALTER TABLE datacenters ADD UNIQUE INDEX name (name);"
+     sql "DELETE FROM schema_version WHERE version_int='40';"
+ }
+ #TODO ... put functions here
+ function del_schema_version_process()
+ {
+     echo "DELETE FROM schema_version WHERE version_int='0';" | $DBCMD ||
+         ! echo "    ERROR writing on schema_version" >&2 || exit 1
+ }
+ function set_schema_version_process()
+ {
+     echo "INSERT INTO schema_version (version_int, version, openmano_ver, comments, date) VALUES "\
+         "(0, '0.0', '0.0.0', 'migration from $DATABASE_VER_NUM to $DB_VERSION backup: $BACKUP_FILE',"\
+         "'$(date +%Y-%m-%d)');" | $DBCMD ||
+         ! echo  "    Cannot set database at migration process writing into schema_version" >&2 || exit 1
+ }
+ function rollback_db()
+ {
+     if echo $DATABASE_PROCESS | grep -q init ; then   # Empty database. No backup needed
+         echo "    Aborted! Rollback database not needed" && exit 1
+     else   # migration a non empty database or Recovering a migration process
+         cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM && echo "    Aborted! Rollback database OK" &&
+             del_schema_version_process && rm -f "$BACKUP_FILE" && exit 1
+         echo "    Aborted! Rollback database FAIL" && exit 1
+     fi
+ }
+ function sql()    # send a sql command
+ {
+     echo "$*" | $DBCMD || ! echo "    ERROR with command '$*'" || rollback_db
+     return 0
+ }
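+ # How the '|| ! echo ... || rollback_db' idiom works (illustration only): when
+ # $DBCMD fails, the negated echo evaluates to false, so control falls through to
+ # rollback_db; when $DBCMD succeeds nothing else runs. A minimal standalone
+ # sketch of the same pattern:
+ #     false || ! echo "    ERROR" >&2 || echo "recovery runs here"
+ # prints the error and then runs the recovery branch.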
+ function migrate()
+ {
+     #UPGRADE DATABASE step by step
+     while [ $DB_VERSION -gt $DATABASE_VER_NUM ]
+     do
+         echo "    upgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM+1))'"
+         DATABASE_VER_NUM=$((DATABASE_VER_NUM+1))
+         upgrade_to_${DATABASE_VER_NUM}
+         #FILE_="${DIRNAME}/upgrade_to_${DATABASE_VER_NUM}.sh"
+         #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to upgrade" >&2 && exit -1
+         #$FILE_ || exit -1  # if fail return
+     done
+     #DOWNGRADE DATABASE step by step
+     while [ $DB_VERSION -lt $DATABASE_VER_NUM ]
+     do
+         echo "    downgrade database from version '$DATABASE_VER_NUM' to '$((DATABASE_VER_NUM-1))'"
+         #FILE_="${DIRNAME}/downgrade_from_${DATABASE_VER_NUM}.sh"
+         #[ ! -x "$FILE_" ] && echo "Error, can not find script '$FILE_' to downgrade" >&2 && exit -1
+         #$FILE_ || exit -1  # if fail return
+         downgrade_from_${DATABASE_VER_NUM}
+         DATABASE_VER_NUM=$((DATABASE_VER_NUM-1))
+     done
+ }
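+ # Example trace (illustration only): with DATABASE_VER_NUM=38 and DB_VERSION=40,
+ # migrate() runs upgrade_to_39 then upgrade_to_40; with DATABASE_VER_NUM=40 and
+ # DB_VERSION=38 it runs downgrade_from_40 then downgrade_from_39. Each step
+ # inserts or deletes its schema_version row, so the table always reflects the
+ # last completed step.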
+ # check if current database is ok
+ function check_migration_needed()
+ {
+     DATABASE_VER_NUM=$(echo "select max(version_int) from schema_version;" | $DBCMD | tail -n+2) ||
+         ! echo "    ERROR cannot read from schema_version" >&2 || exit 1
+     if [[ -z "$DATABASE_VER_NUM" ]] || [[ "$DATABASE_VER_NUM" -lt 0 ]] || [[ "$DATABASE_VER_NUM" -gt 100 ]] ; then
+         echo "    Error can not get database version ($DATABASE_VER_NUM?)" >&2
+         exit 1
+     fi
+     [[ $DB_VERSION -eq $DATABASE_VER_NUM ]] && echo "    current database version '$DATABASE_VER_NUM' is ok" && return 1
+     [[ "$DATABASE_VER_NUM" -gt "$LAST_DB_VERSION" ]] &&
+         echo "Database has been upgraded with a newer version of this script. Use this version to downgrade" >&2 &&
+         exit 1
+     return 0
+ }
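+ # Return convention (restating the code above): returns 1 when the database is
+ # already at DB_VERSION, 0 when a migration is needed, and exits on unreadable
+ # or out-of-range versions; hence callers use:
+ #     check_migration_needed || exit 0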
+ DATABASE_PROCESS=$(echo "select comments from schema_version where version_int=0;" | $DBCMD | tail -n+2) ||
+     ! echo "    ERROR cannot read from schema_version" >&2 || exit 1
+ if [[ -z "$DATABASE_PROCESS" ]] ; then  # migration a non empty database
+     check_migration_needed || exit 0
+     # Create a backup database content
+     [[ -n "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q  "${BACKUP_DIR}/backupdb.XXXXXX.sql")
+     [[ -z "$BACKUP_DIR" ]] && BACKUP_FILE=$(mktemp -q --tmpdir "backupdb.XXXXXX.sql")
+     mysqldump $DEF_EXTRA_FILE_PARAM --add-drop-table --add-drop-database --routines --databases $DBNAME > $BACKUP_FILE ||
+         ! echo "Cannot create Backup file '$BACKUP_FILE'" >&2 || exit 1
+     echo "    Backup file '$BACKUP_FILE' created"
+     # Set schema version
+     set_schema_version_process
+     migrate
+     del_schema_version_process
+     rm -f "$BACKUP_FILE"
+ elif echo $DATABASE_PROCESS | grep -q init ; then   # Empty database. No backup needed
+     echo "    Migrating an empty database"
+     if check_migration_needed ; then
+         migrate
+     fi
+     del_schema_version_process
+ else  # Recover Migration process
+     BACKUP_FILE=${DATABASE_PROCESS##*backup: }
+     [[ -f "$BACKUP_FILE" ]] || ! echo "Previous migration process fail and cannot recover backup file '$BACKUP_FILE'" >&2 ||
+         exit 1
+     echo "    Previous migration was killed. Restoring database from rollback file'$BACKUP_FILE'"
+     cat $BACKUP_FILE | mysql $DEF_EXTRA_FILE_PARAM || ! echo "    Cannot load backup file '$BACKUP_FILE'" >&2 || exit 1
+     if check_migration_needed ; then
+         set_schema_version_process
+         migrate
+     fi
+     del_schema_version_process
+     rm -f "$BACKUP_FILE"
+ fi
+ exit 0
diff --combined RO/osm_ro/nfvo.py
index 0000000,6a06a4c..b33bda3
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,5898 +1,5925 @@@
 -
+ # -*- coding: utf-8 -*-
+ ##
+ # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
 -            if vnfd["mgmt-interface"].get("cp"):
++ 
+ '''
+ NFVO engine, implementing all the methods for the creation, deletion and management of vnfs, scenarios and instances
+ '''
+ __author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes"
+ __date__ ="$16-sep-2014 22:05:01$"
+ # import imp
+ import json
+ import yaml
+ from osm_ro import utils
+ from osm_ro.utils import deprecated
+ from osm_ro.vim_thread import vim_thread
+ import osm_ro.console_proxy_thread as cli
+ from osm_ro import vimconn
+ import logging
+ import collections
+ import math
+ from uuid import uuid4
+ from osm_ro.db_base import db_base_Exception
+ from osm_ro import nfvo_db
+ from threading import Lock
+ import time as t
+ # TODO py3 BEGIN
+ from osm_ro.sdn import Sdn, SdnException as ovimException
+ # from lib_osm_openvim.ovim import ovimException
+ # from unittest.mock  import MagicMock
+ # class ovimException(Exception):
+ #    pass
+ # TODO py3 END
+ from Crypto.PublicKey import RSA
+ import osm_im.vnfd as vnfd_catalog
+ import osm_im.nsd as nsd_catalog
+ from pyangbind.lib.serialise import pybindJSONDecoder
+ from copy import deepcopy
+ from pkg_resources import iter_entry_points
+ # WIM
+ from .wim import sdnconn
+ from .wim.wimconn_fake import FakeConnector
+ from .wim.failing_connector import FailingConnector
+ from .http_tools import errors as httperrors
+ from .wim.engine import WimEngine
+ from .wim.persistence import WimPersistence
+ from pprint import pformat
+ #
+ global global_config
+ # WIM
+ global wim_engine
+ wim_engine = None
+ global sdnconn_imported
+ #
+ global logger
+ global default_volume_size
+ default_volume_size = '5' #size in GB
+ global ovim
+ ovim = None
+ global_config = None
+ plugins = {}   # dictionary with VIM type as key, loaded module as value
+ vim_threads = {"running":{}, "deleting": {}, "names": []}      # threads running for attached-VIMs
+ vim_persistent_info = {}
+ # WIM
+ sdnconn_imported = {}   # dictionary with WIM type as key, loaded module as value
+ wim_threads = {"running":{}, "deleting": {}, "names": []}      # threads running for attached-WIMs
+ wim_persistent_info = {}
+ #
+ logger = logging.getLogger('openmano.nfvo')
+ task_lock = Lock()
+ last_task_id = 0.0
+ db = None
+ db_lock = Lock()
+ class NfvoException(httperrors.HttpMappedError):
+     """Common Class for NFVO errors"""
+ def _load_plugin(name, type="vim"):
+     # type can be vim or sdn
+     global plugins
+     try:
+         for v in iter_entry_points('osm_ro{}.plugins'.format(type), name):
+             plugins[name] = v.load()
+     except Exception as e:
+         logger.critical("Cannot load osm_{}: {}".format(name, e))
+         if name:
+             plugins[name] = FailingConnector("Cannot load osm_{}: {}".format(name, e))
+     if name and name not in plugins:
+         error_text = "Cannot load a module for {t} type '{n}'. The plugin 'osm_{n}' has not been" \
+                      " registered".format(t=type, n=name)
+         logger.critical(error_text)
+         plugins[name] = FailingConnector(error_text)
+         # raise NfvoException("Cannot load a module for {t} type '{n}'. The plugin 'osm_{n}' has not been registered".
+         #                     format(t=type, n=name), httperrors.Bad_Request)
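+ # Usage sketch (illustration only): plugin modules are discovered through
+ # setuptools entry points, so a VIM connector is loaded as e.g.
+ #     _load_plugin("rovim_openstack", type="vim")
+ # which looks up the name in the 'osm_rovim.plugins' entry-point group and, on
+ # failure, stores a FailingConnector so later calls fail with a clear error.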
+ def get_task_id():
+     global last_task_id
+     task_id = t.time()
+     if task_id <= last_task_id:
+         task_id = last_task_id + 0.000001
+     last_task_id = task_id
+     return "ACTION-{:.6f}".format(task_id)
+     # return (t.strftime("%Y%m%dT%H%M%S.{}%Z", t.localtime(task_id))).format(int((task_id % 1)*1e6))
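+ # Example (illustration only, values are made up): two calls within the same
+ # microsecond still yield strictly increasing ids, because last_task_id is
+ # bumped by 0.000001:
+ #     get_task_id()  # e.g. "ACTION-1574500000.000001"
+ #     get_task_id()  #      "ACTION-1574500000.000002"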
+ def new_task(name, params, depends=None):
+     """Deprected!!!"""
+     task_id = get_task_id()
+     task = {"status": "enqueued", "id": task_id, "name": name, "params": params}
+     if depends:
+         task["depends"] = depends
+     return task
+ def is_task_id(id):
+     return id[:5] == "TASK-"
+ def get_non_used_vim_name(datacenter_name, datacenter_id, tenant_name, tenant_id):
+     name = datacenter_name[:16]
+     if name not in vim_threads["names"]:
+         vim_threads["names"].append(name)
+         return name
+     if tenant_name:
+         name = datacenter_name[:16] + "." + tenant_name[:16]
+         if name not in vim_threads["names"]:
+             vim_threads["names"].append(name)
+             return name
+     name = datacenter_id
+     vim_threads["names"].append(name)
+     return name
+ # -- Move
+ def get_non_used_wim_name(wim_name, wim_id, tenant_name, tenant_id):
+     name = wim_name[:16]
+     if name not in wim_threads["names"]:
+         wim_threads["names"].append(name)
+         return name
+     name = wim_name[:16] + "." + tenant_name[:16]
+     if name not in wim_threads["names"]:
+         wim_threads["names"].append(name)
+         return name
+     name = wim_id + "-" + tenant_id
+     wim_threads["names"].append(name)
+     return name
+ def start_service(mydb, persistence=None, wim=None):
+     global db, global_config, plugins, ovim
+     db = nfvo_db.nfvo_db(lock=db_lock)
+     mydb.lock = db_lock
+     db.connect(global_config['db_host'], global_config['db_user'], global_config['db_passwd'], global_config['db_name'])
+     persistence = persistence or WimPersistence(db)
+     try:
+         if "rosdn_fake" not in plugins:
+             plugins["rosdn_fake"] = FakeConnector
+         # starts ovim library
+         ovim = Sdn(db, plugins)
+         global wim_engine
+         wim_engine = wim or WimEngine(persistence, plugins)
+         wim_engine.ovim = ovim
+         ovim.start_service()
+         #delete old unneeded vim_wim_actions
+         clean_db(mydb)
+         # starts vim_threads
+         from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join '\
+                 'datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
+         select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
+                    'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
+                    'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
+                    'user', 'passwd', 'dt.config as dt_config', 'nfvo_tenant_id')
+         vims = mydb.get_rows(FROM=from_, SELECT=select_)
+         for vim in vims:
+             extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
+                    'datacenter_id': vim.get('datacenter_id')}
+             if vim["config"]:
+                 extra.update(yaml.load(vim["config"], Loader=yaml.Loader))
+             if vim.get('dt_config'):
+                 extra.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
+             plugin_name = "rovim_" + vim["type"]
+             if plugin_name not in plugins:
+                 _load_plugin(plugin_name, type="vim")
+             thread_id = vim['datacenter_tenant_id']
+             vim_persistent_info[thread_id] = {}
+             try:
+                 #if not tenant:
+                 #    return -httperrors.Bad_Request, "You must provide a valid tenant name or uuid for VIM  %s" % ( vim["type"])
+                 myvim = plugins[plugin_name].vimconnector(
+                     uuid=vim['datacenter_id'], name=vim['datacenter_name'],
+                     tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
+                     url=vim['vim_url'], url_admin=vim['vim_url_admin'],
+                     user=vim['user'], passwd=vim['passwd'],
+                     config=extra, persistent_info=vim_persistent_info[thread_id]
+                 )
+             except vimconn.vimconnException as e:
+                 myvim = e
+                 logger.error("Cannot launch thread for VIM {} '{}': {}".format(vim['datacenter_name'],
+                                                                                vim['datacenter_id'], e))
+             except Exception as e:
+                 logger.critical("Cannot launch thread for VIM {} '{}': {}".format(vim['datacenter_name'],
+                                                                                   vim['datacenter_id'], e))
+                 # raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, e),
+                 #                     httperrors.Internal_Server_Error)
+             thread_name = get_non_used_vim_name(vim['datacenter_name'], vim['datacenter_id'], vim['vim_tenant_name'],
+                                                 vim['vim_tenant_id'])
+             new_thread = vim_thread(task_lock, plugins, thread_name, None,
+                                     vim['datacenter_tenant_id'], db=db)
+             new_thread.start()
+             vim_threads["running"][thread_id] = new_thread
+         wims = mydb.get_rows(FROM="wim_accounts join wims on wim_accounts.wim_id=wims.uuid",
+                              WHERE={"sdn": "true"},
+                              SELECT=("wim_accounts.uuid as uuid", "type", "wim_accounts.name as name"))
+         for wim in wims:
+             plugin_name = "rosdn_" + wim["type"]
+             if plugin_name not in plugins:
+                 _load_plugin(plugin_name, type="sdn")
+             thread_id = wim['uuid']
+             thread_name = get_non_used_vim_name(wim['name'], wim['uuid'], wim['uuid'], None)
+             new_thread = vim_thread(task_lock, plugins, thread_name, wim['uuid'], None, db=db)
+             new_thread.start()
+             vim_threads["running"][thread_id] = new_thread
+         wim_engine.start_threads()
+     except db_base_Exception as e:
+         raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
+     except ovimException as e:
+         message = str(e)
+         if message[:22] == "DATABASE wrong version":
+             message = "DATABASE wrong version of lib_osm_openvim {msg} -d{dbname} -u{dbuser} -p{dbpass} {ver}' "\
+                       "at host {dbhost}".format(
+                             msg=message[22:-3], dbname=global_config["db_ovim_name"],
+                             dbuser=global_config["db_ovim_user"], dbpass=global_config["db_ovim_passwd"],
+                             ver=message[-3:-1], dbhost=global_config["db_ovim_host"])
+         raise NfvoException(message, httperrors.Bad_Request)
+ def stop_service():
+     global ovim, global_config
+     if ovim:
+         ovim.stop_service()
+     for thread_id, thread in vim_threads["running"].items():
+         thread.insert_task("exit")
+         vim_threads["deleting"][thread_id] = thread
+     vim_threads["running"] = {}
+     if wim_engine:
+         wim_engine.stop_threads()
+     if global_config and global_config.get("console_thread"):
+         for thread in global_config["console_thread"]:
+             thread.terminate = True
+ def get_version():
+     return  ("openmanod version {} {}\n(c) Copyright Telefonica".format(global_config["version"],
+                                                                         global_config["version_date"] ))
+ def clean_db(mydb):
+     """
+     Clean unused or old entries at database to avoid unlimited growing
+     :param mydb: database connector
+     :return: None
+     """
+     # get and delete unused vim_wim_actions: all elements deleted, one week before, instance not present
+     now = t.time()-3600*24*7
+     instance_action_id = None
+     nb_deleted = 0
+     while True:
+         actions_to_delete = mydb.get_rows(
+             SELECT=("item", "item_id", "instance_action_id"),
+             FROM="vim_wim_actions as va join instance_actions as ia on va.instance_action_id=ia.uuid "
+                     "left join instance_scenarios as i on ia.instance_id=i.uuid",
+             WHERE={"va.action": "DELETE", "va.modified_at<": now, "i.uuid": None,
+                    "va.status": ("DONE", "SUPERSEDED")},
+             LIMIT=100
+         )
+         for to_delete in actions_to_delete:
+             mydb.delete_row(FROM="vim_wim_actions", WHERE=to_delete)
+             if instance_action_id != to_delete["instance_action_id"]:
+                 instance_action_id = to_delete["instance_action_id"]
+                 mydb.delete_row(FROM="instance_actions", WHERE={"uuid": instance_action_id})
+         nb_deleted += len(actions_to_delete)
+         if len(actions_to_delete) < 100:
+             break
+     # clean locks
+     mydb.update_rows("vim_wim_actions", UPDATE={"worker": None}, WHERE={"worker<>": None})
+     if nb_deleted:
+         logger.debug("Removed {} unused vim_wim_actions".format(nb_deleted))
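+ # Illustrative usage sketch (not part of the original code): clean_db() deletes in
+ # batches of 100 until a short batch shows nothing older is left. Assuming a
+ # connected database connector 'mydb':
+ #     clean_db(mydb)   # prunes DONE/SUPERSEDED DELETE actions older than 7 days
+ #                      # (cut-off computed above as t.time() - 3600*24*7)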
+ def get_flavorlist(mydb, vnf_id, nfvo_tenant=None):
+     '''Obtain the list of flavor ids used by the VMs of a VNF
+     :param mydb: database connector
+     :param vnf_id: vnf uuid
+     :param nfvo_tenant: optional tenant filter
+     :return: list of flavor uuids (may be empty)
+     '''
+     WHERE_dict={}
+     WHERE_dict['vnf_id'] = vnf_id
+     if nfvo_tenant is not None:
+         WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
+     flavors = mydb.get_rows(FROM='vms join flavors on vms.flavor_id=flavors.uuid',SELECT=('flavor_id',),WHERE=WHERE_dict )
+     flavorList = [flavor['flavor_id'] for flavor in flavors]
+     return flavorList
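+ # Usage sketch (hypothetical ids, for illustration only):
+ #     flavor_ids = get_flavorlist(mydb, vnf_id="11112222-aaaa-bbbb-cccc-333344445555")
+ #     # -> e.g. ['f1a2...', 'f3b4...']; an empty list if the VNF has no VMs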
+ def get_imagelist(mydb, vnf_id, nfvo_tenant=None):
+     """
+     Get used images of all vms belonging to this VNFD
+     :param mydb: database connector
+     :param vnf_id: vnfd uuid
+     :param nfvo_tenant: tenant, not used
+     :return: The list of image uuid used
+     """
+     image_list = []
+     vms = mydb.get_rows(SELECT=('image_id','image_list'), FROM='vms', WHERE={'vnf_id': vnf_id})
+     for vm in vms:
+         if vm["image_id"] and vm["image_id"] not in image_list:
+             image_list.append(vm["image_id"])
+         if vm["image_list"]:
+             vm_image_list = yaml.load(vm["image_list"], Loader=yaml.Loader)
+             for image_dict in vm_image_list:
+                 if image_dict["image_id"] not in image_list:
+                     image_list.append(image_dict["image_id"])
+     return image_list
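+ # Note (illustration, values assumed): the vms.image_list column holds a YAML list of
+ # dicts such as '[{image_id: aaaa-1111, vim_type: openstack}]'; each 'image_id' is
+ # added to the returned list, duplicates excluded.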
+ def get_vim(mydb, nfvo_tenant=None, datacenter_id=None, datacenter_name=None, datacenter_tenant_id=None,
+             vim_tenant=None, vim_tenant_name=None, vim_user=None, vim_passwd=None, ignore_errors=False):
+     '''Obtain a dictionary of VIM (datacenter) classes with some of the input parameters
+     return dictionary with {datacenter_id: vim_class, ... }. vim_class contain:
+             'nfvo_tenant_id','datacenter_id','vim_tenant_id','vim_url','vim_url_admin','datacenter_name','type','user','passwd'
+         raise exception upon error
+     '''
+     global plugins
+     WHERE_dict={}
+     if nfvo_tenant     is not None:  WHERE_dict['nfvo_tenant_id'] = nfvo_tenant
+     if datacenter_id   is not None:  WHERE_dict['d.uuid']  = datacenter_id
+     if datacenter_tenant_id is not None:  WHERE_dict['datacenter_tenant_id']  = datacenter_tenant_id
+     if datacenter_name is not None:  WHERE_dict['d.name']  = datacenter_name
+     if vim_tenant      is not None:  WHERE_dict['dt.vim_tenant_id']  = vim_tenant
+     if vim_tenant_name is not None:  WHERE_dict['vim_tenant_name']  = vim_tenant_name
+     if nfvo_tenant or vim_tenant or vim_tenant_name or datacenter_tenant_id:
+         from_= 'tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as dt on td.datacenter_tenant_id=dt.uuid'
+         select_ = ('type','d.config as config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name',
+                    'dt.uuid as datacenter_tenant_id','dt.vim_tenant_name as vim_tenant_name','dt.vim_tenant_id as vim_tenant_id',
+                    'user','passwd', 'dt.config as dt_config')
+     else:
+         from_ = 'datacenters as d'
+         select_ = ('type','config','d.uuid as datacenter_id', 'vim_url', 'vim_url_admin', 'd.name as datacenter_name')
+     try:
+         vims = mydb.get_rows(FROM=from_, SELECT=select_, WHERE=WHERE_dict )
+         vim_dict={}
+         for vim in vims:
+             extra={'datacenter_tenant_id': vim.get('datacenter_tenant_id'),
+                    'datacenter_id': vim.get('datacenter_id'),
+                    '_vim_type_internal': vim.get('type')}
+             if vim["config"]:
+                 extra.update(yaml.load(vim["config"], Loader=yaml.Loader))
+             if vim.get('dt_config'):
+                 extra.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
+             plugin_name = "rovim_" + vim["type"]
+             if plugin_name not in plugins:
+                 try:
+                     _load_plugin(plugin_name, type="vim")
+                 except NfvoException as e:
+                     if ignore_errors:
+                         logger.error("{}".format(e))
+                         continue
+                     else:
+                         raise
+             try:
+                 if 'datacenter_tenant_id' in vim:
+                     thread_id = vim["datacenter_tenant_id"]
+                     if thread_id not in vim_persistent_info:
+                         vim_persistent_info[thread_id] = {}
+                     persistent_info = vim_persistent_info[thread_id]
+                 else:
+                     persistent_info = {}
+                 #if not tenant:
+                 #    return -httperrors.Bad_Request, "You must provide a valid tenant name or uuid for VIM  %s" % ( vim["type"])
+                 vim_dict[vim['datacenter_id']] = plugins[plugin_name].vimconnector(
+                                 uuid=vim['datacenter_id'], name=vim['datacenter_name'],
+                                 tenant_id=vim.get('vim_tenant_id',vim_tenant),
+                                 tenant_name=vim.get('vim_tenant_name',vim_tenant_name),
+                                 url=vim['vim_url'], url_admin=vim['vim_url_admin'],
+                                 user=vim.get('user',vim_user), passwd=vim.get('passwd',vim_passwd),
+                                 config=extra, persistent_info=persistent_info
+                         )
+             except Exception as e:
+                 if ignore_errors:
+                     logger.error("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, str(e)))
+                     continue
+                 http_code = httperrors.Internal_Server_Error
+                 if isinstance(e, vimconn.vimconnException):
+                     http_code = e.http_code
+                 raise NfvoException("Error at VIM  {}; {}: {}".format(vim["type"], type(e).__name__, str(e)), http_code)
+         return vim_dict
+     except db_base_Exception as e:
+         raise NfvoException(str(e) + " at nfvo.get_vim", e.http_code)
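+ # Usage sketch (illustrative; identifiers are assumptions, not fixed API values):
+ #     vims = get_vim(mydb, nfvo_tenant=tenant_uuid, ignore_errors=True)
+ #     for datacenter_id, vim_conn in vims.items():
+ #         nets = vim_conn.get_network_list()   # any vimconn.vimconnector method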
+ def rollback(mydb,  vims, rollback_list):
+     undeleted_items=[]
+     # delete things in reverse order
+     for i in range(len(rollback_list)-1, -1, -1):
+         item = rollback_list[i]
+         if item["where"]=="vim":
+             if item["vim_id"] not in vims:
+                 continue
+             if is_task_id(item["uuid"]):
+                 continue
+             vim = vims[item["vim_id"]]
+             try:
+                 if item["what"]=="image":
+                     vim.delete_image(item["uuid"])
+                     mydb.delete_row(FROM="datacenters_images", WHERE={"datacenter_vim_id": vim["id"], "vim_id":item["uuid"]})
+                 elif item["what"]=="flavor":
+                     vim.delete_flavor(item["uuid"])
+                     mydb.delete_row(FROM="datacenters_flavors", WHERE={"datacenter_vim_id": vim["id"], "vim_id":item["uuid"]})
+                 elif item["what"]=="network":
+                     vim.delete_network(item["uuid"])
+                 elif item["what"]=="vm":
+                     vim.delete_vminstance(item["uuid"])
+             except vimconn.vimconnException as e:
+                 logger.error("Error in rollback. Not possible to delete VIM %s '%s'. Message: %s", item['what'], item["uuid"], str(e))
+                 undeleted_items.append("{} {} from VIM {}".format(item['what'], item["uuid"], vim["name"]))
+             except db_base_Exception as e:
+                 logger.error("Error in rollback. Not possible to delete %s '%s' from DB.datacenters Message: %s", item['what'], item["uuid"], str(e))
+         else: # where==mano
+             try:
+                 if item["what"]=="image":
+                     mydb.delete_row(FROM="images", WHERE={"uuid": item["uuid"]})
+                 elif item["what"]=="flavor":
+                     mydb.delete_row(FROM="flavors", WHERE={"uuid": item["uuid"]})
+             except db_base_Exception as e:
+                 logger.error("Error in rollback. Not possible to delete %s '%s' from DB. Message: %s", item['what'], item["uuid"], str(e))
+                 undeleted_items.append("{} '{}'".format(item['what'], item["uuid"]))
+     if not undeleted_items:
+         return True, " Rollback successful."
+     else:
+         return False, " Rollback failed to delete: " + str(undeleted_items)
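+ # Shape of the rollback_list entries handled above (sketch; keys taken from this module):
+ #     {"where": "vim",  "vim_id": <datacenter_id>, "what": "image"|"flavor"|"network"|"vm", "uuid": <vim_uuid>}
+ #     {"where": "mano", "what": "image"|"flavor", "uuid": <mano_uuid>}
+ # Entries are undone in reverse creation order; pending task ids (is_task_id) are skipped.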
+ def check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1):
+     global global_config
+     # create a dictionary of vnfc-name: interface-list key/value pairs
+     vnfc_interfaces={}
+     for vnfc in vnf_descriptor["vnf"]["VNFC"]:
+         name_dict = {}
+         #dataplane interfaces
+         for numa in vnfc.get("numas",() ):
+             for interface in numa.get("interfaces",()):
+                 if interface["name"] in name_dict:
+                     raise NfvoException(
+                         "Error at vnf:VNFC[name:'{}']:numas:interfaces:name, interface name '{}' already used in this VNFC".format(
+                             vnfc["name"], interface["name"]),
+                         httperrors.Bad_Request)
+                 name_dict[ interface["name"] ] = "underlay"
+         #bridge interfaces
+         for interface in vnfc.get("bridge-ifaces",() ):
+             if interface["name"] in name_dict:
+                 raise NfvoException(
+                     "Error at vnf:VNFC[name:'{}']:bridge-ifaces:name, interface name '{}' already used in this VNFC".format(
+                         vnfc["name"], interface["name"]),
+                     httperrors.Bad_Request)
+             name_dict[ interface["name"] ] = "overlay"
+         vnfc_interfaces[ vnfc["name"] ] = name_dict
+         # check boot-data info
+         # if "boot-data" in vnfc:
+         #     # check that user-data is incompatible with users and config-files
+         #     if (vnfc["boot-data"].get("users") or vnfc["boot-data"].get("config-files")) and vnfc["boot-data"].get("user-data"):
+         #         raise NfvoException(
+         #             "Error at vnf:VNFC:boot-data, fields 'users' and 'config-files' are not compatible with 'user-data'",
+         #             httperrors.Bad_Request)
+     # check if the info in external-connections matches the interfaces declared at the VNFCs
+     name_list=[]
+     for external_connection in vnf_descriptor["vnf"].get("external-connections",() ):
+         if external_connection["name"] in name_list:
+             raise NfvoException(
+                 "Error at vnf:external-connections:name, value '{}' already used as an external-connection".format(
+                     external_connection["name"]),
+                 httperrors.Bad_Request)
+         name_list.append(external_connection["name"])
+         if external_connection["VNFC"] not in vnfc_interfaces:
+             raise NfvoException(
+                 "Error at vnf:external-connections[name:'{}']:VNFC, value '{}' does not match any VNFC".format(
+                     external_connection["name"], external_connection["VNFC"]),
+                 httperrors.Bad_Request)
+         if external_connection["local_iface_name"] not in vnfc_interfaces[ external_connection["VNFC"] ]:
+             raise NfvoException(
+                 "Error at vnf:external-connections[name:'{}']:local_iface_name, value '{}' does not match any interface of this VNFC".format(
+                     external_connection["name"],
+                     external_connection["local_iface_name"]),
+                 httperrors.Bad_Request )
+     # check if the info in internal-connections matches the interfaces declared at the VNFCs
+     name_list=[]
+     for internal_connection in vnf_descriptor["vnf"].get("internal-connections",() ):
+         if internal_connection["name"] in name_list:
+             raise NfvoException(
+                 "Error at vnf:internal-connections:name, value '{}' already used as an internal-connection".format(
+                     internal_connection["name"]),
+                 httperrors.Bad_Request)
+         name_list.append(internal_connection["name"])
+         #We should check that internal-connections of type "ptp" have only 2 elements
+         if len(internal_connection["elements"])>2 and (internal_connection.get("type") == "ptp" or internal_connection.get("type") == "e-line"):
+             raise NfvoException(
+                 "Error at 'vnf:internal-connections[name:'{}']:elements', size must be 2 for a '{}' type. Consider changing it to '{}' type".format(
+                     internal_connection["name"],
+                     'ptp' if vnf_descriptor_version==1 else 'e-line',
+                     'data' if vnf_descriptor_version==1 else "e-lan"),
+                 httperrors.Bad_Request)
+         for port in internal_connection["elements"]:
+             vnf = port["VNFC"]
+             iface = port["local_iface_name"]
+             if vnf not in vnfc_interfaces:
+                 raise NfvoException(
+                     "Error at vnf:internal-connections[name:'{}']:elements[]:VNFC, value '{}' does not match any VNFC".format(
+                         internal_connection["name"], vnf),
+                     httperrors.Bad_Request)
+             if iface not in vnfc_interfaces[ vnf ]:
+                 raise NfvoException(
+                     "Error at vnf:internal-connections[name:'{}']:elements[]:local_iface_name, value '{}' does not match any interface of this VNFC".format(
+                         internal_connection["name"], iface),
+                     httperrors.Bad_Request)
+             if vnf_descriptor_version==1 and "type" not in internal_connection:
+                 if vnfc_interfaces[vnf][iface] == "overlay":
+                     internal_connection["type"] = "bridge"
+                 else:
+                     internal_connection["type"] = "data"
+             if vnf_descriptor_version==2 and "implementation" not in internal_connection:
+                 if vnfc_interfaces[vnf][iface] == "overlay":
+                     internal_connection["implementation"] = "overlay"
+                 else:
+                     internal_connection["implementation"] = "underlay"
+             if (internal_connection.get("type") == "data" or internal_connection.get("type") == "ptp" or \
+                 internal_connection.get("implementation") == "underlay") and vnfc_interfaces[vnf][iface] == "overlay":
+                 raise NfvoException(
+                     "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
+                         internal_connection["name"],
+                         iface, 'bridge' if vnf_descriptor_version==1 else 'overlay',
+                         'data' if vnf_descriptor_version==1 else 'underlay'),
+                     httperrors.Bad_Request)
+             if (internal_connection.get("type") == "bridge" or internal_connection.get("implementation") == "overlay") and \
+                 vnfc_interfaces[vnf][iface] == "underlay":
+                 raise NfvoException(
+                     "Error at vnf:internal-connections[name:'{}']:elements[]:{}, interface of type {} connected to an {} network".format(
+                         internal_connection["name"], iface,
+                         'data' if vnf_descriptor_version==1 else 'underlay',
+                         'bridge' if vnf_descriptor_version==1 else 'overlay'),
+                     httperrors.Bad_Request)
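+ # Minimal descriptor fragment that passes the checks above (illustrative only):
+ #     {"vnf": {"VNFC": [{"name": "vm1", "bridge-ifaces": [{"name": "eth0"}]}],
+ #              "internal-connections": [{"name": "net1", "elements":
+ #                  [{"VNFC": "vm1", "local_iface_name": "eth0"}]}]}}
+ # With vnf_descriptor_version=1 and no explicit 'type', net1 defaults to 'bridge'
+ # because eth0 is an overlay (bridge) interface.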
+ def create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=None):
+     # look if the image exists
+     if only_create_at_vim:
+         image_mano_id = image_dict['uuid']
+         if return_on_error is None:
+             return_on_error = True
+     else:
+         if image_dict['location']:
+             images = mydb.get_rows(FROM="images", WHERE={'location':image_dict['location'], 'metadata':image_dict['metadata']})
+         else:
+             images = mydb.get_rows(FROM="images", WHERE={'universal_name':image_dict['universal_name'], 'checksum':image_dict['checksum']})
+         if len(images)>=1:
+             image_mano_id = images[0]['uuid']
+         else:
+             #create image in MANO DB
+             temp_image_dict={'name':image_dict['name'],         'description':image_dict.get('description',None),
+                             'location':image_dict['location'],  'metadata':image_dict.get('metadata',None),
+                             'universal_name':image_dict['universal_name'] , 'checksum':image_dict['checksum']
+                             }
+             #temp_image_dict['location'] = image_dict.get('new_location') if image_dict['location'] is None
+             image_mano_id = mydb.new_row('images', temp_image_dict, add_uuid=True)
+             rollback_list.append({"where":"mano", "what":"image","uuid":image_mano_id})
+     #create image at every vim
+     for vim_id,vim in vims.items():
+         datacenter_vim_id = vim["config"]["datacenter_tenant_id"]
+         image_created="false"
+         #look at database
+         image_db = mydb.get_rows(FROM="datacenters_images",
+                                  WHERE={'datacenter_vim_id': datacenter_vim_id, 'image_id': image_mano_id})
+         # look at VIM if this image exists
+         try:
+             if image_dict['location'] is not None:
+                 image_vim_id = vim.get_image_id_from_path(image_dict['location'])
+             else:
+                 filter_dict = {}
+                 filter_dict['name'] = image_dict['universal_name']
+                 if image_dict.get('checksum') is not None:
+                     filter_dict['checksum'] = image_dict['checksum']
+                 #logger.debug('>>>>>>>> Filter dict: %s', str(filter_dict))
+                 vim_images = vim.get_image_list(filter_dict)
+                 #logger.debug('>>>>>>>> VIM images: %s', str(vim_images))
+                 if len(vim_images) > 1:
+                     raise vimconn.vimconnException("More than one candidate VIM image found for filter: {}".format(str(filter_dict)), httperrors.Conflict)
+                 elif len(vim_images) == 0:
+                     raise vimconn.vimconnNotFoundException("Image not found at VIM with filter: '{}'".format(str(filter_dict)))
+                 else:
+                     #logger.debug('>>>>>>>> VIM image 0: %s', str(vim_images[0]))
+                     image_vim_id = vim_images[0]['id']
+         except vimconn.vimconnNotFoundException as e:
+             #Create the image in VIM only if image_dict['location'] or image_dict['new_location'] is not None
+             try:
+                 #image_dict['location']=image_dict.get('new_location') if image_dict['location'] is None
+                 if image_dict['location']:
+                     image_vim_id = vim.new_image(image_dict)
+                     rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"image","uuid":image_vim_id})
+                     image_created="true"
+                 else:
+                     # If we reach this point, the image has an image name (and optionally a checksum) but could not be found
+                     raise vimconn.vimconnException(str(e))
+             except vimconn.vimconnException as e:
+                 if return_on_error:
+                     logger.error("Error creating image at VIM '%s': %s", vim["name"], str(e))
+                     raise
+                 image_vim_id = None
+                 logger.warn("Error creating image at VIM '%s': %s", vim["name"], str(e))
+                 continue
+         except vimconn.vimconnException as e:
+             if return_on_error:
+                 logger.error("Error contacting VIM to know if the image exists at VIM: %s", str(e))
+                 raise
+             logger.warn("Error contacting VIM to know if the image exists at VIM: %s", str(e))
+             image_vim_id = None
+             continue
+         # if we reach here, the image has been created or already existed
+         if len(image_db)==0:
+             #add new vim_id at datacenters_images
+             mydb.new_row('datacenters_images', {'datacenter_vim_id': datacenter_vim_id,
+                                                 'image_id':image_mano_id,
+                                                 'vim_id': image_vim_id,
+                                                 'created':image_created})
+         elif image_db[0]["vim_id"]!=image_vim_id:
+             #modify existing vim_id at datacenters_images
+             mydb.update_rows('datacenters_images', UPDATE={'vim_id': image_vim_id},
+                              WHERE={'datacenter_vim_id': datacenter_vim_id, 'image_id': image_mano_id})
+     return image_vim_id if only_create_at_vim else image_mano_id
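+ # Call sketch (assumed input values, for illustration):
+ #     image_dict = {'name': 'cirros', 'universal_name': 'cirros-0.4', 'checksum': None,
+ #                   'location': None, 'metadata': None, 'description': None}
+ #     image_mano_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
+ # With only_create_at_vim=False the MANO uuid is returned; the per-VIM ids are
+ # tracked at the datacenters_images table.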
+ def create_or_use_flavor(mydb, vims, flavor_dict, rollback_list, only_create_at_vim=False, return_on_error = None):
+     temp_flavor_dict= {'disk':flavor_dict.get('disk',0),
+             'ram':flavor_dict.get('ram'),
+             'vcpus':flavor_dict.get('vcpus'),
+         }
+     if 'extended' in flavor_dict and flavor_dict['extended'] is None:
+         del flavor_dict['extended']
+     if 'extended' in flavor_dict:
+         temp_flavor_dict['extended']=yaml.safe_dump(flavor_dict['extended'],default_flow_style=True,width=256)
+     # look if the flavor exists
+     if only_create_at_vim:
+         flavor_mano_id = flavor_dict['uuid']
+         if return_on_error is None:
+             return_on_error = True
+     else:
+         flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
+         if len(flavors)>=1:
+             flavor_mano_id = flavors[0]['uuid']
+         else:
+             #create flavor
+             #create one by one the images of aditional disks
+             dev_image_list=[] #list of images
+             if 'extended' in flavor_dict and flavor_dict['extended'] is not None:
+                 dev_nb=0
+                 for device in flavor_dict['extended'].get('devices',[]):
+                     if "image" not in device and "image name" not in device:
+                         continue
+                     image_dict={}
+                     image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
+                     image_dict['universal_name']=device.get('image name')
+                     image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
+                     image_dict['location']=device.get('image')
+                     #image_dict['new_location']=vnfc.get('image location')
+                     image_dict['checksum']=device.get('image checksum')
+                     image_metadata_dict = device.get('image metadata', None)
+                     image_metadata_str = None
+                     if image_metadata_dict is not None:
+                         image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
+                     image_dict['metadata']=image_metadata_str
+                     image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
+                     #print "Additional disk image id for VNFC %s: %s" % (flavor_dict['name']+str(dev_nb)+"-img", image_id)
+                     dev_image_list.append(image_id)
+                     dev_nb += 1
+             temp_flavor_dict['name'] = flavor_dict['name']
+             temp_flavor_dict['description'] = flavor_dict.get('description',None)
+             content = mydb.new_row('flavors', temp_flavor_dict, add_uuid=True)
+             flavor_mano_id= content
+             rollback_list.append({"where":"mano", "what":"flavor","uuid":flavor_mano_id})
+     #create flavor at every vim
+     if 'uuid' in flavor_dict:
+         del flavor_dict['uuid']
+     flavor_vim_id=None
+     for vim_id,vim in vims.items():
+         datacenter_vim_id = vim["config"]["datacenter_tenant_id"]
+         flavor_created="false"
+         #look at database
+         flavor_db = mydb.get_rows(FROM="datacenters_flavors",
+                                   WHERE={'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id})
+         #look at VIM if this flavor exist  SKIPPED
+         #res_vim, flavor_vim_id = vim.get_flavor_id_from_path(flavor_dict['location'])
+         #if res_vim < 0:
+         #    print "Error contacting VIM to know if the flavor %s existed previously." %flavor_vim_id
+         #    continue
+         #elif res_vim==0:
+         # Create the flavor in VIM
+         # Translate images at devices from MANO id to VIM id
+         disk_list = []
+         if 'extended' in flavor_dict and flavor_dict['extended'] is not None and "devices" in flavor_dict['extended']:
+             # make a copy of original devices
+             devices_original=[]
+             for device in flavor_dict["extended"].get("devices",[]):
+                 dev={}
+                 dev.update(device)
+                 devices_original.append(dev)
+                 if 'image' in device:
+                     del device['image']
+                 if 'image metadata' in device:
+                     del device['image metadata']
+                 if 'image checksum' in device:
+                     del device['image checksum']
+             dev_nb = 0
+             for index in range(len(devices_original)):
+                 device = devices_original[index]
+                 if "image" not in device and "image name" not in device:
+                     disk_list.append({'size': device.get('size', default_volume_size), 'name': device.get('name')})
+                     continue
+                 image_dict={}
+                 image_dict['name']=device.get('image name',flavor_dict['name']+str(dev_nb)+"-img")
+                 image_dict['universal_name']=device.get('image name')
+                 image_dict['description']=flavor_dict['name']+str(dev_nb)+"-img"
+                 image_dict['location']=device.get('image')
+                 # image_dict['new_location']=device.get('image location')
+                 image_dict['checksum']=device.get('image checksum')
+                 image_metadata_dict = device.get('image metadata', None)
+                 image_metadata_str = None
+                 if image_metadata_dict is not None:
+                     image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
+                 image_dict['metadata']=image_metadata_str
+                 image_mano_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=False, return_on_error=return_on_error )
+                 image_dict["uuid"]=image_mano_id
+                 image_vim_id=create_or_use_image(mydb, vims, image_dict, rollback_list, only_create_at_vim=True, return_on_error=return_on_error)
+                 # save disk information (the image it must be based on, and its size)
+                 disk_list.append({'image_id': image_vim_id, 'size': device.get('size', default_volume_size)})
+                 flavor_dict["extended"]["devices"][index]['imageRef']=image_vim_id
+                 dev_nb += 1
+         if len(flavor_db)>0:
+             #check that this vim_id exist in VIM, if not create
+             flavor_vim_id=flavor_db[0]["vim_id"]
+             try:
+                 vim.get_flavor(flavor_vim_id)
+                 continue #flavor exist
+             except vimconn.vimconnException:
+                 pass
+         #create flavor at vim
+         logger.debug("nfvo.create_or_use_flavor() adding flavor to VIM %s", vim["name"])
+         try:
+             flavor_vim_id = None
+             flavor_vim_id=vim.get_flavor_id_from_data(flavor_dict)
+             flavor_created="false"
+         except vimconn.vimconnException:
+             pass
+         try:
+             if not flavor_vim_id:
+                 flavor_vim_id = vim.new_flavor(flavor_dict)
+                 rollback_list.append({"where":"vim", "vim_id": vim_id, "what":"flavor","uuid":flavor_vim_id})
+                 flavor_created="true"
+         except vimconn.vimconnException as e:
+             if return_on_error:
+                 logger.error("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
+                 raise
+             logger.warn("Error creating flavor at VIM %s: %s.", vim["name"], str(e))
+             flavor_vim_id = None
+             continue
+         # if we reach here, the flavor has been created or already exists
+         if len(flavor_db)==0:
+             #add new vim_id at datacenters_flavors
+             extended_devices_yaml = None
+             if len(disk_list) > 0:
+                 extended_devices = dict()
+                 extended_devices['disks'] = disk_list
+                 extended_devices_yaml = yaml.safe_dump(extended_devices,default_flow_style=True,width=256)
+             mydb.new_row('datacenters_flavors',
+                         {'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id, 'vim_id': flavor_vim_id,
+                         'created': flavor_created, 'extended': extended_devices_yaml})
+         elif flavor_db[0]["vim_id"]!=flavor_vim_id:
+             #modify existing vim_id at datacenters_flavors
+             mydb.update_rows('datacenters_flavors', UPDATE={'vim_id':flavor_vim_id},
+                              WHERE={'datacenter_vim_id': datacenter_vim_id, 'flavor_id': flavor_mano_id})
+     return flavor_vim_id if only_create_at_vim else flavor_mano_id
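+ # Bookkeeping sketch for the loop above (illustrative): each datacenters_flavors row
+ # stores {datacenter_vim_id, flavor_id, vim_id, created, extended}, where 'extended'
+ # is a YAML dump such as '{disks: [{size: 10, image_id: <vim-image-id>}]}'.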
+ def get_str(obj, field, length):
+     """
+     Obtain the value of a field as str, truncated to a maximum length
+     :param obj: dictionary to read from
+     :param field: key to look up
+     :param length: maximum number of characters kept
+     :return: truncated str value, or None if the field is missing
+     """
+     value = obj.get(field)
+     if value is not None:
+         value = str(value)[:length]
+     return value
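+ # Example (illustrative): get_str({"name": "very-long-name"}, "name", 4) -> "very";
+ # a missing field returns None instead of raising.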
+ def _lookfor_or_create_image(db_image, mydb, descriptor):
+     """
+     Fill the db_image dictionary with the image content. Check whether an image with this name and checksum already exists
+     :param db_image: dictionary where the image data is filled
+     :param mydb: database connector
+     :param descriptor: yang descriptor
+     :return: uuid if the image already exists at DB, or None if a new image must be created with the data filled at db_image
+     """
+     db_image["name"] = get_str(descriptor, "image", 255)
+     db_image["checksum"] = get_str(descriptor, "image-checksum", 32)
+     if not db_image["checksum"]:  # Ensure that if empty string, None is stored
+         db_image["checksum"] = None
+     if db_image["name"].startswith("/"):
+         db_image["location"] = db_image["name"]
+         existing_images = mydb.get_rows(FROM="images", WHERE={'location': db_image["location"]})
+     else:
+         db_image["universal_name"] = db_image["name"]
+         existing_images = mydb.get_rows(FROM="images", WHERE={'universal_name': db_image['universal_name'],
+                                                               'checksum': db_image['checksum']})
+     if existing_images:
+         return existing_images[0]["uuid"]
+     else:
+         image_uuid = str(uuid4())
+         db_image["uuid"] = image_uuid
+         return None
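+ # Behaviour sketch (assumed descriptor values): {"image": "/path/x.qcow2"} takes the
+ # location branch (matched on 'location'); {"image": "ubuntu16", "image-checksum": c}
+ # takes the universal-name branch (matched on name + checksum).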
+ def get_resource_allocation_params(quota_descriptor):
+     """
+     read the quota_descriptor from vnfd and fetch the resource allocation properties from the descriptor object
+     :param quota_descriptor: cpu/mem/vif/disk-io quota descriptor
+     :return: quota params for limit, reserve, shares from the descriptor object
+     """
+     quota = {}
+     if quota_descriptor.get("limit"):
+         quota["limit"] = int(quota_descriptor["limit"])
+     if quota_descriptor.get("reserve"):
+         quota["reserve"] = int(quota_descriptor["reserve"])
+     if quota_descriptor.get("shares"):
+         quota["shares"] = int(quota_descriptor["shares"])
+     return quota
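+ # Example mapping (illustrative):
+ #     get_resource_allocation_params({"limit": "1000", "reserve": "500"})
+ #     # -> {"limit": 1000, "reserve": 500}; "shares" omitted when absent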
+ def new_vnfd_v3(mydb, tenant_id, vnf_descriptor):
+     """
+     Parses an OSM IM vnfd_catalog and inserts it at the DB
+     :param mydb:
+     :param tenant_id:
+     :param vnf_descriptor:
+     :return: The list of created vnf ids
+     """
+     try:
+         myvnfd = vnfd_catalog.vnfd()
+         try:
+             pybindJSONDecoder.load_ietf_json(vnf_descriptor, None, None, obj=myvnfd, path_helper=True,
+                                              skip_unknown=True)
+         except Exception as e:
+             raise NfvoException("Error. Invalid VNF descriptor format " + str(e), httperrors.Bad_Request)
+         db_vnfs = []
+         db_nets = []
+         db_vms = []
+         db_vms_index = 0
+         db_interfaces = []
+         db_images = []
+         db_flavors = []
+         db_ip_profiles_index = 0
+         db_ip_profiles = []
+         uuid_list = []
+         vnfd_uuid_list = []
+         vnfd_catalog_descriptor = vnf_descriptor.get("vnfd:vnfd-catalog")
+         if not vnfd_catalog_descriptor:
+             vnfd_catalog_descriptor = vnf_descriptor.get("vnfd-catalog")
+         vnfd_descriptor_list = vnfd_catalog_descriptor.get("vnfd")
+         if not vnfd_descriptor_list:
+             vnfd_descriptor_list = vnfd_catalog_descriptor.get("vnfd:vnfd")
+         for vnfd_yang in myvnfd.vnfd_catalog.vnfd.values():
+             vnfd = vnfd_yang.get()
+             # table vnf
+             vnf_uuid = str(uuid4())
+             uuid_list.append(vnf_uuid)
+             vnfd_uuid_list.append(vnf_uuid)
+             vnfd_id = get_str(vnfd, "id", 255)
+             db_vnf = {
+                 "uuid": vnf_uuid,
+                 "osm_id": vnfd_id,
+                 "name": get_str(vnfd, "name", 255),
+                 "description": get_str(vnfd, "description", 255),
+                 "tenant_id": tenant_id,
+                 "vendor": get_str(vnfd, "vendor", 255),
+                 "short_name": get_str(vnfd, "short-name", 255),
+                 "descriptor": str(vnf_descriptor)[:60000]
+             }
+             for vnfd_descriptor in vnfd_descriptor_list:
+                 if vnfd_descriptor["id"] == str(vnfd["id"]):
+                     break
+             # table ip_profiles (ip-profiles)
+             ip_profile_name2db_table_index = {}
+             for ip_profile in vnfd.get("ip-profiles").values():
+                 db_ip_profile = {
+                     "ip_version": str(ip_profile["ip-profile-params"].get("ip-version", "ipv4")),
+                     "subnet_address": str(ip_profile["ip-profile-params"].get("subnet-address")),
+                     "gateway_address": str(ip_profile["ip-profile-params"].get("gateway-address")),
+                     "dhcp_enabled": str(ip_profile["ip-profile-params"]["dhcp-params"].get("enabled", True)),
+                     "dhcp_start_address": str(ip_profile["ip-profile-params"]["dhcp-params"].get("start-address")),
+                     "dhcp_count": str(ip_profile["ip-profile-params"]["dhcp-params"].get("count")),
+                 }
+                 dns_list = []
+                 for dns in ip_profile["ip-profile-params"]["dns-server"].values():
+                     dns_list.append(str(dns.get("address")))
+                 db_ip_profile["dns_address"] = ";".join(dns_list)
+                 if ip_profile["ip-profile-params"].get('security-group'):
+                     db_ip_profile["security_group"] = ip_profile["ip-profile-params"]['security-group']
+                 ip_profile_name2db_table_index[str(ip_profile["name"])] = db_ip_profiles_index
+                 db_ip_profiles_index += 1
+                 db_ip_profiles.append(db_ip_profile)
+             # table nets (internal-vld)
+             net_id2uuid = {}  # for mapping interface with network
+             for vld in vnfd.get("internal-vld").values():
+                 net_uuid = str(uuid4())
+                 uuid_list.append(net_uuid)
+                 db_net = {
+                     "name": get_str(vld, "name", 255),
+                     "vnf_id": vnf_uuid,
+                     "uuid": net_uuid,
+                     "description": get_str(vld, "description", 255),
+                     "osm_id": get_str(vld, "id", 255),
+                     "type": "bridge",   # TODO adjust depending on connection point type
+                 }
+                 net_id2uuid[vld.get("id")] = net_uuid
+                 db_nets.append(db_net)
+                 # ip-profile, link db_ip_profile with db_sce_net
+                 if vld.get("ip-profile-ref"):
+                     ip_profile_name = vld.get("ip-profile-ref")
+                     if ip_profile_name not in ip_profile_name2db_table_index:
+                         raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vld[{}]':'ip-profile-ref':"
+                                             "'{}'. Reference to a non-existing 'ip_profiles'".format(
+                                                 str(vnfd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
+                                             httperrors.Bad_Request)
+                     db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["net_id"] = net_uuid
+                 else:  #check no ip-address has been defined
+                     for icp in vld.get("internal-connection-point").values():
+                         if icp.get("ip-address"):
+                             raise NfvoException("Error at 'vnfd[{}]':'vld[{}]':'internal-connection-point[{}]' "
+                                             "contains an ip-address but no ip-profile has been defined at VLD".format(
+                                                 str(vnfd["id"]), str(vld["id"]), str(icp["id"])),
+                                             httperrors.Bad_Request)
+             # connection points variable declaration
+             cp_name2iface_uuid = {}
+             cp_name2vdu_id = {}
+             cp_name2vm_uuid = {}
+             cp_name2db_interface = {}
+             vdu_id2cp_name = {}  # stored only when exactly one external connection point is present at this VDU
+             # table vms (vdus)
+             vdu_id2uuid = {}
+             vdu_id2db_table_index = {}
+             mgmt_access = {}
+             for vdu in vnfd.get("vdu").values():
+                 for vdu_descriptor in vnfd_descriptor["vdu"]:
+                     if vdu_descriptor["id"] == str(vdu["id"]):
+                         break
+                 vm_uuid = str(uuid4())
+                 uuid_list.append(vm_uuid)
+                 vdu_id = get_str(vdu, "id", 255)
+                 db_vm = {
+                     "uuid": vm_uuid,
+                     "osm_id": vdu_id,
+                     "name": get_str(vdu, "name", 255),
+                     "description": get_str(vdu, "description", 255),
+                     "pdu_type": get_str(vdu, "pdu-type", 255),
+                     "vnf_id": vnf_uuid,
+                 }
+                 vdu_id2uuid[db_vm["osm_id"]] = vm_uuid
+                 vdu_id2db_table_index[db_vm["osm_id"]] = db_vms_index
+                 if vdu.get("count"):
+                     db_vm["count"] = int(vdu["count"])
+                 # table image
+                 image_present = False
+                 if vdu.get("image"):
+                     image_present = True
+                     db_image = {}
+                     image_uuid = _lookfor_or_create_image(db_image, mydb, vdu)
+                     if not image_uuid:
+                         image_uuid = db_image["uuid"]
+                         db_images.append(db_image)
+                     db_vm["image_id"] = image_uuid
+                 if vdu.get("alternative-images"):
+                     vm_alternative_images = []
+                     for alt_image in vdu.get("alternative-images").values():
+                         db_image = {}
+                         image_uuid = _lookfor_or_create_image(db_image, mydb, alt_image)
+                         if not image_uuid:
+                             image_uuid = db_image["uuid"]
+                             db_images.append(db_image)
+                         vm_alternative_images.append({
+                             "image_id": image_uuid,
+                             "vim_type": str(alt_image["vim-type"]),
+                             # "universal_name": str(alt_image["image"]),
+                             # "checksum": str(alt_image["image-checksum"]) if alt_image.get("image-checksum") else None
+                         })
+                     db_vm["image_list"] = yaml.safe_dump(vm_alternative_images, default_flow_style=True, width=256)
+                 # volumes
+                 devices = []
+                 if vdu.get("volumes"):
+                     for volume_key in vdu["volumes"]:
+                         volume = vdu["volumes"][volume_key]
+                         if not image_present:
+                             # Convert the first volume to vnfc.image
+                             image_present = True
+                             db_image = {}
+                             image_uuid = _lookfor_or_create_image(db_image, mydb, volume)
+                             if not image_uuid:
+                                 image_uuid = db_image["uuid"]
+                                 db_images.append(db_image)
+                             db_vm["image_id"] = image_uuid
+                         else:
+                             # Add Openmano devices
+                             device = {"name": str(volume.get("name"))}
+                             device["type"] = str(volume.get("device-type"))
+                             if volume.get("size"):
+                                 device["size"] = int(volume["size"])
+                             if volume.get("image"):
+                                 device["image name"] = str(volume["image"])
+                                 if volume.get("image-checksum"):
+                                     device["image checksum"] = str(volume["image-checksum"])
+                             devices.append(device)
+                 if not db_vm.get("image_id"):
+                     if not db_vm["pdu_type"]:
+                         raise NfvoException("No image defined for VDU '{}'".format(vdu_id), httperrors.Bad_Request)
+                     # create a fake image
+                 # cloud-init
+                 boot_data = {}
+                 if vdu.get("cloud-init"):
+                     boot_data["user-data"] = str(vdu["cloud-init"])
+                 elif vdu.get("cloud-init-file"):
+                     # TODO: where is this file content stored?
+                     # boot_data["user-data"] = vnfd_yang.files[vdu["cloud-init-file"]]
+                     boot_data["user-data"] = str(vdu["cloud-init-file"])
+                 if vdu.get("supplemental-boot-data"):
+                     if vdu["supplemental-boot-data"].get('boot-data-drive'):
+                         boot_data['boot-data-drive'] = True
+                     if vdu["supplemental-boot-data"].get('config-file'):
+                         om_cfgfile_list = list()
+                         for custom_config_file in vdu["supplemental-boot-data"]['config-file'].values():
+                             # TODO: where is this file content stored?
+                             cfg_source = str(custom_config_file["source"])
+                             om_cfgfile_list.append({"dest": custom_config_file["dest"],
+                                                     "content": cfg_source})
+                         boot_data['config-files'] = om_cfgfile_list
+                 if boot_data:
+                     db_vm["boot_data"] = yaml.safe_dump(boot_data, default_flow_style=True, width=256)
+                 db_vms.append(db_vm)
+                 db_vms_index += 1
+                 # table interfaces (internal/external interfaces)
+                 flavor_epa_interfaces = []
+                 # for iface in chain(vdu.get("internal-interface").values(), vdu.get("external-interface").values()):
+                 for iface in vdu.get("interface").values():
+                     flavor_epa_interface = {}
+                     iface_uuid = str(uuid4())
+                     uuid_list.append(iface_uuid)
+                     db_interface = {
+                         "uuid": iface_uuid,
+                         "internal_name": get_str(iface, "name", 255),
+                         "vm_id": vm_uuid,
+                     }
+                     flavor_epa_interface["name"] = db_interface["internal_name"]
+                     if iface.get("virtual-interface").get("vpci"):
+                         db_interface["vpci"] = get_str(iface.get("virtual-interface"), "vpci", 12)
+                         flavor_epa_interface["vpci"] = db_interface["vpci"]
+                     if iface.get("virtual-interface").get("bandwidth"):
+                         bps = int(iface.get("virtual-interface").get("bandwidth"))
+                         db_interface["bw"] = int(math.ceil(bps / 1000000.0))
+                         flavor_epa_interface["bandwidth"] = "{} Mbps".format(db_interface["bw"])
+                     if iface.get("virtual-interface").get("type") == "OM-MGMT":
+                         db_interface["type"] = "mgmt"
+                     elif iface.get("virtual-interface").get("type") in ("VIRTIO", "E1000", "PARAVIRT"):
+                         db_interface["type"] = "bridge"
+                         db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
+                     elif iface.get("virtual-interface").get("type") in ("SR-IOV", "PCI-PASSTHROUGH"):
+                         db_interface["type"] = "data"
+                         db_interface["model"] = get_str(iface.get("virtual-interface"), "type", 12)
+                         flavor_epa_interface["dedicated"] = "no" if iface["virtual-interface"]["type"] == "SR-IOV" \
+                             else "yes"
+                         flavor_epa_interfaces.append(flavor_epa_interface)
+                     else:
+                         raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{}]':'vdu[{}]':'interface':'virtual"
+                                             "-interface':'type':'{}'. Interface type is not supported".format(
+                                                 vnfd_id, vdu_id, iface.get("virtual-interface").get("type")),
+                                             httperrors.Bad_Request)
+                     if iface.get("mgmt-interface"):
+                         db_interface["type"] = "mgmt"
+                     if iface.get("external-connection-point-ref"):
+                         try:
+                             cp = vnfd.get("connection-point")[iface.get("external-connection-point-ref")]
+                             db_interface["external_name"] = get_str(cp, "name", 255)
+                             cp_name2iface_uuid[db_interface["external_name"]] = iface_uuid
+                             cp_name2vdu_id[db_interface["external_name"]] = vdu_id
+                             cp_name2vm_uuid[db_interface["external_name"]] = vm_uuid
+                             cp_name2db_interface[db_interface["external_name"]] = db_interface
+                             for cp_descriptor in vnfd_descriptor["connection-point"]:
+                                 if cp_descriptor["name"] == db_interface["external_name"]:
+                                     break
+                             else:
+                                 raise KeyError()
+                             if vdu_id in vdu_id2cp_name:
+                                 vdu_id2cp_name[vdu_id] = None  # more than one connection point at this VDU
+                             else:
+                                 vdu_id2cp_name[vdu_id] = db_interface["external_name"]
+                             # port security
+                             if str(cp_descriptor.get("port-security-enabled")).lower() == "false":
+                                 db_interface["port_security"] = 0
+                             elif str(cp_descriptor.get("port-security-enabled")).lower() == "true":
+                                 db_interface["port_security"] = 1
+                         except KeyError:
+                             raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
+                                                 "'interface[{iface}]':'vnfd-connection-point-ref':'{cp}' is not present"
+                                                 " at connection-point".format(
+                                                     vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
+                                                     cp=iface.get("vnfd-connection-point-ref")),
+                                                 httperrors.Bad_Request)
+                     elif iface.get("internal-connection-point-ref"):
+                         try:
+                             for icp_descriptor in vdu_descriptor["internal-connection-point"]:
+                                 if icp_descriptor["id"] == str(iface.get("internal-connection-point-ref")):
+                                     break
+                             else:
+                                 raise KeyError("does not exist at vdu:internal-connection-point")
+                             icp = None
+                             icp_vld = None
+                             for vld in vnfd.get("internal-vld").values():
+                                 for cp in vld.get("internal-connection-point").values():
+                                     if cp.get("id-ref") == iface.get("internal-connection-point-ref"):
+                                         if icp:
+                                             raise KeyError("is referenced by more than one 'internal-vld'")
+                                         icp = cp
+                                         icp_vld = vld
+                             if not icp:
+                                 raise KeyError("is not referenced by any 'internal-vld'")
+                             db_interface["net_id"] = net_id2uuid[icp_vld.get("id")]
+                             if str(icp_descriptor.get("port-security-enabled")).lower() == "false":
+                                 db_interface["port_security"] = 0
+                             elif str(icp_descriptor.get("port-security-enabled")).lower() == "true":
+                                 db_interface["port_security"] = 1
+                             if icp.get("ip-address"):
+                                 if not icp_vld.get("ip-profile-ref"):
+                                     raise NfvoException(
+                                         "Error at 'vnfd[{}]': an internal-connection-point sets an 'ip-address' "
+                                         "but its VLD has no 'ip-profile-ref'".format(vnfd_id),
+                                         httperrors.Bad_Request)
+                                 db_interface["ip_address"] = str(icp.get("ip-address"))
+                         except KeyError as e:
+                             raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'vdu[{vdu}]':"
+                                                 "'interface[{iface}]':'internal-connection-point-ref':'{cp}'"
+                                                 " {msg}".format(
+                                                     vnf=vnfd_id, vdu=vdu_id, iface=iface["name"],
+                                                     cp=iface.get("internal-connection-point-ref"), msg=str(e)),
+                                                 httperrors.Bad_Request)
+                     if iface.get("position"):
+                         db_interface["created_at"] = int(iface.get("position")) * 50
+                     if iface.get("mac-address"):
+                         db_interface["mac"] = str(iface.get("mac-address"))
+                     db_interfaces.append(db_interface)
+                 # table flavors
+                 db_flavor = {
+                     "name": get_str(vdu, "name", 250) + "-flv",
+                     "vcpus": int(vdu["vm-flavor"].get("vcpu-count", 1)),
+                     "ram": int(vdu["vm-flavor"].get("memory-mb", 1)),
+                     "disk": int(vdu["vm-flavor"].get("storage-gb", 0)),
+                 }
+                 # TODO revise the case of several numa-node-policy node
+                 extended = {}
+                 numa = {}
+                 if devices:
+                     extended["devices"] = devices
+                 if flavor_epa_interfaces:
+                     numa["interfaces"] = flavor_epa_interfaces
+                 if vdu.get("guest-epa"):   # TODO or dedicated_int:
+                     epa_vcpu_set = False
+                     if vdu["guest-epa"].get("numa-node-policy"):  # TODO or dedicated_int:
+                         numa_node_policy = vdu["guest-epa"].get("numa-node-policy")
+                         if numa_node_policy.get("node"):
+                             numa_node = next(iter(numa_node_policy["node"].values()))
+                             if numa_node.get("num-cores"):
+                                 numa["cores"] = numa_node["num-cores"]
+                                 epa_vcpu_set = True
+                             if numa_node.get("paired-threads"):
+                                 if numa_node["paired-threads"].get("num-paired-threads"):
+                                     numa["paired-threads"] = int(numa_node["paired-threads"]["num-paired-threads"])
+                                     epa_vcpu_set = True
+                                 if numa_node["paired-threads"].get("paired-thread-ids"):
+                                     numa["paired-threads-id"] = []
+                                     for pair in numa_node["paired-threads"]["paired-thread-ids"].values():
+                                         numa["paired-threads-id"].append(
+                                             (str(pair["thread-a"]), str(pair["thread-b"]))
+                                         )
+                             if numa_node.get("num-threads"):
+                                 numa["threads"] = int(numa_node["num-threads"])
+                                 epa_vcpu_set = True
+                             if numa_node.get("memory-mb"):
+                                 numa["memory"] = max(int(numa_node["memory-mb"] / 1024), 1)
+                     if vdu["guest-epa"].get("mempage-size"):
+                         if vdu["guest-epa"]["mempage-size"] != "SMALL":
+                             numa["memory"] = max(int(db_flavor["ram"] / 1024), 1)
+                     if vdu["guest-epa"].get("cpu-pinning-policy") and not epa_vcpu_set:
+                         if vdu["guest-epa"]["cpu-pinning-policy"] == "DEDICATED":
+                             if vdu["guest-epa"].get("cpu-thread-pinning-policy") and \
+                                             vdu["guest-epa"]["cpu-thread-pinning-policy"] != "PREFER":
+                                 numa["cores"] = max(db_flavor["vcpus"], 1)
+                             else:
+                                 numa["threads"] = max(db_flavor["vcpus"], 1)
+                             epa_vcpu_set = True
+                     if vdu["guest-epa"].get("cpu-quota") and not epa_vcpu_set:
+                         cpuquota = get_resource_allocation_params(vdu["guest-epa"].get("cpu-quota"))
+                         if cpuquota:
+                             extended["cpu-quota"] = cpuquota
+                     if vdu["guest-epa"].get("mem-quota"):
+                         vduquota = get_resource_allocation_params(vdu["guest-epa"].get("mem-quota"))
+                         if vduquota:
+                             extended["mem-quota"] = vduquota
+                     if vdu["guest-epa"].get("disk-io-quota"):
+                         diskioquota = get_resource_allocation_params(vdu["guest-epa"].get("disk-io-quota"))
+                         if diskioquota:
+                             extended["disk-io-quota"] = diskioquota
+                     if vdu["guest-epa"].get("vif-quota"):
+                         vifquota = get_resource_allocation_params(vdu["guest-epa"].get("vif-quota"))
+                         if vifquota:
+                             extended["vif-quota"] = vifquota
+                 if numa:
+                     extended["numas"] = [numa]
+                 if extended:
+                     extended_text = yaml.safe_dump(extended, default_flow_style=True, width=256)
+                     db_flavor["extended"] = extended_text
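For reference, the flow-style dump above turns the EPA data into a single-line YAML string. A standalone sketch with illustrative values (not taken from a real descriptor):

    import yaml

    extended = {"numas": [{"cores": 2, "memory": 4}], "cpu-quota": {"limit": 10000, "shares": 2}}
    extended_text = yaml.safe_dump(extended, default_flow_style=True, width=256)
    # extended_text == '{cpu-quota: {limit: 10000, shares: 2}, numas: [{cores: 2, memory: 4}]}\n'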
+                 # check whether an identical flavor already exists
+                 temp_flavor_dict = {'disk': db_flavor.get('disk', 0),
+                                     'ram': db_flavor.get('ram'),
+                                     'vcpus': db_flavor.get('vcpus'),
+                                     'extended': db_flavor.get('extended')
+                                     }
+                 existing_flavors = mydb.get_rows(FROM="flavors", WHERE=temp_flavor_dict)
+                 if existing_flavors:
+                     flavor_uuid = existing_flavors[0]["uuid"]
+                 else:
+                     flavor_uuid = str(uuid4())
+                     uuid_list.append(flavor_uuid)
+                     db_flavor["uuid"] = flavor_uuid
+                     db_flavors.append(db_flavor)
+                 db_vm["flavor_id"] = flavor_uuid
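The block above is a find-or-create pattern: a flavor row is reused only when disk, ram, vcpus and the serialized extended attributes all match; otherwise a fresh uuid is minted and tracked for rollback. A minimal standalone sketch; the helper name and row shape are illustrative, not this module's API:

    from uuid import uuid4

    def find_or_create_flavor_uuid(existing_rows, uuid_list):
        if existing_rows:            # identical flavor already in the DB: reuse it
            return existing_rows[0]["uuid"]
        new_uuid = str(uuid4())      # no match: mint a uuid and remember it for rollback
        uuid_list.append(new_uuid)
        return new_uuid

    created = []
    assert find_or_create_flavor_uuid([{"uuid": "f-1"}], created) == "f-1" and not created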
+             # VNF affinity and anti-affinity
+             for pg in vnfd.get("placement-groups", {}).values():
+                 pg_name = get_str(pg, "name", 255)
+                 for vdu in pg.get("member-vdus", {}).values():
+                     vdu_id = get_str(vdu, "member-vdu-ref", 255)
+                     if vdu_id not in vdu_id2db_table_index:
+                         raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'placement-groups[{pg}]':"
+                                             "'member-vdus':'{vdu}'. Reference to a non-existing vdu".format(
+                                                 vnf=vnfd_id, pg=pg_name, vdu=vdu_id),
+                                             httperrors.Bad_Request)
+                     db_vms[vdu_id2db_table_index[vdu_id]]["availability_zone"] = pg_name
+                     # TODO consider the case of isolation and not colocation
+                     # if pg.get("strategy") == "ISOLATION":
+             # VNF mgmt configuration
+             if vnfd["mgmt-interface"].get("vdu-id"):
+                 mgmt_vdu_id = get_str(vnfd["mgmt-interface"], "vdu-id", 255)
+                 if mgmt_vdu_id not in vdu_id2uuid:
+                     raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'vdu-id':"
+                                         "'{vdu}'. Reference to a non-existing vdu".format(
+                                             vnf=vnfd_id, vdu=mgmt_vdu_id),
+                                         httperrors.Bad_Request)
+                 mgmt_access["vm_id"] = vdu_id2uuid[mgmt_vdu_id]
+                 mgmt_access["vdu-id"] = mgmt_vdu_id
+                 # if only one cp is defined by this VDU, mark this interface as of type "mgmt"
+                 if vdu_id2cp_name.get(mgmt_vdu_id):
+                     if cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]:
+                         cp_name2db_interface[vdu_id2cp_name[mgmt_vdu_id]]["type"] = "mgmt"
+             if vnfd["mgmt-interface"].get("ip-address"):
+                 mgmt_access["ip-address"] = str(vnfd["mgmt-interface"].get("ip-address"))
 -                    # check correct parameters
 -                    if vnf_index not in vnf_index2vnf_uuid:
 -                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
 -                                            "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
 -                                            "'nsd':'constituent-vnfd'".format(
 -                                                str(nsd["id"]), str(vld["id"]), str(iface["member-vnf-index-ref"])),
 -                                            httperrors.Bad_Request)
 -
 -                    existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid', 'i.type as iface_type'),
 -                                                    FROM="interfaces as i join vms on i.vm_id=vms.uuid",
 -                                                    WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
 -                                                           'external_name': get_str(iface, "vnfd-connection-point-ref",
 -                                                                                    255)})
 -                    if not existing_ifaces:
 -                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
 -                                            "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
 -                                            "connection-point name at VNFD '{}'".format(
 -                                                str(nsd["id"]), str(vld["id"]), str(iface["vnfd-connection-point-ref"]),
 -                                                str(iface.get("vnfd-id-ref"))[:255]),
 -                                            httperrors.Bad_Request)
 -                    interface_uuid = existing_ifaces[0]["uuid"]
 -                    if existing_ifaces[0]["iface_type"] == "data":
 -                        db_sce_net["type"] = "data"
 -                    sce_interface_uuid = str(uuid4())
 -                    uuid_list.append(sce_net_uuid)
 -                    iface_ip_address = None
 -                    if iface.get("ip-address"):
 -                        iface_ip_address = str(iface.get("ip-address"))
 -                    db_sce_interface = {
 -                        "uuid": sce_interface_uuid,
 -                        "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
 -                        "sce_net_id": sce_net_uuid,
 -                        "interface_id": interface_uuid,
 -                        "ip_address": iface_ip_address,
 -                    }
 -                    db_sce_interfaces.append(db_sce_interface)
 -                if not db_sce_net["type"]:
 -                    db_sce_net["type"] = "bridge"
++            if vnfd["mgmt-interface"].get("cp") and vnfd.get("vdu"):
+                 if vnfd["mgmt-interface"]["cp"] not in cp_name2iface_uuid:
+                     raise NfvoException("Error. Invalid VNF descriptor at 'vnfd[{vnf}]':'mgmt-interface':'cp'['{cp}']. "
+                                         "Reference to a non-existing connection-point".format(
+                                             vnf=vnfd_id, cp=vnfd["mgmt-interface"]["cp"]),
+                                         httperrors.Bad_Request)
+                 mgmt_access["vm_id"] = cp_name2vm_uuid[vnfd["mgmt-interface"]["cp"]]
+                 mgmt_access["interface_id"] = cp_name2iface_uuid[vnfd["mgmt-interface"]["cp"]]
+                 mgmt_access["vdu-id"] = cp_name2vdu_id[vnfd["mgmt-interface"]["cp"]]
+                 # mark this interface as of type mgmt
+                 if cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]:
+                     cp_name2db_interface[vnfd["mgmt-interface"]["cp"]]["type"] = "mgmt"
+             default_user = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}).get("ssh-access", {}),
+                                     "default-user", 64)
+             if default_user:
+                 mgmt_access["default_user"] = default_user
+             required = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}).get("ssh-access", {}),
+                                    "required", 6)
+             if required:
+                 mgmt_access["required"] = required
+             password_ = get_str(vnfd.get("vnf-configuration", {}).get("config-access", {}),
+                                    "password", 64)
+             if password_:
+                 mgmt_access["password"] = password_
+             if mgmt_access:
+                 db_vnf["mgmt_access"] = yaml.safe_dump(mgmt_access, default_flow_style=True, width=256)
+             db_vnfs.append(db_vnf)
+         db_tables = [
+             {"vnfs": db_vnfs},
+             {"nets": db_nets},
+             {"images": db_images},
+             {"flavors": db_flavors},
+             {"ip_profiles": db_ip_profiles},
+             {"vms": db_vms},
+             {"interfaces": db_interfaces},
+         ]
+         logger.debug("create_vnf Deployment done vnfDict: %s",
+                      yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
+         mydb.new_rows(db_tables, uuid_list)
+         return vnfd_uuid_list
+     except NfvoException:
+         raise
+     except Exception as e:
+         logger.error("Exception {}".format(e))
+         raise  # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
+ @deprecated("Use new_vnfd_v3")
+ def new_vnf(mydb, tenant_id, vnf_descriptor):
+     global global_config
+     # Step 1. Check the VNF descriptor
+     check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=1)
+     # Step 2. Check that the tenant exists
+     vims = {}
+     if tenant_id != "any":
+         check_tenant(mydb, tenant_id)
+         if "tenant_id" in vnf_descriptor["vnf"]:
+             if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
+                 raise NfvoException("VNF cannot have a different tenant owner '{}', must be '{}'".format(vnf_descriptor["vnf"]["tenant_id"], tenant_id),
+                                     httperrors.Unauthorized)
+         else:
+             vnf_descriptor['vnf']['tenant_id'] = tenant_id
+         # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
+         if global_config["auto_push_VNF_to_VIMs"]:
+             vims = get_vim(mydb, tenant_id, ignore_errors=True)
+     # Step 4. Review the descriptor and add missing fields
+     #print vnf_descriptor
+     #logger.debug("Refactoring VNF descriptor with fields: description, public (default: true)")
+     vnf_name = vnf_descriptor['vnf']['name']
+     vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
+     if "physical" in vnf_descriptor['vnf']:
+         del vnf_descriptor['vnf']['physical']
+     #print vnf_descriptor
+     # Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
+     logger.debug('BEGIN creation of VNF "%s"' % vnf_name)
+     logger.debug("VNF %s: consisting of %d VNFC(s)" % (vnf_name,len(vnf_descriptor['vnf']['VNFC'])))
+     # For each VNFC, we add it to VNFCDict and create a flavor.
+     VNFCDict = {}     # Dictionary, key: VNFC name, value: dict with the relevant information to create the VNF and VMs in the MANO database
+     rollback_list = []    # will contain the new images and flavors created in mano; used for rollback
+     try:
+         logger.debug("Creating additional disk images and new flavors in the VIM for each VNFC")
+         for vnfc in vnf_descriptor['vnf']['VNFC']:
+             VNFCitem={}
+             VNFCitem["name"] = vnfc['name']
+             VNFCitem["availability_zone"] = vnfc.get('availability_zone')
+             VNFCitem["description"] = vnfc.get("description", 'VM {} of the VNF {}'.format(vnfc['name'],vnf_name))
+             #print "Flavor name: %s. Description: %s" % (VNFCitem["name"]+"-flv", VNFCitem["description"])
+             myflavorDict = {}
+             myflavorDict["name"] = vnfc['name']+"-flv"   # maybe the flavor could be renamed using the "image name" field if it exists
+             myflavorDict["description"] = VNFCitem["description"]
+             myflavorDict["ram"] = vnfc.get("ram", 0)
+             myflavorDict["vcpus"] = vnfc.get("vcpus", 0)
+             myflavorDict["disk"] = vnfc.get("disk", 0)
+             myflavorDict["extended"] = {}
+             devices = vnfc.get("devices")
+             if devices is not None:
+                 myflavorDict["extended"]["devices"] = devices
+             # TODO:
+             # Mapping from processor models to rankings should be available somehow in the NFVO. They could be taken from VIM or directly from a new database table
+             # Another option is that the processor in the VNF descriptor specifies directly the ranking of the host
+             # Previous code has been commented
+             #if vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" :
+             #    myflavorDict["flavor"]['extended']['processor_ranking'] = 200
+             #elif vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz" :
+             #    myflavorDict["flavor"]['extended']['processor_ranking'] = 300
+             #else:
+             #    result2, message = rollback(myvim, myvimURL, myvim_tenant, flavorList, imageList)
+             #    if result2:
+             #        print "Error creating flavor: unknown processor model. Rollback successful."
+             #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
+             #    else:
+             #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
+             myflavorDict['extended']['processor_ranking'] = 100  # hardcoded value until the model-to-ranking mapping is implemented
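One possible shape for the TODO above, sketched under the assumption that the NFVO would hold a model-to-ranking table; the two models come from the commented-out code, the table itself is hypothetical:

    PROCESSOR_RANKING = {
        "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz": 200,
        "Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz": 300,
    }

    def processor_ranking(model, default=100):
        # unknown models fall back to the hardcoded default used above
        return PROCESSOR_RANKING.get(model, default)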
+             if 'numas' in vnfc and len(vnfc['numas'])>0:
+                 myflavorDict['extended']['numas'] = vnfc['numas']
+             #print myflavorDict
+             # Step 6.2 New flavors are created in the VIM
+             flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)
+             #print "Flavor id for VNFC %s: %s" % (vnfc['name'],flavor_id)
+             VNFCitem["flavor_id"] = flavor_id
+             VNFCDict[vnfc['name']] = VNFCitem
+         logger.debug("Creating new images in the VIM for each VNFC")
+         # Step 6.3 New images are created in the VIM
+         #For each VNFC, we must create the appropriate image.
+         #This "for" loop might be integrated with the previous one
+         #In case this integration is made, the VNFCDict might become a VNFClist.
+         for vnfc in vnf_descriptor['vnf']['VNFC']:
+             #print "Image name: %s. Description: %s" % (vnfc['name']+"-img", VNFCDict[vnfc['name']]['description'])
+             image_dict={}
+             image_dict['name']=vnfc.get('image name',vnf_name+"-"+vnfc['name']+"-img")
+             image_dict['universal_name']=vnfc.get('image name')
+             image_dict['description']=vnfc.get('image name', VNFCDict[vnfc['name']]['description'])
+             image_dict['location']=vnfc.get('VNFC image')
+             #image_dict['new_location']=vnfc.get('image location')
+             image_dict['checksum']=vnfc.get('image checksum')
+             image_metadata_dict = vnfc.get('image metadata', None)
+             image_metadata_str = None
+             if image_metadata_dict is not None:
+                 image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
+             image_dict['metadata']=image_metadata_str
+             #print "create_or_use_image", mydb, vims, image_dict, rollback_list
+             image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
+             #print "Image id for VNFC %s: %s" % (vnfc['name'],image_id)
+             VNFCDict[vnfc['name']]["image_id"] = image_id
+             VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
+             VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
+             if vnfc.get("boot-data"):
+                 VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True, width=256)
+         # Step 7. Storing the VNF descriptor in the repository
+         if "descriptor" not in vnf_descriptor["vnf"]:
+             vnf_descriptor["vnf"]["descriptor"] = yaml.safe_dump(vnf_descriptor, indent=4, explicit_start=True, default_flow_style=False)
+         # Step 8. Adding the VNF to the NFVO DB
+         vnf_id = mydb.new_vnf_as_a_whole(tenant_id,vnf_name,vnf_descriptor,VNFCDict)
+         return vnf_id
+     except (db_base_Exception, vimconn.vimconnException, KeyError) as e:
+         _, message = rollback(mydb, vims, rollback_list)
+         if isinstance(e, db_base_Exception):
+             error_text = "Exception at database"
+         elif isinstance(e, KeyError):
+             error_text = "KeyError exception"
+             e.http_code = httperrors.Internal_Server_Error
+         else:
+             error_text = "Exception at VIM"
+         error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
+         #logger.error("start_scenario %s", error_text)
+         raise NfvoException(error_text, e.http_code)
+ @deprecated("Use new_vnfd_v3")
+ def new_vnf_v02(mydb, tenant_id, vnf_descriptor):
+     global global_config
+     # Step 1. Check the VNF descriptor
+     check_vnf_descriptor(vnf_descriptor, vnf_descriptor_version=2)
+     # Step 2. Check that the tenant exists
+     vims = {}
+     if tenant_id != "any":
+         check_tenant(mydb, tenant_id)
+         if "tenant_id" in vnf_descriptor["vnf"]:
+             if vnf_descriptor["vnf"]["tenant_id"] != tenant_id:
+                 raise NfvoException("VNF cannot have a different tenant owner '{}', must be '{}'".format(vnf_descriptor["vnf"]["tenant_id"], tenant_id),
+                                     httperrors.Unauthorized)
+         else:
+             vnf_descriptor['vnf']['tenant_id'] = tenant_id
+         # Step 3. Get the URL of the VIM from the nfvo_tenant and the datacenter
+         if global_config["auto_push_VNF_to_VIMs"]:
+             vims = get_vim(mydb, tenant_id, ignore_errors=True)
+     # Step 4. Review the descriptor and add missing fields
+     #print vnf_descriptor
+     #logger.debug("Refactoring VNF descriptor with fields: description, public (default: true)")
+     vnf_name = vnf_descriptor['vnf']['name']
+     vnf_descriptor['vnf']['description'] = vnf_descriptor['vnf'].get("description", vnf_name)
+     if "physical" in vnf_descriptor['vnf']:
+         del vnf_descriptor['vnf']['physical']
+     #print vnf_descriptor
+     # Step 6. For each VNFC in the descriptor, flavors and images are created in the VIM
+     logger.debug('BEGIN creation of VNF "%s"' % vnf_name)
+     logger.debug("VNF %s: consisting of %d VNFC(s)" % (vnf_name,len(vnf_descriptor['vnf']['VNFC'])))
+     # For each VNFC, we add it to VNFCDict and create a flavor.
+     VNFCDict = {}     # Dictionary, key: VNFC name, value: dict with the relevant information to create the VNF and VMs in the MANO database
+     rollback_list = []    # will contain the new images and flavors created in mano; used for rollback
+     try:
+         logger.debug("Creating additional disk images and new flavors in the VIM for each VNFC")
+         for vnfc in vnf_descriptor['vnf']['VNFC']:
+             VNFCitem={}
+             VNFCitem["name"] = vnfc['name']
+             VNFCitem["description"] = vnfc.get("description", 'VM {} of the VNF {}'.format(vnfc['name'],vnf_name))
+             #print "Flavor name: %s. Description: %s" % (VNFCitem["name"]+"-flv", VNFCitem["description"])
+             myflavorDict = {}
+             myflavorDict["name"] = vnfc['name']+"-flv"   # maybe the flavor could be renamed using the "image name" field if it exists
+             myflavorDict["description"] = VNFCitem["description"]
+             myflavorDict["ram"] = vnfc.get("ram", 0)
+             myflavorDict["vcpus"] = vnfc.get("vcpus", 0)
+             myflavorDict["disk"] = vnfc.get("disk", 0)
+             myflavorDict["extended"] = {}
+             devices = vnfc.get("devices")
+             if devices is not None:
+                 myflavorDict["extended"]["devices"] = devices
+             # TODO:
+             # Mapping from processor models to rankings should be available somehow in the NFVO. They could be taken from VIM or directly from a new database table
+             # Another option is that the processor in the VNF descriptor specifies directly the ranking of the host
+             # Previous code has been commented
+             #if vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-4620 0 @ 2.20GHz" :
+             #    myflavorDict["flavor"]['extended']['processor_ranking'] = 200
+             #elif vnfc['processor']['model'] == "Intel(R) Xeon(R) CPU E5-2697 v2 @ 2.70GHz" :
+             #    myflavorDict["flavor"]['extended']['processor_ranking'] = 300
+             #else:
+             #    result2, message = rollback(myvim, myvimURL, myvim_tenant, flavorList, imageList)
+             #    if result2:
+             #        print "Error creating flavor: unknown processor model. Rollback successful."
+             #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback successful."
+             #    else:
+             #        return -httperrors.Bad_Request, "Error creating flavor: unknown processor model. Rollback fail: you need to access VIM and delete the following %s" % message
+             myflavorDict['extended']['processor_ranking'] = 100  # hardcoded value until the model-to-ranking mapping is implemented
+             if 'numas' in vnfc and len(vnfc['numas'])>0:
+                 myflavorDict['extended']['numas'] = vnfc['numas']
+             #print myflavorDict
+             # Step 6.2 New flavors are created in the VIM
+             flavor_id = create_or_use_flavor(mydb, vims, myflavorDict, rollback_list)
+             #print "Flavor id for VNFC %s: %s" % (vnfc['name'],flavor_id)
+             VNFCitem["flavor_id"] = flavor_id
+             VNFCDict[vnfc['name']] = VNFCitem
+         logger.debug("Creating new images in the VIM for each VNFC")
+         # Step 6.3 New images are created in the VIM
+         #For each VNFC, we must create the appropriate image.
+         #This "for" loop might be integrated with the previous one
+         #In case this integration is made, the VNFCDict might become a VNFClist.
+         for vnfc in vnf_descriptor['vnf']['VNFC']:
+             #print "Image name: %s. Description: %s" % (vnfc['name']+"-img", VNFCDict[vnfc['name']]['description'])
+             image_dict={}
+             image_dict['name']=vnfc.get('image name',vnf_name+"-"+vnfc['name']+"-img")
+             image_dict['universal_name']=vnfc.get('image name')
+             image_dict['description']=vnfc.get('image name', VNFCDict[vnfc['name']]['description'])
+             image_dict['location']=vnfc.get('VNFC image')
+             #image_dict['new_location']=vnfc.get('image location')
+             image_dict['checksum']=vnfc.get('image checksum')
+             image_metadata_dict = vnfc.get('image metadata', None)
+             image_metadata_str = None
+             if image_metadata_dict is not None:
+                 image_metadata_str = yaml.safe_dump(image_metadata_dict,default_flow_style=True,width=256)
+             image_dict['metadata']=image_metadata_str
+             #print "create_or_use_image", mydb, vims, image_dict, rollback_list
+             image_id = create_or_use_image(mydb, vims, image_dict, rollback_list)
+             #print "Image id for VNFC %s: %s" % (vnfc['name'],image_id)
+             VNFCDict[vnfc['name']]["image_id"] = image_id
+             VNFCDict[vnfc['name']]["image_path"] = vnfc.get('VNFC image')
+             VNFCDict[vnfc['name']]["count"] = vnfc.get('count', 1)
+             if vnfc.get("boot-data"):
+                 VNFCDict[vnfc['name']]["boot_data"] = yaml.safe_dump(vnfc["boot-data"], default_flow_style=True, width=256)
+         # Step 7. Storing the VNF descriptor in the repository
+         if "descriptor" not in vnf_descriptor["vnf"]:
+             vnf_descriptor["vnf"]["descriptor"] = yaml.safe_dump(vnf_descriptor, indent=4, explicit_start=True, default_flow_style=False)
+         # Step 8. Adding the VNF to the NFVO DB
+         vnf_id = mydb.new_vnf_as_a_whole2(tenant_id,vnf_name,vnf_descriptor,VNFCDict)
+         return vnf_id
+     except (db_base_Exception, vimconn.vimconnException, KeyError) as e:
+         _, message = rollback(mydb, vims, rollback_list)
+         if isinstance(e, db_base_Exception):
+             error_text = "Exception at database"
+         elif isinstance(e, KeyError):
+             error_text = "KeyError exception"
+             e.http_code = httperrors.Internal_Server_Error
+         else:
+             error_text = "Exception at VIM"
+         error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
+         #logger.error("start_scenario %s", error_text)
+         raise NfvoException(error_text, e.http_code)
+ def get_vnf_id(mydb, tenant_id, vnf_id):
+     #check valid tenant_id
+     check_tenant(mydb, tenant_id)
+     #obtain data
+     where_or = {}
+     if tenant_id != "any":
+         where_or["tenant_id"] = tenant_id
+         where_or["public"] = True
+     vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=where_or, WHERE_AND_OR="AND")
+     vnf_id = vnf["uuid"]
+     filter_keys = ('uuid', 'name', 'description', 'public', "tenant_id", "osm_id", "created_at")
+     filtered_content = dict( (k,v) for k,v in vnf.items() if k in filter_keys )
+     #change_keys_http2db(filtered_content, http2db_vnf, reverse=True)
+     data={'vnf' : filtered_content}
+     #GET VM
+     content = mydb.get_rows(FROM='vnfs join vms on vnfs.uuid=vms.vnf_id',
+             SELECT=('vms.uuid as uuid', 'vms.osm_id as osm_id', 'vms.name as name', 'vms.description as description',
+                     'boot_data'),
+             WHERE={'vnfs.uuid': vnf_id} )
+     if len(content) != 0:
+         #raise NfvoException("vnf '{}' not found".format(vnf_id), httperrors.Not_Found)
+         # change boot_data into boot-data
+         for vm in content:
+             if vm.get("boot_data"):
+                 vm["boot-data"] = yaml.safe_load(vm["boot_data"])
+                 del vm["boot_data"]
+         data['vnf']['VNFC'] = content
+     #TODO: GET all the information from a VNFC and include it in the output.
+     #GET NET
+     content = mydb.get_rows(FROM='vnfs join nets on vnfs.uuid=nets.vnf_id',
+                                     SELECT=('nets.uuid as uuid','nets.name as name','nets.description as description', 'nets.type as type', 'nets.multipoint as multipoint'),
+                                     WHERE={'vnfs.uuid': vnf_id} )
+     data['vnf']['nets'] = content
+     #GET ip-profile for each net
+     for net in data['vnf']['nets']:
+         ipprofiles = mydb.get_rows(FROM='ip_profiles',
+                                    SELECT=('ip_version','subnet_address','gateway_address','dns_address','dhcp_enabled','dhcp_start_address','dhcp_count'),
+                                    WHERE={'net_id': net["uuid"]} )
+         if len(ipprofiles)==1:
+             net["ip_profile"] = ipprofiles[0]
+         elif len(ipprofiles)>1:
+             raise NfvoException("More than one ip-profile found with this criteria: net_id='{}'".format(net['uuid']), httperrors.Bad_Request)
+     #TODO: For each net, GET its elements and relevant info per element (VNFC, iface, ip_address) and include them in the output.
+     #GET External Interfaces
+     content = mydb.get_rows(FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces on vms.uuid=interfaces.vm_id',\
+                                     SELECT=('interfaces.uuid as uuid','interfaces.external_name as external_name', 'vms.name as vm_name', 'interfaces.vm_id as vm_id', \
+                                             'interfaces.internal_name as internal_name', 'interfaces.type as type', 'interfaces.vpci as vpci','interfaces.bw as bw'),\
+                                     WHERE={'vnfs.uuid': vnf_id, 'interfaces.external_name<>': None} )
+     #print content
+     data['vnf']['external-connections'] = content
+     return data
+ def delete_vnf(mydb,tenant_id,vnf_id,datacenter=None,vim_tenant=None):
+     # Check that the tenant exists
+     if tenant_id != "any":
+         check_tenant(mydb, tenant_id)
+         # Get the URL of the VIM from the nfvo_tenant and the datacenter
+         vims = get_vim(mydb, tenant_id, ignore_errors=True)
+     else:
+         vims={}
+     # Check if it is a valid uuid and, if not, get the uuid assuming that the name was provided
+     where_or = {}
+     if tenant_id != "any":
+         where_or["tenant_id"] = tenant_id
+         where_or["public"] = True
+     vnf = mydb.get_table_by_uuid_name('vnfs', vnf_id, "VNF", WHERE_OR=where_or, WHERE_AND_OR="AND")
+     vnf_id = vnf["uuid"]
+     # Get the list of flavors and images of the VNF
+     flavorList = get_flavorlist(mydb, vnf_id)
+     if len(flavorList)==0:
+         logger.warn("delete_vnf error. No flavors found for the VNF id '%s'", vnf_id)
+     imageList = get_imagelist(mydb, vnf_id)
+     if len(imageList)==0:
+         logger.warn( "delete_vnf error. No images found for the VNF id '%s'", vnf_id)
+     deleted = mydb.delete_row_by_id('vnfs', vnf_id)
+     if deleted == 0:
+         raise NfvoException("vnf '{}' not found".format(vnf_id), httperrors.Not_Found)
+     undeletedItems = []
+     for flavor in flavorList:
+         #check if flavor is used by other vnf
+         try:
+             c = mydb.get_rows(FROM='vms', WHERE={'flavor_id':flavor} )
+             if len(c) > 0:
+                 logger.debug("Flavor '%s' not deleted because it is being used by another VNF", flavor)
+                 continue
+             #flavor not used, must be deleted
+             # delete at VIM
+             c = mydb.get_rows(FROM='datacenters_flavors', WHERE={'flavor_id': flavor})
+             for flavor_vim in c:
+                 if not flavor_vim['created']:  # skip this flavor because not created by openmano
+                     continue
+                 # look for vim
+                 myvim = None
+                 for vim in vims.values():
+                     if vim["config"]["datacenter_tenant_id"] == flavor_vim["datacenter_vim_id"]:
+                         myvim = vim
+                         break
+                 if not myvim:
+                     continue
+                 try:
+                     myvim.delete_flavor(flavor_vim["vim_id"])
+                 except vimconn.vimconnNotFoundException:
+                     logger.warn("VIM flavor %s does not exist at datacenter %s", flavor_vim["vim_id"],
+                                 flavor_vim["datacenter_vim_id"])
+                 except vimconn.vimconnException as e:
+                     logger.error("Not possible to delete VIM flavor %s from datacenter %s: %s %s",
+                             flavor_vim["vim_id"], flavor_vim["datacenter_vim_id"], type(e).__name__, str(e))
+                     undeletedItems.append("flavor {} from VIM {}".format(flavor_vim["vim_id"],
+                                                                          flavor_vim["datacenter_vim_id"]))
+             # delete flavor from Database, using table flavors and with cascade foreign key also at datacenters_flavors
+             mydb.delete_row_by_id('flavors', flavor)
+         except db_base_Exception as e:
+             logger.error("delete_vnf_error. Not possible to get flavor details and delete '%s'. %s", flavor, str(e))
+             undeletedItems.append("flavor {}".format(flavor))
+     for image in imageList:
+         try:
+             #check if image is used by other vnf
+             c = mydb.get_rows(FROM='vms', WHERE=[{'image_id': image}, {'image_list LIKE ': '%' + image + '%'}])
+             if len(c) > 0:
+                 logger.debug("Image '%s' not deleted because it is being used by another VNF", image)
+                 continue
+             #image not used, must be deleted
+             # delete at VIM
+             c = mydb.get_rows(FROM='datacenters_images', WHERE={'image_id':image})
+             for image_vim in c:
+                 if image_vim["datacenter_vim_id"] not in vims:   # TODO change to datacenter_tenant_id
+                     continue
+                 if image_vim['created']=='false': #skip this image because not created by openmano
+                     continue
+                 myvim=vims[ image_vim["datacenter_id"] ]
+                 try:
+                     myvim.delete_image(image_vim["vim_id"])
+                 except vimconn.vimconnNotFoundException:
+                     logger.warn("VIM image %s does not exist at datacenter %s", image_vim["vim_id"], image_vim["datacenter_id"])
+                 except vimconn.vimconnException as e:
+                     logger.error("Not possible to delete VIM image %s from datacenter %s: %s %s",
+                             image_vim["vim_id"], image_vim["datacenter_id"], type(e).__name__, str(e))
+                     undeletedItems.append("image {} from VIM {}".format(image_vim["vim_id"], image_vim["datacenter_id"] ))
+             #delete image from Database, using table images and with cascade foreign key also at datacenters_images
+             mydb.delete_row_by_id('images', image)
+         except db_base_Exception as e:
+             logger.error("delete_vnf_error. Not possible to get image details and delete '%s'. %s", image, str(e))
+             undeletedItems.append("image {}".format(image))
+     return vnf_id + " " + vnf["name"]
+     #if undeletedItems:
+     #    return "delete_vnf. Undeleted: %s" %(undeletedItems)
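delete_vnf applies the same reference-counted cleanup to flavors and images: skip the resource while any remaining VM row uses it, otherwise delete it at each VIM (collecting failures instead of aborting) and then remove the DB row, whose cascade also clears the per-datacenter mappings. A condensed sketch with illustrative callback names:

    def cleanup_resource(resource_id, still_used, vim_delete, db_delete, undeleted):
        if still_used:               # referenced by another VNF: keep it
            return
        try:
            vim_delete(resource_id)  # e.g. a delete_flavor/delete_image style call
        except Exception as e:       # log-and-continue, mirroring the loops above
            undeleted.append("{}: {}".format(resource_id, e))
        db_delete(resource_id)       # cascade removes the per-datacenter rows too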
+ @deprecated("Not used")
+ def get_hosts_info(mydb, nfvo_tenant_id, datacenter_name=None):
+     result, vims = get_vim(mydb, nfvo_tenant_id, None, datacenter_name)
+     if result < 0:
+         return result, vims
+     elif result == 0:
+         return -httperrors.Not_Found, "datacenter '{}' not found".format(datacenter_name)
+     myvim = next(iter(vims.values()))
+     result,servers =  myvim.get_hosts_info()
+     if result < 0:
+         return result, servers
+     topology = {'name': myvim['name'], 'servers': servers}
+     return result, topology
+ def get_hosts(mydb, nfvo_tenant_id):
+     vims = get_vim(mydb, nfvo_tenant_id)
+     if len(vims) == 0:
+         raise NfvoException("No datacenter found for tenant '{}'".format(str(nfvo_tenant_id)), httperrors.Not_Found)
+     elif len(vims)>1:
+         #print "nfvo.datacenter_action() error. Several datacenters found"
+         raise NfvoException("More than one datacenter found; try to identify it by uuid", httperrors.Conflict)
+     myvim = next(iter(vims.values()))
+     try:
+         hosts =  myvim.get_hosts()
+         logger.debug('VIM hosts response: '+ yaml.safe_dump(hosts, indent=4, default_flow_style=False))
+         datacenter = {'Datacenters': [ {'name':myvim['name'],'servers':[]} ] }
+         for host in hosts:
+             server={'name':host['name'], 'vms':[]}
+             for vm in host['instances']:
+                 #get internal name and model
+                 try:
+                     c = mydb.get_rows(SELECT=('name',), FROM='instance_vms as iv join vms on iv.vm_id=vms.uuid',\
+                         WHERE={'vim_vm_id':vm['id']} )
+                     if len(c) == 0:
+                         logger.warn("nfvo.get_hosts virtual machine at VIM '{}' not found in the NFVO DB".format(vm['id']))
+                         continue
+                     server['vms'].append( {'name':vm['name'] , 'model':c[0]['name']} )
+                 except db_base_Exception as e:
+                     logger.warn("nfvo.get_hosts virtual machine at VIM '{}' error {}".format(vm['id'], str(e)))
+             datacenter['Datacenters'][0]['servers'].append(server)
+         #return -400, "under construction"
+         #print 'datacenters '+ json.dumps(datacenter, indent=4)
+         return datacenter
+     except vimconn.vimconnException as e:
+         raise NfvoException("Not possible to get_host_list from VIM: {}".format(str(e)), e.http_code)
+ @deprecated("Use new_nsd_v3")
+ def new_scenario(mydb, tenant_id, topo):
+ #    result, vims = get_vim(mydb, tenant_id)
+ #    if result < 0:
+ #        return result, vims
+ #1: parse input
+     if tenant_id != "any":
+         check_tenant(mydb, tenant_id)
+         if "tenant_id" in topo:
+             if topo["tenant_id"] != tenant_id:
+                 raise NfvoException("Scenario cannot have a different tenant owner '{}', must be '{}'".format(topo["tenant_id"], tenant_id),
+                                     httperrors.Unauthorized)
+     else:
+         tenant_id=None
+ #1.1: get VNFs and external_networks (other_nets).
+     vnfs={}
+     other_nets={}  #external_networks, bridge_networks and data_networks
+     nodes = topo['topology']['nodes']
+     for k in nodes.keys():
+         if nodes[k]['type'] == 'VNF':
+             vnfs[k] = nodes[k]
+             vnfs[k]['ifaces'] = {}
+         elif nodes[k]['type'] == 'other_network' or nodes[k]['type'] == 'external_network':
+             other_nets[k] = nodes[k]
+             other_nets[k]['external']=True
+         elif nodes[k]['type'] == 'network':
+             other_nets[k] = nodes[k]
+             other_nets[k]['external']=False
+ #1.2: Check that VNFs are present at database table vnfs. Insert uuid, description and external interfaces
+     for name,vnf in vnfs.items():
+         where = {"OR": {"tenant_id": tenant_id, 'public': "true"}}
+         error_text = ""
+         error_pos = "'topology':'nodes':'" + name + "'"
+         if 'vnf_id' in vnf:
+             error_text += " 'vnf_id' " +  vnf['vnf_id']
+             where['uuid'] = vnf['vnf_id']
+         if 'VNF model' in vnf:
+             error_text += " 'VNF model' " +  vnf['VNF model']
+             where['name'] = vnf['VNF model']
+         if len(where) == 1:
+             raise NfvoException("Descriptor needs a 'vnf_id' or 'VNF model' field at " + error_pos, httperrors.Bad_Request)
+         vnf_db = mydb.get_rows(SELECT=('uuid','name','description'),
+                                FROM='vnfs',
+                                WHERE=where)
+         if len(vnf_db)==0:
+             raise NfvoException("unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
+         elif len(vnf_db)>1:
+             raise NfvoException("more than one" + error_text + " at " + error_pos + ". Specify with 'vnf_id'", httperrors.Conflict)
+         vnf['uuid']=vnf_db[0]['uuid']
+         vnf['description']=vnf_db[0]['description']
+         #get external interfaces
+         ext_ifaces = mydb.get_rows(SELECT=('external_name as name','i.uuid as iface_uuid', 'i.type as type'),
+             FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
+             WHERE={'vnfs.uuid':vnf['uuid'], 'external_name<>': None} )
+         for ext_iface in ext_ifaces:
+             vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type':ext_iface['type']}
+ #1.4 get list of connections
+     conections = topo['topology']['connections']
+     conections_list = []
+     conections_list_name = []
+     for k in conections.keys():
+         if isinstance(conections[k]['nodes'], dict):  # dict with node:iface pairs
+             ifaces_list = conections[k]['nodes'].items()
+         elif isinstance(conections[k]['nodes'], list):  # list of dictionaries
+             ifaces_list=[]
+             conection_pair_list = map(lambda x: x.items(), conections[k]['nodes'] )
+             for k2 in conection_pair_list:
+                 ifaces_list += k2
+         con_type = conections[k].get("type", "link")
+         if con_type != "link":
+             if k in other_nets:
+                 raise NfvoException("Format error. Repeated network name at 'topology':'connections':'{}'".format(str(k)), httperrors.Bad_Request)
+             other_nets[k] = {'external': False}
+             if conections[k].get("graph"):
+                 other_nets[k]["graph"] =   conections[k]["graph"]
+             ifaces_list.append( (k, None) )
+         if con_type == "external_network":
+             other_nets[k]['external'] = True
+             if conections[k].get("model"):
+                 other_nets[k]["model"] =   conections[k]["model"]
+             else:
+                 other_nets[k]["model"] =   k
+         if con_type == "dataplane_net" or con_type == "bridge_net":
+             other_nets[k]["model"] = con_type
+         conections_list_name.append(k)
+         conections_list.append(set(ifaces_list)) #from list to set to operate as a set (this conversion removes elements that are repeated in a list)
+         #print set(ifaces_list)
+         # check valid VNF and iface names
+         for iface in ifaces_list:
+             if iface[0] not in vnfs and iface[0] not in other_nets :
+                 raise NfvoException("format error. Invalid VNF name at 'topology':'connections':'{}':'nodes':'{}'".format(
+                                                                                         str(k), iface[0]), httperrors.Not_Found)
+             if iface[0] in vnfs and iface[1] not in vnfs[ iface[0] ]['ifaces']:
+                 raise NfvoException("format error. Invalid interface name at 'topology':'connections':'{}':'nodes':'{}':'{}'".format(
+                                                                                         str(k), iface[0], iface[1]), httperrors.Not_Found)
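The 'nodes' field parsed above accepts two equivalent notations; both normalize to (node, iface) pairs before the duplicate-removing set conversion. A standalone sketch with illustrative names:

    nodes_as_dict = {"vnf1": "eth0", "vnf2": "eth0"}        # node: iface pairs
    nodes_as_list = [{"vnf1": "eth0"}, {"vnf2": "eth0"}]    # list of one-entry dicts

    pairs_from_dict = list(nodes_as_dict.items())
    pairs_from_list = [pair for d in nodes_as_list for pair in d.items()]
    assert set(pairs_from_dict) == set(pairs_from_list) == {("vnf1", "eth0"), ("vnf2", "eth0")}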
+ #1.5 unify connections from the pair list to a consolidated list
+     index=0
+     while index < len(conections_list):
+         index2 = index+1
+         while index2 < len(conections_list):
+             if len(conections_list[index] & conections_list[index2])>0: #common interface, join nets
+                 conections_list[index] |= conections_list[index2]
+                 del conections_list[index2]
+                 del conections_list_name[index2]
+             else:
+                 index2 += 1
+         conections_list[index] = list(conections_list[index])  # from set to list again
+         index += 1
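Worked example of the consolidation in step 1.5: connection sets that share an interface are unioned until all remaining sets are pairwise disjoint, so each surviving set becomes one net:

    groups = [{("vnf1", "e0"), ("vnf2", "e0")},
              {("vnf2", "e0"), ("vnf3", "e0")},   # shares ("vnf2", "e0") with the first
              {("vnf4", "e0"), ("vnf5", "e0")}]
    i = 0
    while i < len(groups):
        j = i + 1
        while j < len(groups):
            if groups[i] & groups[j]:             # common interface: join the nets
                groups[i] |= groups.pop(j)
            else:
                j += 1
        i += 1
    assert len(groups) == 2 and len(groups[0]) == 3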
+     #for k in conections_list:
+     #    print k
+ #1.6 Delete non external nets
+ #    for k in other_nets.keys():
+ #        if other_nets[k]['model']=='bridge' or other_nets[k]['model']=='dataplane_net' or other_nets[k]['model']=='bridge_net':
+ #            for con in conections_list:
+ #                delete_indexes=[]
+ #                for index in range(0,len(con)):
+ #                    if con[index][0] == k: delete_indexes.insert(0,index) #order from higher to lower
+ #                for index in delete_indexes:
+ #                    del con[index]
+ #            del other_nets[k]
+ #1.7: Check external_ports are present at database table datacenter_nets
+     for k,net in other_nets.items():
+         error_pos = "'topology':'nodes':'" + k + "'"
+         if not net['external']:
+             if 'name' not in net:
+                 net['name']=k
+             if 'model' not in net:
+                 raise NfvoException("needed a 'model' at " + error_pos, httperrors.Bad_Request)
+             if net['model']=='bridge_net':
+                 net['type'] = 'bridge'
+             elif net['model']=='dataplane_net':
+                 net['type'] = 'data'
+             else:
+                 raise NfvoException("unknown 'model' '"+ net['model'] +"' at " + error_pos, httperrors.Not_Found)
+         else: #external
+ #IF we do not want to check that external network exist at datacenter
+             pass
+ #ELSE
+ #             error_text = ""
+ #             WHERE_={}
+ #             if 'net_id' in net:
+ #                 error_text += " 'net_id' " +  net['net_id']
+ #                 WHERE_['uuid'] = net['net_id']
+ #             if 'model' in net:
+ #                 error_text += " 'model' " +  net['model']
+ #                 WHERE_['name'] = net['model']
+ #             if len(WHERE_) == 0:
+ #                 return -httperrors.Bad_Request, "needed a 'net_id' or 'model' at " + error_pos
+ #             r,net_db = mydb.get_table(SELECT=('uuid','name','description','type','shared'),
+ #                 FROM='datacenter_nets', WHERE=WHERE_ )
+ #             if r<0:
+ #                 print "nfvo.new_scenario Error getting datacenter_nets",r,net_db
+ #             elif r==0:
+ #                 print "nfvo.new_scenario Error" +error_text+ " is not present at database"
+ #                 return -httperrors.Bad_Request, "unknown " +error_text+ " at " + error_pos
+ #             elif r>1:
+ #                 print "nfvo.new_scenario Error more than one external_network for " +error_text+ " is present at database"
+ #                 return -httperrors.Bad_Request, "more than one external_network for " +error_text+ "at "+ error_pos + " Concrete with 'net_id'"
+ #             other_nets[k].update(net_db[0])
+ #ENDIF
+     net_list={}
+     net_nb=0  #Number of nets
+     for con in conections_list:
+         #check if this is connected to a external net
+         other_net_index=-1
+         #print
+         #print "con", con
+         for index in range(0,len(con)):
+             #check if this is connected to a external net
+             for net_key in other_nets.keys():
+                 if con[index][0]==net_key:
+                     if other_net_index>=0:
+                         error_text = "There is some interface connected both to net '{}' and net '{}'".format(
+                             con[other_net_index][0], net_key)
+                         #print "nfvo.new_scenario " + error_text
+                         raise NfvoException(error_text, httperrors.Bad_Request)
+                     else:
+                         other_net_index = index
+                         net_target = net_key
+                     break
+         #print "other_net_index", other_net_index
+         try:
+             if other_net_index>=0:
+                 del con[other_net_index]
+ #IF we do not want to check that external network exist at datacenter
+                 if other_nets[net_target]['external']:
+                     if "name" not in other_nets[net_target]:
+                         other_nets[net_target]['name'] =  other_nets[net_target]['model']
+                     if other_nets[net_target]["type"] == "external_network":
+                         if vnfs[ con[0][0] ]['ifaces'][ con[0][1] ]["type"] == "data":
+                             other_nets[net_target]["type"] =  "data"
+                         else:
+                             other_nets[net_target]["type"] =  "bridge"
+ #ELSE
+ #                 if other_nets[net_target]['external'] :
+ #                     type_='data' if len(con)>1 else 'ptp'  #an external net is connected to a external port, so it is ptp if only one connection is done to this net
+ #                     if type_=='data' and other_nets[net_target]['type']=="ptp":
+ #                         error_text = "Error connecting %d nodes on a not multipoint net %s" % (len(con), net_target)
+ #                         print "nfvo.new_scenario " + error_text
+ #                         return -httperrors.Bad_Request, error_text
+ #ENDIF
+                 for iface in con:
+                     vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
+             else:
+                 #create a net
+                 net_type_bridge=False
+                 net_type_data=False
+                 net_target = "__-__net"+str(net_nb)
+                 net_list[net_target] = {'name': conections_list_name[net_nb],  #"net-"+str(net_nb),
+                     'description':"net-{} in scenario {}".format(net_nb,topo['name']),
+                     'external':False}
+                 for iface in con:
+                     vnfs[ iface[0] ]['ifaces'][ iface[1] ]['net_key'] = net_target
+                     iface_type = vnfs[ iface[0] ]['ifaces'][ iface[1] ]['type']
+                     if iface_type=='mgmt' or iface_type=='bridge':
+                         net_type_bridge = True
+                     else:
+                         net_type_data = True
+                 if net_type_bridge and net_type_data:
+                     error_text = "Error connecting interfaces of bridge type with data type. First node {}, iface {}".format(iface[0], iface[1])
+                     #print "nfvo.new_scenario " + error_text
+                     raise NfvoException(error_text, httperrors.Bad_Request)
+                 elif net_type_bridge:
+                     type_='bridge'
+                 else:
+                     type_='data' if len(con)>2 else 'ptp'
+                 net_list[net_target]['type'] = type_
+                 net_nb+=1
+         except Exception:
+             error_text = "Error connecting node {}: {} does not match any VNF or interface".format(iface[0], iface[1])
+             #print "nfvo.new_scenario " + error_text
+             #raise e
+             raise NfvoException(error_text, httperrors.Bad_Request)
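The net-type decision above, restated in isolation: mixing bridge-type (mgmt/bridge) and data-type interfaces is an error; a pure bridge group yields 'bridge'; otherwise more than two members give 'data' and exactly two give 'ptp':

    def classify_net(iface_types):
        bridge = any(t in ("mgmt", "bridge") for t in iface_types)
        data = any(t not in ("mgmt", "bridge") for t in iface_types)
        if bridge and data:
            raise ValueError("cannot connect bridge-type and data-type interfaces")
        if bridge:
            return "bridge"
        return "data" if len(iface_types) > 2 else "ptp"

    assert classify_net(["mgmt", "bridge"]) == "bridge"
    assert classify_net(["data"] * 3) == "data"
    assert classify_net(["data"] * 2) == "ptp"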
+ # 1.8: Connect all interfaces of type 'mgmt' that are not yet connected to the management net
+     #1.8.1 obtain management net
+     mgmt_net = mydb.get_rows(SELECT=('uuid','name','description','type','shared'),
+         FROM='datacenter_nets', WHERE={'name':'mgmt'} )
+     #1.8.2 check all interfaces from all vnfs
+     if len(mgmt_net)>0:
+         add_mgmt_net = False
+         for vnf in vnfs.values():
+             for iface in vnf['ifaces'].values():
+                 if iface['type']=='mgmt' and 'net_key' not in iface:
+                     #iface not connected
+                     iface['net_key'] = 'mgmt'
+                     add_mgmt_net = True
+         if add_mgmt_net and 'mgmt' not in net_list:
+             net_list['mgmt']=mgmt_net[0]
+             net_list['mgmt']['external']=True
+             net_list['mgmt']['graph']={'visible':False}
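Step 1.8 in miniature: any mgmt-type interface still lacking a 'net_key' is attached to the datacenter 'mgmt' net, which is added to the scenario only if at least one interface needed it. Structures below are illustrative:

    example_vnfs = {"vnf1": {"ifaces": {"eth0": {"type": "mgmt"},
                                        "eth1": {"type": "data", "net_key": "data0"}}}}
    add_mgmt = False
    for vnf in example_vnfs.values():
        for iface in vnf["ifaces"].values():
            if iface["type"] == "mgmt" and "net_key" not in iface:
                iface["net_key"] = "mgmt"       # attach to the management net
                add_mgmt = True
    assert add_mgmt and example_vnfs["vnf1"]["ifaces"]["eth0"]["net_key"] == "mgmt"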
+     net_list.update(other_nets)
+     #print
+     #print 'net_list', net_list
+     #print
+     #print 'vnfs', vnfs
+     #print
+ #2: insert scenario. filling tables scenarios,sce_vnfs,sce_interfaces,sce_nets
+     c = mydb.new_scenario( { 'vnfs':vnfs, 'nets':net_list,
+         'tenant_id':tenant_id, 'name':topo['name'],
+          'description':topo.get('description',topo['name']),
+          'public': topo.get('public', False)
+          })
+     return c
+ @deprecated("Use new_nsd_v3")
+ def new_scenario_v02(mydb, tenant_id, scenario_dict, version):
+     """Create a new scenario for descriptor versions 0.2 and 0.3"""
+     scenario = scenario_dict["scenario"]
+     if tenant_id != "any":
+         check_tenant(mydb, tenant_id)
+         if "tenant_id" in scenario:
+             if scenario["tenant_id"] != tenant_id:
+                 # print "nfvo.new_scenario_v02() tenant '%s' not found" % tenant_id
+                 raise NfvoException("Scenario cannot have a different tenant owner '{}', must be '{}'".format(
+                                                     scenario["tenant_id"], tenant_id), httperrors.Unauthorized)
+     else:
+         tenant_id=None
+     # 1: Check that VNF are present at database table vnfs and update content into scenario dict
+     for name,vnf in scenario["vnfs"].items():
+         where = {"OR": {"tenant_id": tenant_id, 'public': "true"}}
+         error_text = ""
+         error_pos = "'scenario':'vnfs':'" + name + "'"
+         if 'vnf_id' in vnf:
+             error_text += " 'vnf_id' " + vnf['vnf_id']
+             where['uuid'] = vnf['vnf_id']
+         if 'vnf_name' in vnf:
+             error_text += " 'vnf_name' " + vnf['vnf_name']
+             where['name'] = vnf['vnf_name']
+         if len(where) == 1:
+             raise NfvoException("Needed a 'vnf_id' or 'vnf_name' at " + error_pos, httperrors.Bad_Request)
+         vnf_db = mydb.get_rows(SELECT=('uuid', 'name', 'description'),
+                                FROM='vnfs',
+                                WHERE=where)
+         if len(vnf_db) == 0:
+             raise NfvoException("Unknown" + error_text + " at " + error_pos, httperrors.Not_Found)
+         elif len(vnf_db) > 1:
+             raise NfvoException("More than one" + error_text + " at " + error_pos + ". Specify with 'vnf_id'", httperrors.Conflict)
+         vnf['uuid'] = vnf_db[0]['uuid']
+         vnf['description'] = vnf_db[0]['description']
+         vnf['ifaces'] = {}
+         # get external interfaces
+         ext_ifaces = mydb.get_rows(SELECT=('external_name as name', 'i.uuid as iface_uuid', 'i.type as type'),
+                                    FROM='vnfs join vms on vnfs.uuid=vms.vnf_id join interfaces as i on vms.uuid=i.vm_id',
+                                    WHERE={'vnfs.uuid':vnf['uuid'], 'external_name<>': None} )
+         for ext_iface in ext_ifaces:
+             vnf['ifaces'][ ext_iface['name'] ] = {'uuid':ext_iface['iface_uuid'], 'type': ext_iface['type']}
+         # TODO? get internal-connections from db.nets and their profiles, and update scenario[vnfs][internal-connections] accordingly
+     # 2: Insert net_key and ip_address at every vnf interface
+     for net_name, net in scenario["networks"].items():
+         net_type_bridge = False
+         net_type_data = False
+         for iface_dict in net["interfaces"]:
+             if version == "0.2":
+                 temp_dict = iface_dict
+                 ip_address = None
+             elif version == "0.3":
+                 temp_dict = {iface_dict["vnf"] : iface_dict["vnf_interface"]}
+                 ip_address = iface_dict.get('ip_address', None)
+             for vnf, iface in temp_dict.items():
+                 if vnf not in scenario["vnfs"]:
+                     error_text = "Error at 'networks':'{}':'interfaces' VNF '{}' does not match any VNF at 'vnfs'".format(
+                         net_name, vnf)
+                     # logger.debug("nfvo.new_scenario_v02 " + error_text)
+                     raise NfvoException(error_text, httperrors.Not_Found)
+                 if iface not in scenario["vnfs"][vnf]['ifaces']:
+                     error_text = "Error at 'networks':'{}':'interfaces':'{}' interface does not match any VNF interface"\
+                         .format(net_name, iface)
+                     # logger.debug("nfvo.new_scenario_v02 " + error_text)
+                     raise NfvoException(error_text, httperrors.Bad_Request)
+                 if "net_key" in scenario["vnfs"][vnf]['ifaces'][iface]:
+                     error_text = "Error at 'networks':'{}':'interfaces':'{}' interface already connected at network "\
+                                  "'{}'".format(net_name, iface, scenario["vnfs"][vnf]['ifaces'][iface]['net_key'])
+                     # logger.debug("nfvo.new_scenario_v02 " + error_text)
+                     raise NfvoException(error_text, httperrors.Bad_Request)
+                 scenario["vnfs"][vnf]['ifaces'][ iface ]['net_key'] = net_name
+                 scenario["vnfs"][vnf]['ifaces'][iface]['ip_address'] = ip_address
+                 iface_type = scenario["vnfs"][vnf]['ifaces'][iface]['type']
+                 if iface_type == 'mgmt' or iface_type == 'bridge':
+                     net_type_bridge = True
+                 else:
+                     net_type_data = True
+         if net_type_bridge and net_type_data:
+             error_text = "Error connection interfaces of 'bridge' type and 'data' type at 'networks':'{}':'interfaces'"\
+                 .format(net_name)
+             # logger.debug("nfvo.new_scenario " + error_text)
+             raise NfvoException(error_text, httperrors.Bad_Request)
+         elif net_type_bridge:
+             type_ = 'bridge'
+         else:
+             type_ = 'data' if len(net["interfaces"]) > 2 else 'ptp'
+         if net.get("implementation"):     # for v0.3
+             if type_ == "bridge" and net["implementation"] == "underlay":
+                 error_text = "Error connecting interfaces of bridge type to a network declared as 'underlay' at "\
+                              "'network':'{}'".format(net_name)
+                 # logger.debug(error_text)
+                 raise NfvoException(error_text, httperrors.Bad_Request)
+             elif type_ != "bridge" and net["implementation"] == "overlay":
+                 error_text = "Error connecting interfaces of data type to a network declared as 'overlay' at "\
+                              "'network':'{}'".format(net_name)
+                 # logger.debug(error_text)
+                 raise NfvoException(error_text, httperrors.Bad_Request)
+             net.pop("implementation")
+         if "type" in net and version == "0.3":   # for v0.3
+             if type_ == "data" and net["type"] == "e-line":
+                 error_text = "Error connecting more than 2 interfaces of data type to a network declared as type "\
+                              "'e-line' at 'network':'{}'".format(net_name)
+                 # logger.debug(error_text)
+                 raise NfvoException(error_text, httperrors.Bad_Request)
+             elif type_ == "ptp" and net["type"] == "e-lan":
+                 type_ = "data"
+         net['type'] = type_
+         net['name'] = net_name
+         net['external'] = net.get('external', False)
+     # 3: insert at database
+     scenario["nets"] = scenario["networks"]
+     scenario['tenant_id'] = tenant_id
+     scenario_id = mydb.new_scenario(scenario)
+     return scenario_id
+ def new_nsd_v3(mydb, tenant_id, nsd_descriptor):
+     """
+     Parses an OSM IM nsd_catalog and insert at DB
+     :param mydb:
+     :param tenant_id:
+     :param nsd_descriptor:
+     :return: The list of created NSD ids
+     """
+     try:
+         mynsd = nsd_catalog.nsd()
+         try:
+             pybindJSONDecoder.load_ietf_json(nsd_descriptor, None, None, obj=mynsd, skip_unknown=True)
+         except Exception as e:
+             raise NfvoException("Error. Invalid NS descriptor format: " + str(e), httperrors.Bad_Request)
+         db_scenarios = []
+         db_sce_nets = []
+         db_sce_vnfs = []
+         db_sce_interfaces = []
+         db_sce_vnffgs = []
+         db_sce_rsps = []
+         db_sce_rsp_hops = []
+         db_sce_classifiers = []
+         db_sce_classifier_matches = []
+         db_ip_profiles = []
+         db_ip_profiles_index = 0
+         uuid_list = []
+         nsd_uuid_list = []
+         for nsd_yang in mynsd.nsd_catalog.nsd.values():
+             nsd = nsd_yang.get()
+             # table scenarios
+             scenario_uuid = str(uuid4())
+             uuid_list.append(scenario_uuid)
+             nsd_uuid_list.append(scenario_uuid)
+             db_scenario = {
+                 "uuid": scenario_uuid,
+                 "osm_id": get_str(nsd, "id", 255),
+                 "name": get_str(nsd, "name", 255),
+                 "description": get_str(nsd, "description", 255),
+                 "tenant_id": tenant_id,
+                 "vendor": get_str(nsd, "vendor", 255),
+                 "short_name": get_str(nsd, "short-name", 255),
+                 "descriptor": str(nsd_descriptor)[:60000],
+             }
+             db_scenarios.append(db_scenario)
+             # table sce_vnfs (constituent-vnfd)
+             vnf_index2scevnf_uuid = {}
+             vnf_index2vnf_uuid = {}
+             for vnf in nsd.get("constituent-vnfd").values():
+                 existing_vnf = mydb.get_rows(FROM="vnfs", WHERE={'osm_id': str(vnf["vnfd-id-ref"])[:255],
+                                                                       'tenant_id': tenant_id})
+                 if not existing_vnf:
+                     raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'constituent-vnfd':'vnfd-id-ref':"
+                                         "'{}'. Reference to a non-existing VNFD in the catalog".format(
+                                             str(nsd["id"]), str(vnf["vnfd-id-ref"])[:255]),
+                                         httperrors.Bad_Request)
+                 sce_vnf_uuid = str(uuid4())
+                 uuid_list.append(sce_vnf_uuid)
+                 db_sce_vnf = {
+                     "uuid": sce_vnf_uuid,
+                     "scenario_id": scenario_uuid,
+                     # "name": get_str(vnf, "member-vnf-index", 255),
+                     "name": existing_vnf[0]["name"][:200] + "." + get_str(vnf, "member-vnf-index", 50),
+                     "vnf_id": existing_vnf[0]["uuid"],
+                     "member_vnf_index": str(vnf["member-vnf-index"]),
+                     # TODO 'start-by-default': True
+                 }
+                 vnf_index2scevnf_uuid[str(vnf['member-vnf-index'])] = sce_vnf_uuid
+                 vnf_index2vnf_uuid[str(vnf['member-vnf-index'])] = existing_vnf[0]["uuid"]
+                 db_sce_vnfs.append(db_sce_vnf)
+             # table ip_profiles (ip-profiles)
+             ip_profile_name2db_table_index = {}
+             for ip_profile in nsd.get("ip-profiles").values():
+                 db_ip_profile = {
+                     "ip_version": str(ip_profile["ip-profile-params"].get("ip-version", "ipv4")),
+                     "subnet_address": str(ip_profile["ip-profile-params"].get("subnet-address")),
+                     "gateway_address": str(ip_profile["ip-profile-params"].get("gateway-address")),
+                     "dhcp_enabled": str(ip_profile["ip-profile-params"]["dhcp-params"].get("enabled", True)),
+                     "dhcp_start_address": str(ip_profile["ip-profile-params"]["dhcp-params"].get("start-address")),
+                     "dhcp_count": str(ip_profile["ip-profile-params"]["dhcp-params"].get("count")),
+                 }
+                 dns_list = []
+                 for dns in ip_profile["ip-profile-params"]["dns-server"].values():
+                     dns_list.append(str(dns.get("address")))
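+                 # multiple DNS servers are flattened into a single ';'-separated column, e.g. "8.8.8.8;8.8.4.4"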
+                 db_ip_profile["dns_address"] = ";".join(dns_list)
+                 if ip_profile["ip-profile-params"].get('security-group'):
+                     db_ip_profile["security_group"] = ip_profile["ip-profile-params"]['security-group']
+                 ip_profile_name2db_table_index[str(ip_profile["name"])] = db_ip_profiles_index
+                 db_ip_profiles_index += 1
+                 db_ip_profiles.append(db_ip_profile)
+             # table sce_nets (internal-vld)
+             for vld in nsd.get("vld").values():
+                 sce_net_uuid = str(uuid4())
+                 uuid_list.append(sce_net_uuid)
+                 db_sce_net = {
+                     "uuid": sce_net_uuid,
+                     "name": get_str(vld, "name", 255),
+                     "scenario_id": scenario_uuid,
+                     # "type": #TODO
+                     "multipoint": not vld.get("type") == "ELINE",
+                     "osm_id":  get_str(vld, "id", 255),
+                     # "external": #TODO
+                     "description": get_str(vld, "description", 255),
+                 }
+                 # guess type of network
+                 if vld.get("mgmt-network"):
+                     db_sce_net["type"] = "bridge"
+                     db_sce_net["external"] = True
+                 elif vld.get("provider-network").get("overlay-type") == "VLAN":
+                     db_sce_net["type"] = "data"
+                 else:
+                     # later on it will be fixed to bridge or data depending on the type of interfaces attached to it
+                     db_sce_net["type"] = None
+                 db_sce_nets.append(db_sce_net)
+                 # ip-profile, link db_ip_profile with db_sce_net
+                 if vld.get("ip-profile-ref"):
+                     ip_profile_name = vld.get("ip-profile-ref")
+                     if ip_profile_name not in ip_profile_name2db_table_index:
+                         raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'ip-profile-ref':'{}'."
+                                             " Reference to a non-existing 'ip_profiles'".format(
+                                                 str(nsd["id"]), str(vld["id"]), str(vld["ip-profile-ref"])),
+                                             httperrors.Bad_Request)
+                     db_ip_profiles[ip_profile_name2db_table_index[ip_profile_name]]["sce_net_id"] = sce_net_uuid
+                 elif vld.get("vim-network-name"):
+                     db_sce_net["vim_network_name"] = get_str(vld, "vim-network-name", 255)
++
+                 # table sce_interfaces (vld:vnfd-connection-point-ref)
+                 for iface in vld.get("vnfd-connection-point-ref").values():
+                     vnf_index = str(iface['member-vnf-index-ref'])
++                    # check correct parameters before any lookup indexed by vnf_index
++                    if vnf_index not in vnf_index2vnf_uuid:
++                        raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
++                                            "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
++                                            "'nsd':'constituent-vnfd'".format(
++                                                str(nsd["id"]), str(vld["id"]), str(iface["member-vnf-index-ref"])),
++                                            httperrors.Bad_Request)
++                    # Check if there are VDUs in the descriptor
++                    existing_vdus = mydb.get_rows(SELECT=('vms.uuid',), FROM="vms",
++                                                  WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index]})
++                    if existing_vdus:
++                        existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid', 'i.type as iface_type'),
++                                                        FROM="interfaces as i join vms on i.vm_id=vms.uuid",
++                                                        WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
++                                                               'external_name': get_str(iface, "vnfd-connection-point-ref",
++                                                                                        255)})
++                        if not existing_ifaces:
++                            raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'vld[{}]':'vnfd-connection-point"
++                                                "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
++                                                "connection-point name at VNFD '{}'".format(
++                                                    str(nsd["id"]), str(vld["id"]), str(iface["vnfd-connection-point-ref"]),
++                                                    str(iface.get("vnfd-id-ref"))[:255]),
++                                                httperrors.Bad_Request)
++                        interface_uuid = existing_ifaces[0]["uuid"]
++                        if existing_ifaces[0]["iface_type"] == "data":
++                            db_sce_net["type"] = "data"
++                        sce_interface_uuid = str(uuid4())
++                        uuid_list.append(sce_interface_uuid)
++                        iface_ip_address = None
++                        if iface.get("ip-address"):
++                            iface_ip_address = str(iface.get("ip-address"))
++                        db_sce_interface = {
++                            "uuid": sce_interface_uuid,
++                            "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
++                            "sce_net_id": sce_net_uuid,
++                            "interface_id": interface_uuid,
++                            "ip_address": iface_ip_address,
++                        }
++                        db_sce_interfaces.append(db_sce_interface)
++                        if not db_sce_net["type"]:
++                            db_sce_net["type"] = "bridge"
+             # table sce_vnffgs (vnffgd)
+             for vnffg in nsd.get("vnffgd").values():
+                 sce_vnffg_uuid = str(uuid4())
+                 uuid_list.append(sce_vnffg_uuid)
+                 db_sce_vnffg = {
+                     "uuid": sce_vnffg_uuid,
+                     "name": get_str(vnffg, "name", 255),
+                     "scenario_id": scenario_uuid,
+                     "vendor": get_str(vnffg, "vendor", 255),
+                     "description": get_str(vld, "description", 255),
+                 }
+                 db_sce_vnffgs.append(db_sce_vnffg)
+                 # deal with rsps
+                 for rsp in vnffg.get("rsp").values():
+                     sce_rsp_uuid = str(uuid4())
+                     uuid_list.append(sce_rsp_uuid)
+                     db_sce_rsp = {
+                         "uuid": sce_rsp_uuid,
+                         "name": get_str(rsp, "name", 255),
+                         "sce_vnffg_id": sce_vnffg_uuid,
+                         "id": get_str(rsp, "id", 255), # only useful to link with classifiers; will be removed later in the code
+                     }
+                     db_sce_rsps.append(db_sce_rsp)
+                     for iface in rsp.get("vnfd-connection-point-ref").values():
+                         vnf_index = str(iface['member-vnf-index-ref'])
+                         if_order = int(iface['order'])
+                         # check correct parameters
+                         if vnf_index not in vnf_index2vnf_uuid:
+                             raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+                                                 "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+                                                 "'nsd':'constituent-vnfd'".format(
+                                                     str(nsd["id"]), str(rsp["id"]), str(iface["member-vnf-index-ref"])),
+                                                 httperrors.Bad_Request)
+                         ingress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+                                                                 FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+                                                                 WHERE={
+                                                                     'vnf_id': vnf_index2vnf_uuid[vnf_index],
+                                                                     'external_name': get_str(iface, "vnfd-ingress-connection-point-ref",
+                                                                                              255)})
+                         if not ingress_existing_ifaces:
+                             raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+                                                 "-ref':'vnfd-ingress-connection-point-ref':'{}'. Reference to a non-existing "
+                                                 "connection-point name at VNFD '{}'".format(
+                                 str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-ingress-connection-point-ref"]),
+                                 str(iface.get("vnfd-id-ref"))[:255]), httperrors.Bad_Request)
+                         egress_existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+                                                                FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+                                                                WHERE={
+                                                                    'vnf_id': vnf_index2vnf_uuid[vnf_index],
+                                                                    'external_name': get_str(iface, "vnfd-egress-connection-point-ref",
+                                                                                             255)})
+                         if not egress_existing_ifaces:
+                             raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+                                                 "-ref':'vnfd-egress-connection-point-ref':'{}'. Reference to a non-existing "
+                                                 "connection-point name at VNFD '{}'".format(
+                                 str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-egress-connection-point-ref"]),
+                                 str(iface.get("vnfd-id-ref"))[:255]), HTTP_Bad_Request)
+                         ingress_interface_uuid = ingress_existing_ifaces[0]["uuid"]
+                         egress_interface_uuid = egress_existing_ifaces[0]["uuid"]
+                         sce_rsp_hop_uuid = str(uuid4())
+                         uuid_list.append(sce_rsp_hop_uuid)
+                         db_sce_rsp_hop = {
+                             "uuid": sce_rsp_hop_uuid,
+                             "if_order": if_order,
+                             "ingress_interface_id": ingress_interface_uuid,
+                             "egress_interface_id": egress_interface_uuid,
+                             "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+                             "sce_rsp_id": sce_rsp_uuid,
+                         }
+                         db_sce_rsp_hops.append(db_sce_rsp_hop)
+                 # deal with classifiers
+                 for classifier in vnffg.get("classifier").values():
+                     sce_classifier_uuid = str(uuid4())
+                     uuid_list.append(sce_classifier_uuid)
+                     # source VNF
+                     vnf_index = str(classifier['member-vnf-index-ref'])
+                     if vnf_index not in vnf_index2vnf_uuid:
+                         raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'vnfd-connection-point"
+                                             "-ref':'member-vnf-index-ref':'{}'. Reference to a non-existing index at "
+                                             "'nsd':'constituent-vnfd'".format(
+                                                 str(nsd["id"]), str(classifier["id"]), str(classifier["member-vnf-index-ref"])),
+                                             httperrors.Bad_Request)
+                     existing_ifaces = mydb.get_rows(SELECT=('i.uuid as uuid',),
+                                                     FROM="interfaces as i join vms on i.vm_id=vms.uuid",
+                                                     WHERE={'vnf_id': vnf_index2vnf_uuid[vnf_index],
+                                                            'external_name': get_str(classifier, "vnfd-connection-point-ref",
+                                                                                     255)})
+                     if not existing_ifaces:
+                         raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'rsp[{}]':'vnfd-connection-point"
+                                             "-ref':'vnfd-connection-point-ref':'{}'. Reference to a non-existing "
+                                             "connection-point name at VNFD '{}'".format(
+                                                 str(nsd["id"]), str(rsp["id"]), str(iface["vnfd-connection-point-ref"]),
+                                                 str(iface.get("vnfd-id-ref"))[:255]),
+                                             httperrors.Bad_Request)
+                     interface_uuid = existing_ifaces[0]["uuid"]
+                     db_sce_classifier = {
+                         "uuid": sce_classifier_uuid,
+                         "name": get_str(classifier, "name", 255),
+                         "sce_vnffg_id": sce_vnffg_uuid,
+                         "sce_vnf_id": vnf_index2scevnf_uuid[vnf_index],
+                         "interface_id": interface_uuid,
+                     }
+                     rsp_id = get_str(classifier, "rsp-id-ref", 255)
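+                     # resolve the classifier's RSP through the temporary 'id' field kept in db_sce_rsps
+                     # (that field is popped from every entry before the DB insertion below)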
+                     rsp = next((item for item in db_sce_rsps if item["id"] == rsp_id), None)
+                     if not rsp:
+                         raise NfvoException("Error. Invalid NS descriptor at 'nsd[{}]':'classifier[{}]':'rsp-id-ref':"
+                                             "'{}'. Reference to a non-existing 'rsp'".format(
+                                                 str(nsd["id"]), str(classifier["id"]), rsp_id),
+                                             httperrors.Bad_Request)
+                     db_sce_classifier["sce_rsp_id"] = rsp["uuid"]
+                     db_sce_classifiers.append(db_sce_classifier)
+                     for match in classifier.get("match-attributes").values():
+                         sce_classifier_match_uuid = str(uuid4())
+                         uuid_list.append(sce_classifier_match_uuid)
+                         db_sce_classifier_match = {
+                             "uuid": sce_classifier_match_uuid,
+                             "ip_proto": get_str(match, "ip-proto", 2),
+                             "source_ip": get_str(match, "source-ip-address", 16),
+                             "destination_ip": get_str(match, "destination-ip-address", 16),
+                             "source_port": get_str(match, "source-port", 5),
+                             "destination_port": get_str(match, "destination-port", 5),
+                             "sce_classifier_id": sce_classifier_uuid,
+                         }
+                         db_sce_classifier_matches.append(db_sce_classifier_match)
+                     # TODO: vnf/cp keys
+         # remove unneeded id's in sce_rsps
+         for rsp in db_sce_rsps:
+             rsp.pop('id')
+         db_tables = [
+             {"scenarios": db_scenarios},
+             {"sce_nets": db_sce_nets},
+             {"ip_profiles": db_ip_profiles},
+             {"sce_vnfs": db_sce_vnfs},
+             {"sce_interfaces": db_sce_interfaces},
+             {"sce_vnffgs": db_sce_vnffgs},
+             {"sce_rsps": db_sce_rsps},
+             {"sce_rsp_hops": db_sce_rsp_hops},
+             {"sce_classifiers": db_sce_classifiers},
+             {"sce_classifier_matches": db_sce_classifier_matches},
+         ]
+         logger.debug("new_nsd_v3 done: %s",
+                     yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
+         mydb.new_rows(db_tables, uuid_list)
+         return nsd_uuid_list
+     except NfvoException:
+         raise
+     except Exception as e:
+         logger.error("Exception {}".format(e))
+         raise  # NfvoException("Exception {}".format(e), httperrors.Bad_Request)
+ def edit_scenario(mydb, tenant_id, scenario_id, data):
+     data["uuid"] = scenario_id
+     data["tenant_id"] = tenant_id
+     c = mydb.edit_scenario( data )
+     return c
+ @deprecated("Use create_instance")
+ def start_scenario(mydb, tenant_id, scenario_id, instance_scenario_name, instance_scenario_description, datacenter=None,
+                    vim_tenant=None, startvms=True):
+     #print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
+     datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter, vim_tenant=vim_tenant)
+     vims = {datacenter_id: myvim}
+     myvim_tenant = myvim['tenant_id']
+     datacenter_name = myvim['name']
+     rollbackList=[]
+     try:
+         #print "Checking that the scenario_id exists and getting the scenario dictionary"
+         scenarioDict = mydb.get_scenario(scenario_id, tenant_id, datacenter_id=datacenter_id)
+         scenarioDict['datacenter2tenant'] = { datacenter_id: myvim['config']['datacenter_tenant_id'] }
+         scenarioDict['datacenter_id'] = datacenter_id
+         #print '================scenarioDict======================='
+         #print json.dumps(scenarioDict, indent=4)
+         #print 'BEGIN launching instance scenario "%s" based on "%s"' % (instance_scenario_name,scenarioDict['name'])
+         logger.debug("start_scenario Scenario %s: consisting of %d VNF(s)", scenarioDict['name'],len(scenarioDict['vnfs']))
+         #print yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False)
+         auxNetDict = {}   # Auxiliary dictionary. First key: 'scenario' or sce_vnf uuid. Second key: uuid of the net/sce_net. Value: vim_net_id
+         auxNetDict['scenario'] = {}
+         logger.debug("start_scenario 1. Creating new nets (sce_nets) in the VIM")
+         for sce_net in scenarioDict['nets']:
+             #print "Net name: %s. Description: %s" % (sce_net["name"], sce_net["description"])
+             myNetName = "{}.{}".format(instance_scenario_name, sce_net['name'])
+             myNetName = myNetName[0:255] #limit length
+             myNetType = sce_net['type']
+             myNetDict = {}
+             myNetDict["name"] = myNetName
+             myNetDict["type"] = myNetType
+             myNetDict["tenant_id"] = myvim_tenant
+             myNetIPProfile = sce_net.get('ip_profile', None)
++            myProviderNetwork = sce_net.get('provider_network', None)
+             #TODO:
+             #We should use the dictionary as input parameter for new_network
+             #print myNetDict
+             if not sce_net["external"]:
 -                network_id, _  = myvim.new_network(myNetName, myNetType, myNetIPProfile)
++                network_id, _ = myvim.new_network(myNetName, myNetType, myNetIPProfile, provider_network_profile=myProviderNetwork)
+                 #print "New VIM network created for scenario %s. Network id:  %s" % (scenarioDict['name'],network_id)
+                 sce_net['vim_id'] = network_id
+                 auxNetDict['scenario'][sce_net['uuid']] = network_id
+                 rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':network_id})
+                 sce_net["created"] = True
+             else:
+                 if sce_net['vim_id'] is None:
+                     error_text = "Error, datacenter '{}' does not have external network '{}'.".format(
+                         datacenter_name, sce_net['name'])
+                     _, message = rollback(mydb, vims, rollbackList)
+                     logger.error("nfvo.start_scenario: %s", error_text)
+                     raise NfvoException(error_text, httperrors.Bad_Request)
+                 logger.debug("Using existent VIM network for scenario %s. Network id %s", scenarioDict['name'],sce_net['vim_id'])
+                 auxNetDict['scenario'][sce_net['uuid']] = sce_net['vim_id']
+         logger.debug("start_scenario 2. Creating new nets (vnf internal nets) in the VIM")
+         #For each vnf net, we create it and we add it to instanceNetlist.
+         for sce_vnf in scenarioDict['vnfs']:
+             for net in sce_vnf['nets']:
+                 #print "Net name: %s. Description: %s" % (net["name"], net["description"])
+                 myNetName = "{}.{}".format(instance_scenario_name,net['name'])
+                 myNetName = myNetName[0:255] #limit length
+                 myNetType = net['type']
+                 myNetDict = {}
+                 myNetDict["name"] = myNetName
+                 myNetDict["type"] = myNetType
+                 myNetDict["tenant_id"] = myvim_tenant
+                 myNetIPProfile = net.get('ip_profile', None)
++                myProviderNetwork = net.get('provider_network', None)
+                 #print myNetDict
+                 #TODO:
+                 #We should use the dictionary as input parameter for new_network
++                network_id, _ = myvim.new_network(myNetName, myNetType, myNetIPProfile, provider_network_profile=myProviderNetwork)
+                 #print "VIM network id for scenario %s: %s" % (scenarioDict['name'],network_id)
+                 net['vim_id'] = network_id
+                 if sce_vnf['uuid'] not in auxNetDict:
+                     auxNetDict[sce_vnf['uuid']] = {}
+                 auxNetDict[sce_vnf['uuid']][net['uuid']] = network_id
+                 rollbackList.append({'what':'network','where':'vim','vim_id':datacenter_id,'uuid':network_id})
+                 net["created"] = True
+         #print "auxNetDict:"
+         #print yaml.safe_dump(auxNetDict, indent=4, default_flow_style=False)
+         logger.debug("start_scenario 3. Creating new vm instances in the VIM")
+         #myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
+         i = 0
+         for sce_vnf in scenarioDict['vnfs']:
+             vnf_availability_zones = []
+             for vm in sce_vnf['vms']:
+                 vm_av = vm.get('availability_zone')
+                 if vm_av and vm_av not in vnf_availability_zones:
+                     vnf_availability_zones.append(vm_av)
+             # check if there is enough availability zones available at vim level.
+             if vims[datacenter_id].availability_zone and vnf_availability_zones:
+                 if len(vnf_availability_zones) > len(vims[datacenter_id].availability_zone):
+                     raise NfvoException('Not enough availability zones at VIM for this deployment', httperrors.Bad_Request)
+             for vm in sce_vnf['vms']:
+                 i += 1
+                 myVMDict = {}
+                 #myVMDict['name'] = "%s-%s-%s" % (scenarioDict['name'],sce_vnf['name'], vm['name'])
+                 myVMDict['name'] = "{}.{}.{}".format(instance_scenario_name,sce_vnf['name'],chr(96+i))
+                 #myVMDict['description'] = vm['description']
+                 myVMDict['description'] = myVMDict['name'][0:99]
+                 if not startvms:
+                     myVMDict['start'] = "no"
+                 myVMDict['name'] = myVMDict['name'][0:255] #limit name length
+                 #print "VM name: %s. Description: %s" % (myVMDict['name'], myVMDict['name'])
+                 # create image at VIM in case it does not exist
+                 image_dict = mydb.get_table_by_uuid_name("images", vm['image_id'])
+                 image_id = create_or_use_image(mydb, vims, image_dict, [], True)
+                 vm['vim_image_id'] = image_id
+                 # create flavor at VIM in case it does not exist
+                 flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
+                 if flavor_dict['extended'] is not None:
+                     flavor_dict['extended'] = yaml.load(flavor_dict['extended'], Loader=yaml.Loader)
+                 flavor_id = create_or_use_flavor(mydb, vims, flavor_dict, [], True)
+                 vm['vim_flavor_id'] = flavor_id
+                 myVMDict['imageRef'] = vm['vim_image_id']
+                 myVMDict['flavorRef'] = vm['vim_flavor_id']
+                 myVMDict['networks'] = []
+                 for iface in vm['interfaces']:
+                     netDict = {}
+                     if iface['type']=="data":
+                         netDict['type'] = iface['model']
+                     elif "model" in iface and iface["model"]!=None:
+                         netDict['model']=iface['model']
+                     # TODO in future, remove this because mac_address will not be set, and the type of PF,VF is obtained from the interface table model
+                     #discover type of interface looking at flavor
+                     for numa in flavor_dict.get('extended',{}).get('numas',[]):
+                         for flavor_iface in numa.get('interfaces',[]):
+                             if flavor_iface.get('name') == iface['internal_name']:
+                                 if flavor_iface['dedicated'] == 'yes':
+                                     netDict['type']="PF"    #passthrough
+                                 elif flavor_iface['dedicated'] == 'no':
+                                     netDict['type']="VF"    #siov
+                                 elif flavor_iface['dedicated'] == 'yes:sriov':
+                                     netDict['type']="VFnotShared"   #sriov but only one sriov on the PF
+                                 netDict["mac_address"] = flavor_iface.get("mac_address")
+                                 break
+                     netDict["use"]=iface['type']
+                     if netDict["use"]=="data" and not netDict.get("type"):
+                         #print "netDict", netDict
+                         #print "iface", iface
+                         e_text = "Cannot determine the interface type PF or VF of VNF '{}' VM '{}' iface '{}'".format(
+                             sce_vnf['name'], vm['name'], iface['internal_name'])
+                         if flavor_dict.get('extended') is None:
+                             raise NfvoException(e_text + ". After database migration some information is not available. "
+                                                 "Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
+                         else:
+                             raise NfvoException(e_text, httperrors.Internal_Server_Error)
+                     if netDict["use"]=="mgmt" or netDict["use"]=="bridge":
+                         netDict["type"]="virtual"
+                     if "vpci" in iface and iface["vpci"] is not None:
+                         netDict['vpci'] = iface['vpci']
+                     if "mac" in iface and iface["mac"] is not None:
+                         netDict['mac_address'] = iface['mac']
+                     if "port-security" in iface and iface["port-security"] is not None:
+                         netDict['port_security'] = iface['port-security']
+                     if "floating-ip" in iface and iface["floating-ip"] is not None:
+                         netDict['floating_ip'] = iface['floating-ip']
+                     netDict['name'] = iface['internal_name']
+                     if iface['net_id'] is None:
+                         for vnf_iface in sce_vnf["interfaces"]:
+                             #print iface
+                             #print vnf_iface
+                             if vnf_iface['interface_id']==iface['uuid']:
+                                 netDict['net_id'] = auxNetDict['scenario'][ vnf_iface['sce_net_id'] ]
+                                 break
+                     else:
+                         netDict['net_id'] = auxNetDict[ sce_vnf['uuid'] ][ iface['net_id'] ]
+                     #skip bridge ifaces not connected to any net
+                     #if 'net_id' not in netDict or netDict['net_id']==None:
+                     #    continue
+                     myVMDict['networks'].append(netDict)
+                 #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+                 #print myVMDict['name']
+                 #print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
+                 #print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
+                 #print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+                 if 'availability_zone' in myVMDict:
+                     av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
+                 else:
+                     av_index = None
+                 vm_id, _ = myvim.new_vminstance(myVMDict['name'], myVMDict['description'], myVMDict.get('start', None),
+                                              myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'],
+                                              availability_zone_index=av_index,
+                                              availability_zone_list=vnf_availability_zones)
+                 #print "VIM vm instance id (server id) for scenario %s: %s" % (scenarioDict['name'],vm_id)
+                 vm['vim_id'] = vm_id
+                 rollbackList.append({'what':'vm','where':'vim','vim_id':datacenter_id,'uuid':vm_id})
+                 # put interface uuid back to scenario[vnfs][vms][interfaces]
+                 for net in myVMDict['networks']:
+                     if "vim_id" in net:
+                         for iface in vm['interfaces']:
+                             if net["name"]==iface["internal_name"]:
+                                 iface["vim_id"]=net["vim_id"]
+                                 break
+         logger.debug("start scenario Deployment done")
+         #print yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False)
+         #r,c = mydb.new_instance_scenario_as_a_whole(nfvo_tenant,scenarioDict['name'],scenarioDict)
+         instance_id = mydb.new_instance_scenario_as_a_whole(tenant_id,instance_scenario_name, instance_scenario_description, scenarioDict)
+         return mydb.get_instance_scenario(instance_id)
+     except (db_base_Exception, vimconn.vimconnException) as e:
+         _, message = rollback(mydb, vims, rollbackList)
+         if isinstance(e, db_base_Exception):
+             error_text = "Exception at database"
+         else:
+             error_text = "Exception at VIM"
+         error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
+         #logger.error("start_scenario %s", error_text)
+         raise NfvoException(error_text, e.http_code)
+ def unify_cloud_config(cloud_config_preserve, cloud_config):
+     """ join the cloud config information into cloud_config_preserve.
+     In case of conflict cloud_config_preserve preserves
+     None is allowed
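+     Illustrative example:
+         unify_cloud_config({"key-pairs": ["k1"]}, {"key-pairs": ["k1", "k2"], "boot-data-drive": True})
+         returns {"key-pairs": ["k1", "k2"], "boot-data-drive": True}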
+     """
+     if not cloud_config_preserve and not cloud_config:
+         return None
+     new_cloud_config = {"key-pairs":[], "users":[]}
+     # key-pairs
+     if cloud_config_preserve:
+         for key in cloud_config_preserve.get("key-pairs", () ):
+             if key not in new_cloud_config["key-pairs"]:
+                 new_cloud_config["key-pairs"].append(key)
+     if cloud_config:
+         for key in cloud_config.get("key-pairs", () ):
+             if key not in new_cloud_config["key-pairs"]:
+                 new_cloud_config["key-pairs"].append(key)
+     if not new_cloud_config["key-pairs"]:
+         del new_cloud_config["key-pairs"]
+     # users
+     if cloud_config:
+         new_cloud_config["users"] += cloud_config.get("users", () )
+     if cloud_config_preserve:
+         new_cloud_config["users"] += cloud_config_preserve.get("users", () )
+     index_to_delete = []
+     users = new_cloud_config.get("users", [])
+     for index0 in range(0,len(users)):
+         if index0 in index_to_delete:
+             continue
+         for index1 in range(index0+1,len(users)):
+             if index1 in index_to_delete:
+                 continue
+             if users[index0]["name"] == users[index1]["name"]:
+                 index_to_delete.append(index1)
+                 for key in users[index1].get("key-pairs",()):
+                     if "key-pairs" not in users[index0]:
+                         users[index0]["key-pairs"] = [key]
+                     elif key not in users[index0]["key-pairs"]:
+                         users[index0]["key-pairs"].append(key)
+     index_to_delete.sort(reverse=True)
+     for index in index_to_delete:
+         del users[index]
+     if not new_cloud_config["users"]:
+         del new_cloud_config["users"]
+     #boot-data-drive
+     if cloud_config and cloud_config.get("boot-data-drive") != None:
+         new_cloud_config["boot-data-drive"] = cloud_config["boot-data-drive"]
+     if cloud_config_preserve and cloud_config_preserve.get("boot-data-drive") != None:
+         new_cloud_config["boot-data-drive"] = cloud_config_preserve["boot-data-drive"]
+     # user-data
+     new_cloud_config["user-data"] = []
+     if cloud_config and cloud_config.get("user-data"):
+         if isinstance(cloud_config["user-data"], list):
+             new_cloud_config["user-data"] += cloud_config["user-data"]
+         else:
+             new_cloud_config["user-data"].append(cloud_config["user-data"])
+     if cloud_config_preserve and cloud_config_preserve.get("user-data"):
+         if isinstance(cloud_config_preserve["user-data"], list):
+             new_cloud_config["user-data"] += cloud_config_preserve["user-data"]
+         else:
+             new_cloud_config["user-data"].append(cloud_config_preserve["user-data"])
+     if not new_cloud_config["user-data"]:
+         del new_cloud_config["user-data"]
+     # config files
+     new_cloud_config["config-files"] = []
+     if cloud_config and cloud_config.get("config-files") != None:
+         new_cloud_config["config-files"] += cloud_config["config-files"]
+     if cloud_config_preserve:
+         for file in cloud_config_preserve.get("config-files", ()):
+             for index in range(0, len(new_cloud_config["config-files"])):
+                 if new_cloud_config["config-files"][index]["dest"] == file["dest"]:
+                     new_cloud_config["config-files"][index] = file
+                     break
+             else:
+                 new_cloud_config["config-files"].append(file)
+     if not new_cloud_config["config-files"]:
+         del new_cloud_config["config-files"]
+     return new_cloud_config
+ def get_vim_thread(mydb, tenant_id, datacenter_id_name=None, datacenter_tenant_id=None):
+     datacenter_id = None
+     datacenter_name = None
+     thread = None
+     try:
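+         # worker threads are indexed by datacenter_tenant_id in the module-level vim_threads["running"] map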
+         if datacenter_tenant_id:
+             thread_id = datacenter_tenant_id
+             thread = vim_threads["running"].get(datacenter_tenant_id)
+         else:
+             where_={"td.nfvo_tenant_id": tenant_id}
+             if datacenter_id_name:
+                 if utils.check_valid_uuid(datacenter_id_name):
+                     datacenter_id = datacenter_id_name
+                     where_["dt.datacenter_id"] = datacenter_id
+                 else:
+                     datacenter_name = datacenter_id_name
+                     where_["d.name"] = datacenter_name
+             if datacenter_tenant_id:
+                 where_["dt.uuid"] = datacenter_tenant_id
+             datacenters = mydb.get_rows(
+                 SELECT=("dt.uuid as datacenter_tenant_id",),
+                 FROM="datacenter_tenants as dt join tenants_datacenters as td on dt.uuid=td.datacenter_tenant_id "
+                      "join datacenters as d on d.uuid=dt.datacenter_id",
+                 WHERE=where_)
+             if len(datacenters) > 1:
+                 raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
+             elif datacenters:
+                 thread_id = datacenters[0]["datacenter_tenant_id"]
+                 thread = vim_threads["running"].get(thread_id)
+         if not thread:
+             raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
+         return thread_id, thread
+     except db_base_Exception as e:
+         raise NfvoException("{} {}".format(type(e).__name__ , str(e)), e.http_code)
+ def get_datacenter_uuid(mydb, tenant_id, datacenter_id_name):
+     WHERE_dict={}
+     if utils.check_valid_uuid(datacenter_id_name):
+         WHERE_dict['d.uuid'] = datacenter_id_name
+     else:
+         WHERE_dict['d.name'] = datacenter_id_name
+     if tenant_id:
+         WHERE_dict['nfvo_tenant_id'] = tenant_id
+         from_= "tenants_datacenters as td join datacenters as d on td.datacenter_id=d.uuid join datacenter_tenants as" \
+                " dt on td.datacenter_tenant_id=dt.uuid"
+     else:
+         from_ = 'datacenters as d'
+     vimaccounts = mydb.get_rows(FROM=from_, SELECT=("d.uuid as uuid, d.name as name",), WHERE=WHERE_dict )
+     if len(vimaccounts) == 0:
+         raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
+     elif len(vimaccounts)>1:
+         #print "nfvo.datacenter_action() error. Several datacenters found"
+         raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
+     return vimaccounts[0]["uuid"], vimaccounts[0]["name"]
+ def get_datacenter_by_name_uuid(mydb, tenant_id, datacenter_id_name=None, **extra_filter):
+     datacenter_id = None
+     datacenter_name = None
+     if datacenter_id_name:
+         if utils.check_valid_uuid(datacenter_id_name):
+             datacenter_id = datacenter_id_name
+         else:
+             datacenter_name = datacenter_id_name
+     vims = get_vim(mydb, tenant_id, datacenter_id, datacenter_name, **extra_filter)
+     if len(vims) == 0:
+         raise NfvoException("datacenter '{}' not found".format(str(datacenter_id_name)), httperrors.Not_Found)
+     elif len(vims)>1:
+         #print "nfvo.datacenter_action() error. Several datacenters found"
+         raise NfvoException("More than one datacenters found, try to identify with uuid", httperrors.Conflict)
+     for vim_id, vim_content in vims.items():
+         return vim_id, vim_content
+ def update(d, u):
+     """Takes dict d and updates it with the values in dict u.
+        It merges all depth levels"""
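+     # e.g. update({"a": {"x": 1}}, {"a": {"y": 2}}) -> {"a": {"x": 1, "y": 2}}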
+     for k, v in u.items():
+         if isinstance(v, collections.abc.Mapping):
+             r = update(d.get(k, {}), v)
+             d[k] = r
+         else:
+             d[k] = u[k]
+     return d
+ def _get_wim(db, wim_account_id):
+     # get wim from wim_account
+     wim_accounts = db.get_rows(FROM='wim_accounts', WHERE={"uuid": wim_account_id})
+     if not wim_accounts:
+         raise NfvoException("Not found sdn id={}".format(wim_account_id), http_code=httperrors.Not_Found)
+     return wim_accounts[0]["wim_id"]
+ def create_instance(mydb, tenant_id, instance_dict):
+     # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
+     # logger.debug("Creating instance...")
++
+     scenario = instance_dict["scenario"]
+     # find main datacenter
+     myvims = {}
+     myvim_threads_id = {}
+     datacenter = instance_dict.get("datacenter")
+     default_wim_account = instance_dict.get("wim_account")
+     default_datacenter_id, vim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+     myvims[default_datacenter_id] = vim
+     myvim_threads_id[default_datacenter_id], _ = get_vim_thread(mydb, tenant_id, default_datacenter_id)
+     tenant = mydb.get_rows_by_id('nfvo_tenants', tenant_id)
+     # myvim_tenant = myvim['tenant_id']
+     rollbackList = []
+     # print "Checking that the scenario exists and getting the scenario dictionary"
+     if isinstance(scenario, str):
+         scenarioDict = mydb.get_scenario(scenario, tenant_id, datacenter_vim_id=myvim_threads_id[default_datacenter_id],
+                                          datacenter_id=default_datacenter_id)
+     else:
+         scenarioDict = scenario
+         scenarioDict["uuid"] = None
+     # logger.debug(">>>>>> Dictionaries before merging")
+     # logger.debug(">>>>>> InstanceDict:\n{}".format(yaml.safe_dump(instance_dict,default_flow_style=False, width=256)))
+     # logger.debug(">>>>>> ScenarioDict:\n{}".format(yaml.safe_dump(scenarioDict,default_flow_style=False, width=256)))
+     db_instance_vnfs = []
+     db_instance_vms = []
+     db_instance_interfaces = []
+     db_instance_sfis = []
+     db_instance_sfs = []
+     db_instance_classifications = []
+     db_instance_sfps = []
+     db_ip_profiles = []
+     db_vim_actions = []
+     uuid_list = []
+     task_index = 0
+     instance_name = instance_dict["name"]
+     instance_uuid = str(uuid4())
+     uuid_list.append(instance_uuid)
+     db_instance_scenario = {
+         "uuid": instance_uuid,
+         "name": instance_name,
+         "tenant_id": tenant_id,
+         "scenario_id": scenarioDict['uuid'],
+         "datacenter_id": default_datacenter_id,
+         # 'datacenter_tenant_id' is filled below
+         "description": instance_dict.get("description"),
+     }
+     if scenarioDict.get("cloud-config"):
+         db_instance_scenario["cloud_config"] = yaml.safe_dump(scenarioDict["cloud-config"],
+                                                               default_flow_style=True, width=256)
+     instance_action_id = get_task_id()
+     db_instance_action = {
+         "uuid": instance_action_id,   # same uuid for the instance and the action on create
+         "tenant_id": tenant_id,
+         "instance_id": instance_uuid,
+         "description": "CREATE",
+     }
+     # Auxiliary dictionaries from x to y
+     sce_net2wim_instance = {}
+     sce_net2instance = {}
+     net2task_id = {'scenario': {}}
+     # Mapping between local networks and WIMs
+     wim_usage = {}
+     def ip_profile_IM2RO(ip_profile_im):
+         # translate from input format to database format
+         ip_profile_ro = {}
+         if 'subnet-address' in ip_profile_im:
+             ip_profile_ro['subnet_address'] = ip_profile_im['subnet-address']
+         if 'ip-version' in ip_profile_im:
+             ip_profile_ro['ip_version'] = ip_profile_im['ip-version']
+         if 'gateway-address' in ip_profile_im:
+             ip_profile_ro['gateway_address'] = ip_profile_im['gateway-address']
+         if 'dns-address' in ip_profile_im:
+             ip_profile_ro['dns_address'] = ip_profile_im['dns-address']
+             if isinstance(ip_profile_ro['dns_address'], (list, tuple)):
+                 ip_profile_ro['dns_address'] = ";".join(ip_profile_ro['dns_address'])
+         if 'dhcp' in ip_profile_im:
+             ip_profile_ro['dhcp_start_address'] = ip_profile_im['dhcp'].get('start-address')
+             ip_profile_ro['dhcp_enabled'] = ip_profile_im['dhcp'].get('enabled', True)
+             ip_profile_ro['dhcp_count'] = ip_profile_im['dhcp'].get('count')
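+         # e.g. {"subnet-address": "10.0.0.0/24", "dhcp": {"enabled": True}} becomes
+         # {"subnet_address": "10.0.0.0/24", "dhcp_enabled": True, "dhcp_start_address": None, "dhcp_count": None}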
+         return ip_profile_ro
+     # logger.debug("Creating instance from scenario-dict:\n%s",
+     #               yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
+     try:
+         # 0 check correct parameters
+         for net_name, net_instance_desc in instance_dict.get("networks", {}).items():
+             for scenario_net in scenarioDict['nets']:
+                 if net_name == scenario_net.get("name") or net_name == scenario_net.get("osm_id") or net_name == scenario_net.get("uuid"):
+                     break
+             else:
+                 raise NfvoException("Invalid scenario network name or id '{}' at instance:networks".format(net_name),
+                                     httperrors.Bad_Request)
+             if "sites" not in net_instance_desc:
+                 net_instance_desc["sites"] = [ {} ]
+             site_without_datacenter_field = False
+             for site in net_instance_desc["sites"]:
+                 if site.get("datacenter"):
+                     site["datacenter"], _ = get_datacenter_uuid(mydb, tenant_id, site["datacenter"])
+                     if site["datacenter"] not in myvims:
+                         # Add this datacenter to myvims
+                         d, v = get_datacenter_by_name_uuid(mydb, tenant_id, site["datacenter"])
+                         myvims[d] = v
+                         myvim_threads_id[d], _ = get_vim_thread(mydb, tenant_id, site["datacenter"])
+                         site["datacenter"] = d  # change name to id
+                 else:
+                     if site_without_datacenter_field:
+                         raise NfvoException("Found more than one entries without datacenter field at "
+                                             "instance:networks:{}:sites".format(net_name), httperrors.Bad_Request)
+                     site_without_datacenter_field = True
+                     site["datacenter"] = default_datacenter_id   # change name to id
+         for vnf_name, vnf_instance_desc in instance_dict.get("vnfs",{}).items():
+             for scenario_vnf in scenarioDict['vnfs']:
+                 if vnf_name == scenario_vnf['member_vnf_index'] or vnf_name == scenario_vnf['uuid'] or vnf_name == scenario_vnf['name']:
+                     break
+             else:
+                 raise NfvoException("Invalid vnf name '{}' at instance:vnfs".format(vnf_name), httperrors.Bad_Request)
+             if "datacenter" in vnf_instance_desc:
+                 # Add this datacenter to myvims
+                 vnf_instance_desc["datacenter"], _ = get_datacenter_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
+                 if vnf_instance_desc["datacenter"] not in myvims:
+                     d, v = get_datacenter_by_name_uuid(mydb, tenant_id, vnf_instance_desc["datacenter"])
+                     myvims[d] = v
+                     myvim_threads_id[d], _ = get_vim_thread(mydb, tenant_id, vnf_instance_desc["datacenter"])
+                 scenario_vnf["datacenter"] = vnf_instance_desc["datacenter"]
+             for net_id, net_instance_desc in vnf_instance_desc.get("networks", {}).items():
+                 for scenario_net in scenario_vnf['nets']:
+                     if net_id == scenario_net['osm_id'] or net_id == scenario_net['uuid'] or net_id == scenario_net["name"]:
+                         break
+                 else:
+                     raise NfvoException("Invalid net id or name '{}' at instance:vnfs:networks".format(net_id), httperrors.Bad_Request)
+                 if net_instance_desc.get("vim-network-name"):
+                     scenario_net["vim-network-name"] = net_instance_desc["vim-network-name"]
+                 if net_instance_desc.get("vim-network-id"):
+                     scenario_net["vim-network-id"] = net_instance_desc["vim-network-id"]
+                 if net_instance_desc.get("name"):
+                     scenario_net["name"] = net_instance_desc["name"]
+                 if 'ip-profile' in net_instance_desc:
+                     ipprofile_db = ip_profile_IM2RO(net_instance_desc['ip-profile'])
+                     if 'ip_profile' not in scenario_net:
+                         scenario_net['ip_profile'] = ipprofile_db
+                     else:
+                         update(scenario_net['ip_profile'], ipprofile_db)
++                if 'provider-network' in net_instance_desc:
++                    provider_network_db = net_instance_desc['provider-network']
++                    if 'provider_network' not in scenario_net:
++                        scenario_net['provider_network'] = provider_network_db
++                    else:
++                        update(scenario_net['provider_network'], provider_network_db)
++
+             for vdu_id, vdu_instance_desc in vnf_instance_desc.get("vdus", {}).items():
+                 for scenario_vm in scenario_vnf['vms']:
+                     if vdu_id == scenario_vm['osm_id'] or vdu_id == scenario_vm["name"]:
+                         break
+                 else:
+                     raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
+                 scenario_vm["instance_parameters"] = vdu_instance_desc
+                 for iface_id, iface_instance_desc in vdu_instance_desc.get("interfaces", {}).items():
+                     for scenario_interface in scenario_vm['interfaces']:
+                         if iface_id == scenario_interface['internal_name'] or iface_id == scenario_interface["external_name"]:
+                             scenario_interface.update(iface_instance_desc)
+                             break
+                     else:
+                         raise NfvoException("Invalid vdu id or name '{}' at instance:vnfs:vdus".format(vdu_id), httperrors.Bad_Request)
+         # 0.1 parse cloud-config parameters
+         cloud_config = unify_cloud_config(instance_dict.get("cloud-config"), scenarioDict.get("cloud-config"))
+         # 0.2 merge instance information into scenario
+         # Ideally, the operation should be as simple as: update(scenarioDict,instance_dict)
+         # However, this is not possible yet.
+         for net_name, net_instance_desc in instance_dict.get("networks", {}).items():
+             for scenario_net in scenarioDict['nets']:
+                 if net_name == scenario_net.get("name") or net_name == scenario_net.get("osm_id") or net_name == scenario_net.get("uuid"):
+                     if "wim_account" in net_instance_desc and net_instance_desc["wim_account"] is not None:
+                         scenario_net["wim_account"] = net_instance_desc["wim_account"]
+                     if 'ip-profile' in net_instance_desc:
+                         ipprofile_db = ip_profile_IM2RO(net_instance_desc['ip-profile'])
+                         if 'ip_profile' not in scenario_net:
+                             scenario_net['ip_profile'] = ipprofile_db
+                         else:
+                             update(scenario_net['ip_profile'], ipprofile_db)
++                    if 'provider-network' in net_instance_desc:
++                        provider_network_db = net_instance_desc['provider-network']
++                        if 'provider_network' not in scenario_net:
++                            scenario_net['provider_network'] = provider_network_db
++                        else:
++                            update(scenario_net['provider_network'], provider_network_db)
++
+             for interface in net_instance_desc.get('interfaces', ()):
+                 if 'ip_address' in interface:
+                     for vnf in scenarioDict['vnfs']:
+                         if interface['vnf'] == vnf['name']:
+                             for vnf_interface in vnf['interfaces']:
+                                 if interface['vnf_interface'] == vnf_interface['external_name']:
+                                     vnf_interface['ip_address'] = interface['ip_address']
+         # logger.debug(">>>>>>>> Merged dictionary")
+         # logger.debug("Creating instance scenario-dict MERGED:\n%s",
+         #              yaml.safe_dump(scenarioDict, indent=4, default_flow_style=False))
++
+         # 1. Creating new nets (sce_nets) in the VIM"
+         number_mgmt_networks = 0
+         db_instance_nets = []
+         db_instance_wim_nets = []
+         for sce_net in scenarioDict['nets']:
++
+             sce_net_uuid = sce_net.get('uuid', sce_net["name"])
+             # get involved datacenters where this network needs to be created
+             involved_datacenters = []
+             for sce_vnf in scenarioDict.get("vnfs", ()):
+                 vnf_datacenter = sce_vnf.get("datacenter", default_datacenter_id)
+                 if vnf_datacenter in involved_datacenters:
+                     continue
+                 if sce_vnf.get("interfaces"):
+                     for sce_vnf_ifaces in sce_vnf["interfaces"]:
+                         if sce_vnf_ifaces.get("sce_net_id") == sce_net["uuid"]:
+                             involved_datacenters.append(vnf_datacenter)
+                             break
+             if not involved_datacenters:
+                 involved_datacenters.append(default_datacenter_id)
+             target_wim_account = sce_net.get("wim_account", default_wim_account)
+             # --> WIM
+             # TODO: use this information during network creation
+             wim_account_id = wim_account_name = None
+             if len(involved_datacenters) > 1 and 'uuid' in sce_net:
+                 if target_wim_account is None or target_wim_account is True:  # automatic selection of WIM
+                     # OBS: sce_net without uuid are used internally to VNFs
+                     # and the assumption is that VNFs will not be split among
+                     # different datacenters
+                     wim_account = wim_engine.find_suitable_wim_account(
+                         involved_datacenters, tenant_id)
+                     wim_account_id = wim_account['uuid']
+                     wim_account_name = wim_account['name']
+                     wim_usage[sce_net['uuid']] = wim_account_id
+                 elif isinstance(target_wim_account, str):     # manual selection of WIM
+                     wim_account = wim_engine.persist.get_wim_account_by(target_wim_account, tenant_id)
+                     wim_account_id = wim_account['uuid']
+                     wim_account_name = wim_account['name']
+                     wim_usage[sce_net['uuid']] = wim_account_id
+                 else:  # not WIM usage
+                     wim_usage[sce_net['uuid']] = False
+             # <-- WIM
+             descriptor_net = {}
+             if instance_dict.get("networks"):
+                 if sce_net.get("uuid") in instance_dict["networks"]:
+                     descriptor_net = instance_dict["networks"][sce_net["uuid"]]
+                     descriptor_net_name = sce_net["uuid"]
+                 elif sce_net.get("osm_id") in instance_dict["networks"]:
+                     descriptor_net = instance_dict["networks"][sce_net["osm_id"]]
+                     descriptor_net_name = sce_net["osm_id"]
+                 elif sce_net["name"] in instance_dict["networks"]:
+                     descriptor_net = instance_dict["networks"][sce_net["name"]]
+                     descriptor_net_name = sce_net["name"]
+             net_name = descriptor_net.get("vim-network-name")
+             # add datacenters from instantiation parameters
+             if descriptor_net.get("sites"):
+                 for site in descriptor_net["sites"]:
+                     if site.get("datacenter") and site["datacenter"] not in involved_datacenters:
+                         involved_datacenters.append(site["datacenter"])
+             sce_net2instance[sce_net_uuid] = {}
+             sce_net2wim_instance[sce_net_uuid] = {}
+             net2task_id['scenario'][sce_net_uuid] = {}
+             use_network = None
+             related_network = None
+             if descriptor_net.get("use-network"):
+                 target_instance_nets = mydb.get_rows(
+                     SELECT="related",
+                     FROM="instance_nets",
+                     WHERE={"instance_scenario_id": descriptor_net["use-network"]["instance_scenario_id"],
+                            "osm_id":  descriptor_net["use-network"]["osm_id"]},
+                 )
+                 if not target_instance_nets:
+                     raise NfvoException(
+                         "Cannot find the target network at instance:networks[{}]:use-network".format(descriptor_net_name),
+                         httperrors.Bad_Request)
+                 else:
+                     use_network = target_instance_nets[0]["related"]
+             if sce_net["external"]:
+                 number_mgmt_networks += 1
+             for datacenter_id in involved_datacenters:
+                 netmap_use = None
+                 netmap_create = None
+                 if descriptor_net.get("sites"):
+                     for site in descriptor_net["sites"]:
+                         if site.get("datacenter") == datacenter_id:
+                             netmap_use = site.get("netmap-use")
+                             netmap_create = site.get("netmap-create")
+                             break
+                 vim = myvims[datacenter_id]
+                 myvim_thread_id = myvim_threads_id[datacenter_id]
+                 net_type = sce_net['type']
+                 net_vim_name = None
+                 lookfor_filter = {'admin_state_up': True, 'status': 'ACTIVE'}  # 'shared': True
+                 if not net_name:
+                     if sce_net["external"]:
+                         net_name = sce_net["name"]
+                     else:
+                         net_name = "{}-{}".format(instance_name, sce_net["name"])
+                         net_name = net_name[:255]     # limit length
+                 if netmap_use or netmap_create:
+                     create_network = False
+                     lookfor_network = False
+                     if netmap_use:
+                         lookfor_network = True
+                         if utils.check_valid_uuid(netmap_use):
+                             lookfor_filter["id"] = netmap_use
+                         else:
+                             lookfor_filter["name"] = netmap_use
+                     if netmap_create:
+                         create_network = True
+                         net_vim_name = net_name
+                         if isinstance(netmap_create, str):
+                             net_vim_name = netmap_create
+                 elif sce_net.get("vim_network_name"):
+                     create_network = False
+                     lookfor_network = True
+                     lookfor_filter["name"] = sce_net.get("vim_network_name")
+                 elif sce_net["external"]:
+                     if sce_net.get('vim_id'):
+                         # there is a netmap at datacenter_nets database   # TODO REVISE!!!!
+                         create_network = False
+                         lookfor_network = True
+                         lookfor_filter["id"] = sce_net['vim_id']
+                     elif vim["config"].get("management_network_id") or vim["config"].get("management_network_name"):
+                         if number_mgmt_networks > 1:
+                             raise NfvoException("Found several VLD of type mgmt. "
+                                                 "You must concrete what vim-network must be use for each one",
+                                                 httperrors.Bad_Request)
+                         create_network = False
+                         lookfor_network = True
+                         if vim["config"].get("management_network_id"):
+                             lookfor_filter["id"] = vim["config"]["management_network_id"]
+                         else:
+                             lookfor_filter["name"] = vim["config"]["management_network_name"]
+                     else:
+                         # There is no netmap; look at the datacenter for a net with this name and create it if not found
+                         create_network = True
+                         lookfor_network = True
+                         lookfor_filter["name"] = sce_net["name"]
+                         net_vim_name = sce_net["name"]
+                 else:
+                     net_vim_name = net_name
+                     create_network = True
+                     lookfor_network = False
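
The if/elif chain above boils down to two booleans, create_network and lookfor_network, plus a lookup filter. A simplified sketch of the precedence (the management-network sub-case of external nets is folded into the vim_id branch here):

    def net_strategy(netmap_use, netmap_create, vim_network_name, external, vim_id):
        # returns (create_network, lookfor_network)
        if netmap_use or netmap_create:
            return bool(netmap_create), bool(netmap_use)
        if vim_network_name:
            return False, True
        if external:
            return (False, True) if vim_id else (True, True)
        return True, False

    assert net_strategy("mapped-net", None, None, False, None) == (False, True)
    assert net_strategy(None, None, None, False, None) == (True, False)
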
+                 task_extra = {}
+                 if create_network:
+                     task_action = "CREATE"
 -                                                       "description": "Public key injected",
 -                                                       "name":vm['name']
++                    task_extra["params"] = (net_vim_name, net_type, sce_net.get('ip_profile', None), None, sce_net.get('provider_network', None), wim_account_name)
++
+                     if lookfor_network:
+                         task_extra["find"] = (lookfor_filter,)
+                 elif lookfor_network:
+                     task_action = "FIND"
+                     task_extra["params"] = (lookfor_filter,)
+                 # fill database content
+                 net_uuid = str(uuid4())
+                 uuid_list.append(net_uuid)
+                 sce_net2instance[sce_net_uuid][datacenter_id] = net_uuid
+                 if not related_network:   # all db_instance_nets will have same related
+                     related_network = use_network or net_uuid
+                 sdn_net_id = None
+                 sdn_controller = vim.config.get('sdn-controller')
+                 sce_net2wim_instance[sce_net_uuid][datacenter_id] = None
+                 if sdn_controller and net_type in ("data", "ptp"):
+                     wim_id = _get_wim(mydb, sdn_controller)
+                     sdn_net_id = str(uuid4())
+                     sce_net2wim_instance[sce_net_uuid][datacenter_id] = sdn_net_id
+                     task_extra["sdn_net_id"] = sdn_net_id
+                     db_instance_wim_nets.append({
+                         "uuid": sdn_net_id,
+                         "instance_scenario_id": instance_uuid,
+                         "sce_net_id": sce_net.get("uuid"),
+                         "wim_id": wim_id,
+                         "wim_account_id": sdn_controller,
+                         'status': 'BUILD',  # if create_network else "ACTIVE"
+                         "related": related_network,
+                         'multipoint': True if net_type=="data" else False,
+                         "created": create_network, # TODO py3
+                         "sdn": True,
+                     })
+                     task_wim_extra = {"params": [net_type, wim_account_name]}
+                     db_vim_action = {
+                         "instance_action_id": instance_action_id,
+                         "status": "SCHEDULED",
+                         "task_index": task_index,
+                         # "datacenter_vim_id": myvim_thread_id,
+                         "wim_account_id": sdn_controller,
+                         "action": task_action,
+                         "item": "instance_wim_nets",
+                         "item_id": sdn_net_id,
+                         "related": related_network,
+                         "extra": yaml.safe_dump(task_wim_extra, default_flow_style=True, width=256)
+                     }
+                     task_index += 1
+                     db_vim_actions.append(db_vim_action)
+                 db_net = {
+                     "uuid": net_uuid,
+                     "osm_id": sce_net.get("osm_id") or sce_net["name"],
+                     "related": related_network,
+                     'vim_net_id': None,
+                     "vim_name": net_vim_name,
+                     "instance_scenario_id": instance_uuid,
+                     "sce_net_id": sce_net.get("uuid"),
+                     "created": create_network,
+                     'datacenter_id': datacenter_id,
+                     'datacenter_tenant_id': myvim_thread_id,
+                     'status': 'BUILD', #  if create_network else "ACTIVE"
+                     'sdn_net_id': sdn_net_id,
+                 }
+                 db_instance_nets.append(db_net)
+                 db_vim_action = {
+                     "instance_action_id": instance_action_id,
+                     "status": "SCHEDULED",
+                     "task_index": task_index,
+                     "datacenter_vim_id": myvim_thread_id,
+                     "action": task_action,
+                     "item": "instance_nets",
+                     "item_id": net_uuid,
+                     "related": related_network,
+                     "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
+                 }
+                 net2task_id['scenario'][sce_net_uuid][datacenter_id] = task_index
+                 task_index += 1
+                 db_vim_actions.append(db_vim_action)
+             if 'ip_profile' in sce_net:
+                 db_ip_profile = {
+                     'instance_net_id': net_uuid,
+                     'ip_version': sce_net['ip_profile']['ip_version'],
+                     'subnet_address': sce_net['ip_profile']['subnet_address'],
+                     'gateway_address': sce_net['ip_profile']['gateway_address'],
+                     'dns_address': sce_net['ip_profile']['dns_address'],
+                     'dhcp_enabled': sce_net['ip_profile']['dhcp_enabled'],
+                     'dhcp_start_address': sce_net['ip_profile']['dhcp_start_address'],
+                     'dhcp_count': sce_net['ip_profile']['dhcp_count'],
+                 }
+                 db_ip_profiles.append(db_ip_profile)
+         # Create VNFs
+         vnf_params = {
+             "default_datacenter_id": default_datacenter_id,
+             "myvim_threads_id": myvim_threads_id,
+             "instance_uuid": instance_uuid,
+             "instance_name": instance_name,
+             "instance_action_id": instance_action_id,
+             "myvims": myvims,
+             "cloud_config": cloud_config,
+             "RO_pub_key": tenant[0].get('RO_pub_key'),
+             "instance_parameters": instance_dict,
+         }
+         vnf_params_out = {
+             "task_index": task_index,
+             "uuid_list": uuid_list,
+             "db_instance_nets": db_instance_nets,
+             "db_instance_wim_nets": db_instance_wim_nets,
+             "db_vim_actions": db_vim_actions,
+             "db_ip_profiles": db_ip_profiles,
+             "db_instance_vnfs": db_instance_vnfs,
+             "db_instance_vms": db_instance_vms,
+             "db_instance_interfaces": db_instance_interfaces,
+             "net2task_id": net2task_id,
+             "sce_net2instance": sce_net2instance,
+             "sce_net2wim_instance": sce_net2wim_instance,
+         }
+         # sce_vnf_list = sorted(scenarioDict['vnfs'], key=lambda k: k['name'])
+         for sce_vnf in scenarioDict.get('vnfs', ()):  # sce_vnf_list:
+             instantiate_vnf(mydb, sce_vnf, vnf_params, vnf_params_out, rollbackList)
+         task_index = vnf_params_out["task_index"]
+         uuid_list = vnf_params_out["uuid_list"]
+         # Create VNFFGs
+         # task_depends_on = []
+         for vnffg in scenarioDict.get('vnffgs', ()):
+             for rsp in vnffg['rsps']:
+                 sfs_created = []
+                 for cp in rsp['connection_points']:
+                     count = mydb.get_rows(
+                             SELECT='vms.count',
+                             FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_rsp_hops as h "
+                                  "on interfaces.uuid=h.ingress_interface_id",
+                             WHERE={'h.uuid': cp['uuid']})[0]['count']
+                     instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == cp['sce_vnf_id']), None)
+                     instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+                     dependencies = []
+                     for instance_vm in instance_vms:
+                         action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+                         if action:
+                             dependencies.append(action['task_index'])
+                         # TODO: throw exception if count != len(instance_vms)
+                         # TODO: and action shouldn't ever be None
+                     sfis_created = []
+                     for i in range(count):
+                         # create sfis
+                         sfi_uuid = str(uuid4())
+                         extra_params = {
+                             "ingress_interface_id": cp["ingress_interface_id"],
+                             "egress_interface_id": cp["egress_interface_id"]
+                         }
+                         uuid_list.append(sfi_uuid)
+                         db_sfi = {
+                             "uuid": sfi_uuid,
+                             "related": sfi_uuid,
+                             "instance_scenario_id": instance_uuid,
+                             'sce_rsp_hop_id': cp['uuid'],
+                             'datacenter_id': datacenter_id,
+                             'datacenter_tenant_id': myvim_thread_id,
+                             "vim_sfi_id": None, # vim thread will populate
+                         }
+                         db_instance_sfis.append(db_sfi)
+                         db_vim_action = {
+                             "instance_action_id": instance_action_id,
+                             "task_index": task_index,
+                             "datacenter_vim_id": myvim_thread_id,
+                             "action": "CREATE",
+                             "status": "SCHEDULED",
+                             "item": "instance_sfis",
+                             "item_id": sfi_uuid,
+                             "related": sfi_uuid,
+                             "extra": yaml.safe_dump({"params": extra_params, "depends_on": [dependencies[i]]},
+                                                     default_flow_style=True, width=256)
+                         }
+                         sfis_created.append(task_index)
+                         task_index += 1
+                         db_vim_actions.append(db_vim_action)
+                     # create sfs
+                     sf_uuid = str(uuid4())
+                     uuid_list.append(sf_uuid)
+                     db_sf = {
+                         "uuid": sf_uuid,
+                         "related": sf_uuid,
+                         "instance_scenario_id": instance_uuid,
+                         'sce_rsp_hop_id': cp['uuid'],
+                         'datacenter_id': datacenter_id,
+                         'datacenter_tenant_id': myvim_thread_id,
+                         "vim_sf_id": None, # vim thread will populate
+                     }
+                     db_instance_sfs.append(db_sf)
+                     db_vim_action = {
+                         "instance_action_id": instance_action_id,
+                         "task_index": task_index,
+                         "datacenter_vim_id": myvim_thread_id,
+                         "action": "CREATE",
+                         "status": "SCHEDULED",
+                         "item": "instance_sfs",
+                         "item_id": sf_uuid,
+                         "related": sf_uuid,
+                         "extra": yaml.safe_dump({"params": "", "depends_on": sfis_created},
+                                                 default_flow_style=True, width=256)
+                     }
+                     sfs_created.append(task_index)
+                     task_index += 1
+                     db_vim_actions.append(db_vim_action)
+                 classifier = rsp['classifier']
+                 # TODO the following ~13 lines can be reused for the sfi case
+                 count = mydb.get_rows(
+                         SELECT='vms.count',
+                         FROM="vms join interfaces on vms.uuid=interfaces.vm_id join sce_classifiers as c on interfaces.uuid=c.interface_id",
+                         WHERE={'c.uuid': classifier['uuid']})[0]['count']
+                 instance_vnf = next((item for item in db_instance_vnfs if item['sce_vnf_id'] == classifier['sce_vnf_id']), None)
+                 instance_vms = [item for item in db_instance_vms if item['instance_vnf_id'] == instance_vnf['uuid']]
+                 dependencies = []
+                 for instance_vm in instance_vms:
+                     action = next((item for item in db_vim_actions if item['item_id'] == instance_vm['uuid']), None)
+                     if action:
+                         dependencies.append(action['task_index'])
+                     # TODO: throw exception if count != len(instance_vms)
+                     # TODO: and action shouldn't ever be None
+                 classifications_created = []
+                 for i in range(count):
+                     for match in classifier['matches']:
+                         # create classifications
+                         classification_uuid = str(uuid4())
+                         uuid_list.append(classification_uuid)
+                         db_classification = {
+                             "uuid": classification_uuid,
+                             "related": classification_uuid,
+                             "instance_scenario_id": instance_uuid,
+                             'sce_classifier_match_id': match['uuid'],
+                             'datacenter_id': datacenter_id,
+                             'datacenter_tenant_id': myvim_thread_id,
+                             "vim_classification_id": None, # vim thread will populate
+                         }
+                         db_instance_classifications.append(db_classification)
+                         classification_params = {
+                             "ip_proto": match["ip_proto"],
+                             "source_ip": match["source_ip"],
+                             "destination_ip": match["destination_ip"],
+                             "source_port": match["source_port"],
+                             "destination_port": match["destination_port"]
+                         }
+                         db_vim_action = {
+                             "instance_action_id": instance_action_id,
+                             "task_index": task_index,
+                             "datacenter_vim_id": myvim_thread_id,
+                             "action": "CREATE",
+                             "status": "SCHEDULED",
+                             "item": "instance_classifications",
+                             "item_id": classification_uuid,
+                             "related": classification_uuid,
+                             "extra": yaml.safe_dump({"params": classification_params, "depends_on": [dependencies[i]]},
+                                                     default_flow_style=True, width=256)
+                         }
+                         classifications_created.append(task_index)
+                         task_index += 1
+                         db_vim_actions.append(db_vim_action)
+                 # create sfps
+                 sfp_uuid = str(uuid4())
+                 uuid_list.append(sfp_uuid)
+                 db_sfp = {
+                     "uuid": sfp_uuid,
+                     "related": sfp_uuid,
+                     "instance_scenario_id": instance_uuid,
+                     'sce_rsp_id': rsp['uuid'],
+                     'datacenter_id': datacenter_id,
+                     'datacenter_tenant_id': myvim_thread_id,
+                     "vim_sfp_id": None, # vim thread will populate
+                 }
+                 db_instance_sfps.append(db_sfp)
+                 db_vim_action = {
+                     "instance_action_id": instance_action_id,
+                     "task_index": task_index,
+                     "datacenter_vim_id": myvim_thread_id,
+                     "action": "CREATE",
+                     "status": "SCHEDULED",
+                     "item": "instance_sfps",
+                     "item_id": sfp_uuid,
+                     "related": sfp_uuid,
+                     "extra": yaml.safe_dump({"params": "", "depends_on": sfs_created + classifications_created},
+                                             default_flow_style=True, width=256)
+                 }
+                 task_index += 1
+                 db_vim_actions.append(db_vim_action)
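
The net effect per RSP is a small dependency DAG: each SFI depends on the VM task of its replica, the SF depends on all of its SFIs, each classification depends on a VM task, and the SFP depends on every SF and classification. A toy sketch of that wiring for a single connection point (indices are illustrative, not real task_index values):

    def chain_rsp_dependencies(vm_tasks, n_matches, start):
        # returns {task_index: depends_on} mimicking the ordering above
        deps, idx, sfis = {}, start, []
        for vm_task in vm_tasks:            # one SFI per VM replica
            deps[idx] = [vm_task]
            sfis.append(idx)
            idx += 1
        sf = idx                            # the SF waits for all its SFIs
        deps[sf] = sfis
        idx += 1
        classifications = []
        for vm_task in vm_tasks:            # one classification per replica
            for _ in range(n_matches):      # ...and per classifier match
                deps[idx] = [vm_task]
                classifications.append(idx)
                idx += 1
        deps[idx] = [sf] + classifications  # the SFP waits for everything
        return deps
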
+         db_instance_action["number_tasks"] = task_index
+         # --> WIM
+         logger.debug('wim_usage:\n%s\n\n', pformat(wim_usage))
+         wan_links = wim_engine.derive_wan_links(wim_usage, db_instance_nets, tenant_id)
+         wim_actions = wim_engine.create_actions(wan_links)
+         wim_actions, db_instance_action = (
+             wim_engine.incorporate_actions(wim_actions, db_instance_action))
+         # <-- WIM
+         scenarioDict["datacenter2tenant"] = myvim_threads_id
+         db_instance_scenario['datacenter_tenant_id'] = myvim_threads_id[default_datacenter_id]
+         db_instance_scenario['datacenter_id'] = default_datacenter_id
+         db_tables = [
+             {"instance_scenarios": db_instance_scenario},
+             {"instance_vnfs": db_instance_vnfs},
+             {"instance_nets": db_instance_nets},
+             {"ip_profiles": db_ip_profiles},
+             {"instance_vms": db_instance_vms},
+             {"instance_interfaces": db_instance_interfaces},
+             {"instance_actions": db_instance_action},
+             {"instance_sfis": db_instance_sfis},
+             {"instance_sfs": db_instance_sfs},
+             {"instance_classifications": db_instance_classifications},
+             {"instance_sfps": db_instance_sfps},
+             {"instance_wim_nets": db_instance_wim_nets + wan_links},
+             {"vim_wim_actions": db_vim_actions + wim_actions}
+         ]
+         logger.debug("create_instance done DB tables: %s",
+                     yaml.safe_dump(db_tables, indent=4, default_flow_style=False) )
+         mydb.new_rows(db_tables, uuid_list)
+         for myvim_thread_id in myvim_threads_id.values():
+             vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)
+         wim_engine.dispatch(wim_actions)
+         returned_instance = mydb.get_instance_scenario(instance_uuid)
+         returned_instance["action_id"] = instance_action_id
+         return returned_instance
+     except (NfvoException, vimconn.vimconnException, sdnconn.SdnConnectorError, db_base_Exception) as e:
+         message = rollback(mydb, myvims, rollbackList)
+         if isinstance(e, db_base_Exception):
+             error_text = "database Exception"
+         elif isinstance(e, vimconn.vimconnException):
+             error_text = "VIM Exception"
+         elif isinstance(e, sdnconn.SdnConnectorError):
+             error_text = "WIM Exception"
+         else:
+             error_text = "Exception"
+         error_text += " {} {}. {}".format(type(e).__name__, str(e), message)
+         # logger.error("create_instance: %s", error_text)
+         logger.exception(e)
+         raise NfvoException(error_text, e.http_code)
+ def instantiate_vnf(mydb, sce_vnf, params, params_out, rollbackList):
+     default_datacenter_id = params["default_datacenter_id"]
+     myvim_threads_id = params["myvim_threads_id"]
+     instance_uuid = params["instance_uuid"]
+     instance_name = params["instance_name"]
+     instance_action_id = params["instance_action_id"]
+     myvims = params["myvims"]
+     cloud_config = params["cloud_config"]
+     RO_pub_key = params["RO_pub_key"]
+     task_index = params_out["task_index"]
+     uuid_list = params_out["uuid_list"]
+     db_instance_nets = params_out["db_instance_nets"]
+     db_instance_wim_nets = params_out["db_instance_wim_nets"]
+     db_vim_actions = params_out["db_vim_actions"]
+     db_ip_profiles = params_out["db_ip_profiles"]
+     db_instance_vnfs = params_out["db_instance_vnfs"]
+     db_instance_vms = params_out["db_instance_vms"]
+     db_instance_interfaces = params_out["db_instance_interfaces"]
+     net2task_id = params_out["net2task_id"]
+     sce_net2instance = params_out["sce_net2instance"]
+     sce_net2wim_instance = params_out["sce_net2wim_instance"]
+     vnf_net2instance = {}
+     # 2. Creating new nets (vnf internal nets) in the VIM
+     # For each vnf net, we create it and we add it to instanceNetlist.
+     if sce_vnf.get("datacenter"):
+         vim = myvims[sce_vnf["datacenter"]]
+         datacenter_id = sce_vnf["datacenter"]
+         myvim_thread_id = myvim_threads_id[sce_vnf["datacenter"]]
+     else:
+         vim = myvims[default_datacenter_id]
+         datacenter_id = default_datacenter_id
+         myvim_thread_id = myvim_threads_id[default_datacenter_id]
+     for net in sce_vnf['nets']:
+         # TODO revise
+         # descriptor_net = instance_dict.get("vnfs", {}).get(sce_vnf["name"], {})
+         # net_name = descriptor_net.get("name")
+         net_name = None
+         if not net_name:
+             net_name = "{}-{}".format(instance_name, net["name"])
+             net_name = net_name[:255]  # limit length
+         net_type = net['type']
+         if sce_vnf['uuid'] not in vnf_net2instance:
+             vnf_net2instance[sce_vnf['uuid']] = {}
+         if sce_vnf['uuid'] not in net2task_id:
+             net2task_id[sce_vnf['uuid']] = {}
+         # fill database content
+         net_uuid = str(uuid4())
+         uuid_list.append(net_uuid)
+         vnf_net2instance[sce_vnf['uuid']][net['uuid']] = net_uuid
+         sdn_controller = vim.config.get('sdn-controller')
+         sdn_net_id = None
+         if sdn_controller and net_type in ("data", "ptp"):
+             wim_id = _get_wim(mydb, sdn_controller)
+             sdn_net_id = str(uuid4())
+             db_instance_wim_nets.append({
+                 "uuid": sdn_net_id,
+                 "instance_scenario_id": instance_uuid,
+                 "wim_id": wim_id,
+                 "wim_account_id": sdn_controller,
+                 'status': 'BUILD',  # if create_network else "ACTIVE"
+                 "related": net_uuid,
+                 'multipoint': True if net_type == "data" else False,
+                 "created": True,  # TODO py3
+                 "sdn": True,
+             })
+         db_net = {
+             "uuid": net_uuid,
+             "related": net_uuid,
+             'vim_net_id': None,
+             "vim_name": net_name,
+             "instance_scenario_id": instance_uuid,
+             "net_id": net["uuid"],
+             "created": True,
+             'datacenter_id': datacenter_id,
+             'datacenter_tenant_id': myvim_thread_id,
+             'sdn_net_id': sdn_net_id,
+         }
+         db_instance_nets.append(db_net)
+         lookfor_filter = {}
+         if net.get("vim-network-name"):
+             lookfor_filter["name"] = net["vim-network-name"]
+         if net.get("vim-network-id"):
+             lookfor_filter["id"] = net["vim-network-id"]
+         if lookfor_filter:
+             task_action = "FIND"
+             task_extra = {"params": (lookfor_filter,)}
+         else:
+             task_action = "CREATE"
+             task_extra = {"params": (net_name, net_type, net.get('ip_profile', None))}
+         if sdn_net_id:
+             task_extra["sdn_net_id"] = sdn_net_id
+         if sdn_net_id:
+             task_wim_extra = {"params": [net_type, None]}
+             db_vim_action = {
+                 "instance_action_id": instance_action_id,
+                 "status": "SCHEDULED",
+                 "task_index": task_index,
+                 # "datacenter_vim_id": myvim_thread_id,
+                 "wim_account_id": sdn_controller,
+                 "action": task_action,
+                 "item": "instance_wim_nets",
+                 "item_id": sdn_net_id,
+                 "related": net_uuid,
+                 "extra": yaml.safe_dump(task_wim_extra, default_flow_style=True, width=256)
+             }
+             task_index += 1
+             db_vim_actions.append(db_vim_action)
+         db_vim_action = {
+             "instance_action_id": instance_action_id,
+             "task_index": task_index,
+             "datacenter_vim_id": myvim_thread_id,
+             "status": "SCHEDULED",
+             "action": task_action,
+             "item": "instance_nets",
+             "item_id": net_uuid,
+             "related": net_uuid,
+             "extra": yaml.safe_dump(task_extra, default_flow_style=True, width=256)
+         }
+         net2task_id[sce_vnf['uuid']][net['uuid']] = task_index
+         task_index += 1
+         db_vim_actions.append(db_vim_action)
+         if 'ip_profile' in net:
+             db_ip_profile = {
+                 'instance_net_id': net_uuid,
+                 'ip_version': net['ip_profile']['ip_version'],
+                 'subnet_address': net['ip_profile']['subnet_address'],
+                 'gateway_address': net['ip_profile']['gateway_address'],
+                 'dns_address': net['ip_profile']['dns_address'],
+                 'dhcp_enabled': net['ip_profile']['dhcp_enabled'],
+                 'dhcp_start_address': net['ip_profile']['dhcp_start_address'],
+                 'dhcp_count': net['ip_profile']['dhcp_count'],
+             }
+             db_ip_profiles.append(db_ip_profile)
+     # print "vnf_net2instance:"
+     # print yaml.safe_dump(vnf_net2instance, indent=4, default_flow_style=False)
+     # 3. Creating new vm instances in the VIM
+     # myvim.new_vminstance(self,vimURI,tenant_id,name,description,image_id,flavor_id,net_dict)
+     ssh_access = None
+     if sce_vnf.get('mgmt_access'):
+         ssh_access = sce_vnf['mgmt_access'].get('config-access', {}).get('ssh-access')
+     vnf_availability_zones = []
+     for vm in sce_vnf.get('vms'):
+         vm_av = vm.get('availability_zone')
+         if vm_av and vm_av not in vnf_availability_zones:
+             vnf_availability_zones.append(vm_av)
+     # check if there are enough availability zones available at VIM level
+     if myvims[datacenter_id].availability_zone and vnf_availability_zones:
+         if len(vnf_availability_zones) > len(myvims[datacenter_id].availability_zone):
+             raise NfvoException('Not enough availability zones at VIM for this deployment', httperrors.Bad_Request)
+     if sce_vnf.get("datacenter"):
+         vim = myvims[sce_vnf["datacenter"]]
+         myvim_thread_id = myvim_threads_id[sce_vnf["datacenter"]]
+         datacenter_id = sce_vnf["datacenter"]
+     else:
+         vim = myvims[default_datacenter_id]
+         myvim_thread_id = myvim_threads_id[default_datacenter_id]
+         datacenter_id = default_datacenter_id
+     sce_vnf["datacenter_id"] = datacenter_id
+     i = 0
+     vnf_uuid = str(uuid4())
+     uuid_list.append(vnf_uuid)
+     db_instance_vnf = {
+         'uuid': vnf_uuid,
+         'instance_scenario_id': instance_uuid,
+         'vnf_id': sce_vnf['vnf_id'],
+         'sce_vnf_id': sce_vnf['uuid'],
+         'datacenter_id': datacenter_id,
+         'datacenter_tenant_id': myvim_thread_id,
+     }
+     db_instance_vnfs.append(db_instance_vnf)
+     for vm in sce_vnf['vms']:
+         # skip PDUs
+         if vm.get("pdu_type"):
+             continue
+         myVMDict = {}
+         sce_vnf_name = sce_vnf['member_vnf_index'] if sce_vnf['member_vnf_index'] else sce_vnf['name']
+         myVMDict['name'] = "{}-{}-{}".format(instance_name[:64], sce_vnf_name[:64], vm["name"][:64])
+         myVMDict['description'] = myVMDict['name'][0:99]
+         #                if not startvms:
+         #                    myVMDict['start'] = "no"
+         if vm.get("instance_parameters") and vm["instance_parameters"].get("name"):
+             myVMDict['name'] = vm["instance_parameters"].get("name")
+         myVMDict['name'] = myVMDict['name'][0:255]  # limit name length
+         # create the image at the VIM in case it does not exist
+         image_uuid = vm['image_id']
+         if vm.get("image_list"):
+             for alternative_image in vm["image_list"]:
+                 if alternative_image["vim_type"] == vim["config"]["_vim_type_internal"]:
+                     image_uuid = alternative_image['image_id']
+                     break
+         image_dict = mydb.get_table_by_uuid_name("images", image_uuid)
+         image_id = create_or_use_image(mydb, {datacenter_id: vim}, image_dict, [], True)
+         vm['vim_image_id'] = image_id
+         # create the flavor at the VIM in case it does not exist
+         flavor_dict = mydb.get_table_by_uuid_name("flavors", vm['flavor_id'])
+         if flavor_dict['extended'] is not None:
+             flavor_dict['extended'] = yaml.load(flavor_dict['extended'], Loader=yaml.Loader)
+         flavor_id = create_or_use_flavor(mydb, {datacenter_id: vim}, flavor_dict, rollbackList, True)
+         # Obtain information for additional disks
+         extended_flavor_dict = mydb.get_rows(FROM='datacenters_flavors', SELECT=('extended',),
+                                              WHERE={'vim_id': flavor_id})
+         if not extended_flavor_dict:
+             raise NfvoException("flavor '{}' not found".format(flavor_id), httperrors.Not_Found)
+         # extended_flavor_dict_yaml = yaml.load(extended_flavor_dict[0], Loader=yaml.Loader)
+         myVMDict['disks'] = None
+         extended_info = extended_flavor_dict[0]['extended']
+         if extended_info is not None:
+             extended_flavor_dict_yaml = yaml.load(extended_info, Loader=yaml.Loader)
+             if 'disks' in extended_flavor_dict_yaml:
+                 myVMDict['disks'] = extended_flavor_dict_yaml['disks']
+                 if vm.get("instance_parameters") and vm["instance_parameters"].get("devices"):
+                     for disk in myVMDict['disks']:
+                         if disk.get("name") in vm["instance_parameters"]["devices"]:
+                             disk.update(vm["instance_parameters"]["devices"][disk.get("name")])
+         vm['vim_flavor_id'] = flavor_id
+         myVMDict['imageRef'] = vm['vim_image_id']
+         myVMDict['flavorRef'] = vm['vim_flavor_id']
+         myVMDict['availability_zone'] = vm.get('availability_zone')
+         myVMDict['networks'] = []
+         task_depends_on = []
+         # TODO ALF. connect_mgmt_interfaces. Connect management interfaces if this is true
+         is_management_vm = False
+         db_vm_ifaces = []
+         for iface in vm['interfaces']:
+             netDict = {}
+             if iface['type'] == "data":
+                 netDict['type'] = iface['model']
+             elif "model" in iface and iface["model"] != None:
+                 netDict['model'] = iface['model']
+             # TODO in future, remove this because mac_address will not be set, and the type of PF,VF
+             # is obtained from the interface table model
+             # discover type of interface looking at flavor
+             for numa in flavor_dict.get('extended', {}).get('numas', []):
+                 for flavor_iface in numa.get('interfaces', []):
+                     if flavor_iface.get('name') == iface['internal_name']:
+                         if flavor_iface['dedicated'] == 'yes':
+                             netDict['type'] = "PF"  # passthrough
+                         elif flavor_iface['dedicated'] == 'no':
+                             netDict['type'] = "VF"  # siov
+                         elif flavor_iface['dedicated'] == 'yes:sriov':
+                             netDict['type'] = "VFnotShared"  # sriov but only one sriov on the PF
+                         netDict["mac_address"] = flavor_iface.get("mac_address")
+                         break
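
The flavor interface's 'dedicated' attribute maps onto the VIM interface type: 'yes' means full PCI passthrough, 'no' a shared SR-IOV virtual function, and 'yes:sriov' an SR-IOV VF that must be the only one on its physical function. The same mapping as a lookup-table sketch:

    DEDICATED_TO_IFACE_TYPE = {
        "yes": "PF",                 # full PCI passthrough
        "no": "VF",                  # shared SR-IOV virtual function
        "yes:sriov": "VFnotShared",  # SR-IOV VF alone on its PF
    }

    assert DEDICATED_TO_IFACE_TYPE["yes:sriov"] == "VFnotShared"
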
+             netDict["use"] = iface['type']
+             if netDict["use"] == "data" and not netDict.get("type"):
+                 # print "netDict", netDict
+                 # print "iface", iface
+                 e_text = "Cannot determine the interface type PF or VF of VNF '{}' VM '{}' iface '{}'".fromat(
+                     sce_vnf['name'], vm['name'], iface['internal_name'])
+                 if flavor_dict.get('extended') == None:
+                     raise NfvoException(e_text + "After database migration some information is not available. \
+                             Try to delete and create the scenarios and VNFs again", httperrors.Conflict)
+                 else:
+                     raise NfvoException(e_text, httperrors.Internal_Server_Error)
+             if netDict["use"] == "mgmt":
+                 is_management_vm = True
+                 netDict["type"] = "virtual"
+             if netDict["use"] == "bridge":
+                 netDict["type"] = "virtual"
+             if iface.get("vpci"):
+                 netDict['vpci'] = iface['vpci']
+             if iface.get("mac"):
+                 netDict['mac_address'] = iface['mac']
+             if iface.get("mac_address"):
+                 netDict['mac_address'] = iface['mac_address']
+             if iface.get("ip_address"):
+                 netDict['ip_address'] = iface['ip_address']
+             if iface.get("port-security") is not None:
+                 netDict['port_security'] = iface['port-security']
+             if iface.get("floating-ip") is not None:
+                 netDict['floating_ip'] = iface['floating-ip']
+             netDict['name'] = iface['internal_name']
+             if iface['net_id'] is None:
+                 for vnf_iface in sce_vnf["interfaces"]:
+                     # print iface
+                     # print vnf_iface
+                     if vnf_iface['interface_id'] == iface['uuid']:
+                         netDict['net_id'] = "TASK-{}".format(
+                             net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
+                         instance_net_id = sce_net2instance[vnf_iface['sce_net_id']][datacenter_id]
+                         instance_wim_net_id = sce_net2wim_instance[vnf_iface['sce_net_id']][datacenter_id]
+                         task_depends_on.append(net2task_id['scenario'][vnf_iface['sce_net_id']][datacenter_id])
+                         break
+             else:
+                 netDict['net_id'] = "TASK-{}".format(net2task_id[sce_vnf['uuid']][iface['net_id']])
+                 instance_net_id = vnf_net2instance[sce_vnf['uuid']][iface['net_id']]
+                 task_depends_on.append(net2task_id[sce_vnf['uuid']][iface['net_id']])
+             # skip bridge ifaces not connected to any net
+             if 'net_id' not in netDict or netDict['net_id'] is None:
+                 continue
+             myVMDict['networks'].append(netDict)
+             db_vm_iface = {
+                 # "uuid"
+                 # 'instance_vm_id': instance_vm_uuid,
+                 "instance_net_id": instance_net_id,
+                 "instance_wim_net_id": instance_wim_net_id,
+                 'interface_id': iface['uuid'],
+                 # 'vim_interface_id': ,
+                 'type': 'external' if iface['external_name'] is not None else 'internal',
+                 'model': iface['model'],
+                 'ip_address': iface.get('ip_address'),
+                 'mac_address': iface.get('mac'),
+                 'floating_ip': int(iface.get('floating-ip', False)),
+                 'port_security': int(iface.get('port-security', True))
+             }
+             db_vm_ifaces.append(db_vm_iface)
+         # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+         # print myVMDict['name']
+         # print "networks", yaml.safe_dump(myVMDict['networks'], indent=4, default_flow_style=False)
+         # print "interfaces", yaml.safe_dump(vm['interfaces'], indent=4, default_flow_style=False)
+         # print ">>>>>>>>>>>>>>>>>>>>>>>>>>>"
+         # We add the RO key to cloud_config if vnf will need ssh access
+         cloud_config_vm = cloud_config
+         if is_management_vm and params["instance_parameters"].get("mgmt_keys"):
+             cloud_config_vm = unify_cloud_config({"key-pairs": params["instance_parameters"]["mgmt_keys"]},
+                                                   cloud_config_vm)
+         if vm.get("instance_parameters") and "mgmt_keys" in vm["instance_parameters"]:
+             if vm["instance_parameters"]["mgmt_keys"]:
+                 cloud_config_vm = unify_cloud_config({"key-pairs": vm["instance_parameters"]["mgmt_keys"]},
+                                                      cloud_config_vm)
+             if RO_pub_key:
+                 cloud_config_vm = unify_cloud_config(cloud_config_vm, {"key-pairs": [RO_pub_key]})
+         if vm.get("boot_data"):
+             cloud_config_vm = unify_cloud_config(vm["boot_data"], cloud_config_vm)
+         if myVMDict.get('availability_zone'):
+             av_index = vnf_availability_zones.index(myVMDict['availability_zone'])
+         else:
+             av_index = None
+         for vm_index in range(0, vm.get('count', 1)):
+             vm_name = myVMDict['name'] + "-" + str(vm_index+1)
+             task_params = (vm_name, myVMDict['description'], myVMDict.get('start', None),
+                            myVMDict['imageRef'], myVMDict['flavorRef'], myVMDict['networks'], cloud_config_vm,
+                            myVMDict['disks'], av_index, vnf_availability_zones)
+             # put interface uuid back to scenario[vnfs][vms][interfaces]
+             for net in myVMDict['networks']:
+                 if "vim_id" in net:
+                     for iface in vm['interfaces']:
+                         if net["name"] == iface["internal_name"]:
+                             iface["vim_id"] = net["vim_id"]
+                             break
+             vm_uuid = str(uuid4())
+             uuid_list.append(vm_uuid)
+             db_vm = {
+                 "uuid": vm_uuid,
+                 "related": vm_uuid,
+                 'instance_vnf_id': vnf_uuid,
+                 # TODO delete "vim_vm_id": vm_id,
+                 "vm_id": vm["uuid"],
+                 "vim_name": vm_name,
+                 # "status":
+             }
+             db_instance_vms.append(db_vm)
+             iface_index = 0
+             for db_vm_iface in db_vm_ifaces:
+                 iface_uuid = str(uuid4())
+                 uuid_list.append(iface_uuid)
+                 db_vm_iface_instance = {
+                     "uuid": iface_uuid,
+                     "instance_vm_id": vm_uuid
+                 }
+                 db_vm_iface_instance.update(db_vm_iface)
+                 if db_vm_iface_instance.get("ip_address"):  # increment ip_address
+                     ip = db_vm_iface_instance.get("ip_address")
+                     i = ip.rfind(".")
+                     if i > 0:
+                         try:
+                             i += 1
+                             ip = ip[i:] + str(int(ip[:i]) + 1)
+                             db_vm_iface_instance["ip_address"] = ip
+                         except:
+                             db_vm_iface_instance["ip_address"] = None
+                 db_instance_interfaces.append(db_vm_iface_instance)
+                 myVMDict['networks'][iface_index]["uuid"] = iface_uuid
+                 iface_index += 1
+             db_vim_action = {
+                 "instance_action_id": instance_action_id,
+                 "task_index": task_index,
+                 "datacenter_vim_id": myvim_thread_id,
+                 "action": "CREATE",
+                 "status": "SCHEDULED",
+                 "item": "instance_vms",
+                 "item_id": vm_uuid,
+                 "related": vm_uuid,
+                 "extra": yaml.safe_dump({"params": task_params, "depends_on": task_depends_on},
+                                         default_flow_style=True, width=256)
+             }
+             task_index += 1
+             db_vim_actions.append(db_vim_action)
+     params_out["task_index"] = task_index
+     params_out["uuid_list"] = uuid_list
+ def delete_instance(mydb, tenant_id, instance_id):
+     # print "Checking that the instance_id exists and getting the instance dictionary"
+     instanceDict = mydb.get_instance_scenario(instance_id, tenant_id)
+     # print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
+     tenant_id = instanceDict["tenant_id"]
+     # --> WIM
+     # We need to retrieve the WIM actions now, before the instance_scenario is
+     # deleted, because the ON CASCADE rules will delete the related
+     # instance_wim_nets records from the database
+     wim_actions = wim_engine.delete_actions(instance_scenario_id=instance_id)
+     # <-- WIM
+     # print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
+     # 1. Delete from Database
+     message = mydb.delete_instance_scenario(instance_id, tenant_id)
+     # 2. delete from VIM
+     error_msg = ""
+     myvims = {}
+     myvim_threads = {}
+     vimthread_affected = {}
+     net2vm_dependencies = {}
+     task_index = 0
+     instance_action_id = get_task_id()
+     db_vim_actions = []
+     db_instance_action = {
+         "uuid": instance_action_id,   # same uuid for the instance and the action on create
+         "tenant_id": tenant_id,
+         "instance_id": instance_id,
+         "description": "DELETE",
+         # "number_tasks": 0 # filled bellow
+     }
+     # 2.1 deleting VNFFGs
+     for sfp in instanceDict.get('sfps', ()):
+         vimthread_affected[sfp["datacenter_tenant_id"]] = None
+         datacenter_key = (sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+         if datacenter_key not in myvims:
+             try:
+                 _, myvim_thread = get_vim_thread(mydb, tenant_id, sfp["datacenter_id"], sfp["datacenter_tenant_id"])
+             except NfvoException as e:
+                 logger.error(str(e))
+                 myvim_thread = None
+             myvim_threads[datacenter_key] = myvim_thread
+             vims = get_vim(mydb, tenant_id, datacenter_id=sfp["datacenter_id"],
+                            datacenter_tenant_id=sfp["datacenter_tenant_id"])
+             if len(vims) == 0:
+                 logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfp["datacenter_id"], sfp["datacenter_tenant_id"]))
+                 myvims[datacenter_key] = None
+             else:
+                 myvims[datacenter_key] = next(iter(vims.values()))
+         myvim = myvims[datacenter_key]
+         myvim_thread = myvim_threads[datacenter_key]
+         if not myvim:
+             error_msg += "\n    vim_sfp_id={} cannot be deleted because datacenter={} not found".format(sfp['vim_sfp_id'], sfp["datacenter_id"])
+             continue
+         extra = {"params": (sfp['vim_sfp_id'])}
+         db_vim_action = {
+             "instance_action_id": instance_action_id,
+             "task_index": task_index,
+             "datacenter_vim_id": sfp["datacenter_tenant_id"],
+             "action": "DELETE",
+             "status": "SCHEDULED",
+             "item": "instance_sfps",
+             "item_id": sfp["uuid"],
+             "related": sfp["related"],
+             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+         }
+         task_index += 1
+         db_vim_actions.append(db_vim_action)
+     for classification in instanceDict['classifications']:
+         vimthread_affected[classification["datacenter_tenant_id"]] = None
+         datacenter_key = (classification["datacenter_id"], classification["datacenter_tenant_id"])
+         if datacenter_key not in myvims:
+             try:
+                 _, myvim_thread = get_vim_thread(mydb, tenant_id, classification["datacenter_id"], classification["datacenter_tenant_id"])
+             except NfvoException as e:
+                 logger.error(str(e))
+                 myvim_thread = None
+             myvim_threads[datacenter_key] = myvim_thread
+             vims = get_vim(mydb, tenant_id, datacenter_id=classification["datacenter_id"],
+                            datacenter_tenant_id=classification["datacenter_tenant_id"])
+             if len(vims) == 0:
+                 logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(classification["datacenter_id"],
+                                                                                                classification["datacenter_tenant_id"]))
+                 myvims[datacenter_key] = None
+             else:
+                 myvims[datacenter_key] = next(iter(vims.values()))
+         myvim = myvims[datacenter_key]
+         myvim_thread = myvim_threads[datacenter_key]
+         if not myvim:
+             error_msg += "\n    vim_classification_id={} cannot be deleted because datacenter={} not found".format(classification['vim_classification_id'],
+                                                                                                                    classification["datacenter_id"])
+             continue
+         depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
+         extra = {"params": (classification['vim_classification_id']), "depends_on": depends_on}
+         db_vim_action = {
+             "instance_action_id": instance_action_id,
+             "task_index": task_index,
+             "datacenter_vim_id": classification["datacenter_tenant_id"],
+             "action": "DELETE",
+             "status": "SCHEDULED",
+             "item": "instance_classifications",
+             "item_id": classification["uuid"],
+             "related": classification["related"],
+             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+         }
+         task_index += 1
+         db_vim_actions.append(db_vim_action)
+     for sf in instanceDict.get('sfs', ()):
+         vimthread_affected[sf["datacenter_tenant_id"]] = None
+         datacenter_key = (sf["datacenter_id"], sf["datacenter_tenant_id"])
+         if datacenter_key not in myvims:
+             try:
+                 _, myvim_thread = get_vim_thread(mydb, tenant_id, sf["datacenter_id"], sf["datacenter_tenant_id"])
+             except NfvoException as e:
+                 logger.error(str(e))
+                 myvim_thread = None
+             myvim_threads[datacenter_key] = myvim_thread
+             vims = get_vim(mydb, tenant_id, datacenter_id=sf["datacenter_id"],
+                            datacenter_tenant_id=sf["datacenter_tenant_id"])
+             if len(vims) == 0:
+                 logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sf["datacenter_id"], sf["datacenter_tenant_id"]))
+                 myvims[datacenter_key] = None
+             else:
+                 myvims[datacenter_key] = next(iter(vims.values()))
+         myvim = myvims[datacenter_key]
+         myvim_thread = myvim_threads[datacenter_key]
+         if not myvim:
+             error_msg += "\n    vim_sf_id={} cannot be deleted because datacenter={} not found".format(sf['vim_sf_id'], sf["datacenter_id"])
+             continue
+         depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfps"]
+         extra = {"params": (sf['vim_sf_id']), "depends_on": depends_on}
+         db_vim_action = {
+             "instance_action_id": instance_action_id,
+             "task_index": task_index,
+             "datacenter_vim_id": sf["datacenter_tenant_id"],
+             "action": "DELETE",
+             "status": "SCHEDULED",
+             "item": "instance_sfs",
+             "item_id": sf["uuid"],
+             "related": sf["related"],
+             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+         }
+         task_index += 1
+         db_vim_actions.append(db_vim_action)
+     for sfi in instanceDict.get('sfis', ()):
+         vimthread_affected[sfi["datacenter_tenant_id"]] = None
+         datacenter_key = (sfi["datacenter_id"], sfi["datacenter_tenant_id"])
+         if datacenter_key not in myvims:
+             try:
+                 _, myvim_thread = get_vim_thread(mydb, tenant_id, sfi["datacenter_id"], sfi["datacenter_tenant_id"])
+             except NfvoException as e:
+                 logger.error(str(e))
+                 myvim_thread = None
+             myvim_threads[datacenter_key] = myvim_thread
+             vims = get_vim(mydb, tenant_id, datacenter_id=sfi["datacenter_id"],
+                            datacenter_tenant_id=sfi["datacenter_tenant_id"])
+             if len(vims) == 0:
+                 logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sfi["datacenter_id"], sfi["datacenter_tenant_id"]))
+                 myvims[datacenter_key] = None
+             else:
+                 myvims[datacenter_key] = next(iter(vims.values()))
+         myvim = myvims[datacenter_key]
+         myvim_thread = myvim_threads[datacenter_key]
+         if not myvim:
+             error_msg += "\n    vim_sfi_id={} cannot be deleted because datacenter={} not found".format(sfi['vim_sfi_id'], sfi["datacenter_id"])
+             continue
+         depends_on = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfs"]
+         extra = {"params": (sfi['vim_sfi_id']), "depends_on": depends_on}
+         db_vim_action = {
+             "instance_action_id": instance_action_id,
+             "task_index": task_index,
+             "datacenter_vim_id": sfi["datacenter_tenant_id"],
+             "action": "DELETE",
+             "status": "SCHEDULED",
+             "item": "instance_sfis",
+             "item_id": sfi["uuid"],
+             "related": sfi["related"],
+             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+         }
+         task_index += 1
+         db_vim_actions.append(db_vim_action)
+     # 2.2 deleting VMs
+     # vm_fail_list=[]
+     for sce_vnf in instanceDict.get('vnfs', ()):
+         datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+         vimthread_affected[sce_vnf["datacenter_tenant_id"]] = None
+         if datacenter_key not in myvims:
+             try:
+                 _, myvim_thread = get_vim_thread(mydb, tenant_id, sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+             except NfvoException as e:
+                 logger.error(str(e))
+                 myvim_thread = None
+             myvim_threads[datacenter_key] = myvim_thread
+             vims = get_vim(mydb, tenant_id, datacenter_id=sce_vnf["datacenter_id"],
+                            datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
+             if len(vims) == 0:
+                 logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"],
+                                                                                                sce_vnf["datacenter_tenant_id"]))
+                 myvims[datacenter_key] = None
+             else:
+                 myvims[datacenter_key] = next(iter(vims.values()))
+         myvim = myvims[datacenter_key]
+         myvim_thread = myvim_threads[datacenter_key]
+         for vm in sce_vnf['vms']:
+             if not myvim:
+                 error_msg += "\n    VM id={} cannot be deleted because datacenter={} not found".format(vm['vim_vm_id'], sce_vnf["datacenter_id"])
+                 continue
+             sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
+             db_vim_action = {
+                 "instance_action_id": instance_action_id,
+                 "task_index": task_index,
+                 "datacenter_vim_id": sce_vnf["datacenter_tenant_id"],
+                 "action": "DELETE",
+                 "status": "SCHEDULED",
+                 "item": "instance_vms",
+                 "item_id": vm["uuid"],
+                 "related": vm["related"],
+                 "extra": yaml.safe_dump({"params": vm["interfaces"], "depends_on": sfi_dependencies},
+                                         default_flow_style=True, width=256)
+             }
+             db_vim_actions.append(db_vim_action)
+             for interface in vm["interfaces"]:
+                 if not interface.get("instance_net_id"):
+                     continue
+                 if interface["instance_net_id"] not in net2vm_dependencies:
+                     net2vm_dependencies[interface["instance_net_id"]] = []
+                 net2vm_dependencies[interface["instance_net_id"]].append(task_index)
+             task_index += 1
+     # 2.3 deleting NETS
+     # net_fail_list=[]
+     for net in instanceDict['nets']:
+         vimthread_affected[net["datacenter_tenant_id"]] = None
+         datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
+         if datacenter_key not in myvims:
+             try:
+                 _, myvim_thread = get_vim_thread(mydb, tenant_id, net["datacenter_id"], net["datacenter_tenant_id"])
+             except NfvoException as e:
+                 logger.error(str(e))
+                 myvim_thread = None
+             myvim_threads[datacenter_key] = myvim_thread
+             vims = get_vim(mydb, tenant_id, datacenter_id=net["datacenter_id"],
+                            datacenter_tenant_id=net["datacenter_tenant_id"])
+             if len(vims) == 0:
+                 logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
+                 myvims[datacenter_key] = None
+             else:
+                 myvims[datacenter_key] = next(iter(vims.values()))
+         myvim = myvims[datacenter_key]
+         myvim_thread = myvim_threads[datacenter_key]
+         if not myvim:
+             error_msg += "\n    Net VIM_id={} cannot be deleted because datacenter={} not found".format(net['vim_net_id'], net["datacenter_id"])
+             continue
+         extra = {"params": (net['vim_net_id'], net['sdn_net_id'])}
+         if net2vm_dependencies.get(net["uuid"]):
+             extra["depends_on"] = net2vm_dependencies[net["uuid"]]
+         sfi_dependencies = [action["task_index"] for action in db_vim_actions if action["item"] == "instance_sfis"]
+         if sfi_dependencies:
+             if "depends_on" in extra:
+                 extra["depends_on"] += sfi_dependencies
+             else:
+                 extra["depends_on"] = sfi_dependencies
+         db_vim_action = {
+             "instance_action_id": instance_action_id,
+             "task_index": task_index,
+             "datacenter_vim_id": net["datacenter_tenant_id"],
+             "action": "DELETE",
+             "status": "SCHEDULED",
+             "item": "instance_nets",
+             "item_id": net["uuid"],
+             "related": net["related"],
+             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+         }
+         task_index += 1
+         db_vim_actions.append(db_vim_action)
+     for sdn_net in instanceDict['sdn_nets']:
+         if not sdn_net["sdn"]:
+             continue
+         extra = {}
+         db_vim_action = {
+             "instance_action_id": instance_action_id,
+             "task_index": task_index,
+             "wim_account_id": sdn_net["wim_account_id"],
+             "action": "DELETE",
+             "status": "SCHEDULED",
+             "item": "instance_wim_nets",
+             "item_id": sdn_net["uuid"],
+             "related": sdn_net["related"],
+             "extra": yaml.safe_dump(extra, default_flow_style=True, width=256)
+         }
+         task_index += 1
+         db_vim_actions.append(db_vim_action)
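+     # Teardown ordering sketch (inferred from the depends_on wiring above): sfs wait
+     # for sfps, sfis wait for sfs, vms wait for sfis, and nets wait for both sfis and
+     # the vms attached through net2vm_dependencies, so the vim_threads undo the
+     # instance in reverse creation order.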
+     db_instance_action["number_tasks"] = task_index
+     # --> WIM
+     wim_actions, db_instance_action = (
+         wim_engine.incorporate_actions(wim_actions, db_instance_action))
+     # <-- WIM
+     db_tables = [
+         {"instance_actions": db_instance_action},
+         {"vim_wim_actions": db_vim_actions + wim_actions}
+     ]
+     logger.debug("delete_instance done DB tables: %s",
+                  yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
+     mydb.new_rows(db_tables, ())
+     for myvim_thread_id in vimthread_affected:
+         vim_threads["running"][myvim_thread_id].insert_task(db_vim_actions)
+     wim_engine.dispatch(wim_actions)
+     if error_msg:
+         return 'action_id={} instance {} deleted but some elements could not be deleted, or already deleted '\
+                '(error: 404) from VIM: {}'.format(instance_action_id, message, error_msg)
+     else:
+         return "action_id={} instance {} deleted".format(instance_action_id, message)
+ def get_instance_id(mydb, tenant_id, instance_id):
+     global ovim
+     #check valid tenant_id
+     check_tenant(mydb, tenant_id)
+     #obtain data
+     instance_dict = mydb.get_instance_scenario(instance_id, tenant_id, verbose=True)
+     # TODO py3
+     # for net in instance_dict["nets"]:
+     #     if net.get("sdn_net_id"):
+     #         net_sdn = ovim.show_network(net["sdn_net_id"])
+     #         net["sdn_info"] = {
+     #             "admin_state_up": net_sdn.get("admin_state_up"),
+     #             "flows": net_sdn.get("flows"),
+     #             "last_error": net_sdn.get("last_error"),
+     #             "ports": net_sdn.get("ports"),
+     #             "type": net_sdn.get("type"),
+     #             "status": net_sdn.get("status"),
+     #             "vlan": net_sdn.get("vlan"),
+     #         }
+     return instance_dict
+ @deprecated("Instance is automatically refreshed by vim_threads")
+ def refresh_instance(mydb, nfvo_tenant, instanceDict, datacenter=None, vim_tenant=None):
+     '''Refreshes a scenario instance. It modifies instanceDict.
+     Returns:
+          - result: <0 if there is any unexpected error, n>=0 if no errors where n is the number of vms and nets that couldn't be updated in the database
+          - error_msg
+     '''
+     # # Assumption: nfvo_tenant and instance_id were checked before entering into this function
+     # #print "nfvo.refresh_instance begins"
+     # #print json.dumps(instanceDict, indent=4)
+     #
+     # #print "Getting the VIM URL and the VIM tenant_id"
+     # myvims={}
+     #
+     # # 1. Getting VIM vm and net list
+     # vms_updated = [] #List of VM instance uuids in openmano that were updated
+     # vms_notupdated=[]
+     # vm_list = {}
+     # for sce_vnf in instanceDict['vnfs']:
+     #     datacenter_key = (sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"])
+     #     if datacenter_key not in vm_list:
+     #         vm_list[datacenter_key] = []
+     #     if datacenter_key not in myvims:
+     #         vims = get_vim(mydb, nfvo_tenant, datacenter_id=sce_vnf["datacenter_id"],
+     #                        datacenter_tenant_id=sce_vnf["datacenter_tenant_id"])
+     #         if len(vims) == 0:
+     #             logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(sce_vnf["datacenter_id"], sce_vnf["datacenter_tenant_id"]))
+     #             myvims[datacenter_key] = None
+     #         else:
+     #             myvims[datacenter_key] = next(iter(vims.values()))
+     #     for vm in sce_vnf['vms']:
+     #         vm_list[datacenter_key].append(vm['vim_vm_id'])
+     #         vms_notupdated.append(vm["uuid"])
+     #
+     # nets_updated = [] #List of VM instance uuids in openmano that were updated
+     # nets_notupdated=[]
+     # net_list = {}
+     # for net in instanceDict['nets']:
+     #     datacenter_key = (net["datacenter_id"], net["datacenter_tenant_id"])
+     #     if datacenter_key not in net_list:
+     #         net_list[datacenter_key] = []
+     #     if datacenter_key not in myvims:
+     #         vims = get_vim(mydb, nfvo_tenant, datacenter_id=net["datacenter_id"],
+     #                        datacenter_tenant_id=net["datacenter_tenant_id"])
+     #         if len(vims) == 0:
+     #             logger.error("datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"]))
+     #             myvims[datacenter_key] = None
+     #         else:
+     #             myvims[datacenter_key] = next(iter(vims.values()))
+     #
+     #     net_list[datacenter_key].append(net['vim_net_id'])
+     #     nets_notupdated.append(net["uuid"])
+     #
+     # # 1. Getting the status of all VMs
+     # vm_dict={}
+     # for datacenter_key in myvims:
+     #     if not vm_list.get(datacenter_key):
+     #         continue
+     #     failed = True
+     #     failed_message=""
+     #     if not myvims[datacenter_key]:
+     #         failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
+     #     else:
+     #         try:
+     #             vm_dict.update(myvims[datacenter_key].refresh_vms_status(vm_list[datacenter_key]) )
+     #             failed = False
+     #         except vimconn.vimconnException as e:
+     #             logger.error("VIM exception %s %s", type(e).__name__, str(e))
+     #             failed_message = str(e)
+     #     if failed:
+     #         for vm in vm_list[datacenter_key]:
+     #             vm_dict[vm] = {'status': "VIM_ERROR", 'error_msg': failed_message}
+     #
+     # # 2. Update the status of VMs in the instanceDict, while collects the VMs whose status changed
+     # for sce_vnf in instanceDict['vnfs']:
+     #     for vm in sce_vnf['vms']:
+     #         vm_id = vm['vim_vm_id']
+     #         interfaces = vm_dict[vm_id].pop('interfaces', [])
+     #         #2.0 look if contain manamgement interface, and if not change status from ACTIVE:NoMgmtIP to ACTIVE
+     #         has_mgmt_iface = False
+     #         for iface in vm["interfaces"]:
+     #             if iface["type"]=="mgmt":
+     #                 has_mgmt_iface = True
+     #         if vm_dict[vm_id]['status'] == "ACTIVE:NoMgmtIP" and not has_mgmt_iface:
+     #             vm_dict[vm_id]['status'] = "ACTIVE"
+     #         if vm_dict[vm_id].get('error_msg') and len(vm_dict[vm_id]['error_msg']) >= 1024:
+     #             vm_dict[vm_id]['error_msg'] = vm_dict[vm_id]['error_msg'][:516] + " ... " + vm_dict[vm_id]['error_msg'][-500:]
+     #         if vm['status'] != vm_dict[vm_id]['status'] or vm.get('error_msg')!=vm_dict[vm_id].get('error_msg') or vm.get('vim_info')!=vm_dict[vm_id].get('vim_info'):
+     #             vm['status']    = vm_dict[vm_id]['status']
+     #             vm['error_msg'] = vm_dict[vm_id].get('error_msg')
+     #             vm['vim_info']  = vm_dict[vm_id].get('vim_info')
+     #             # 2.1. Update in openmano DB the VMs whose status changed
+     #             try:
+     #                 updates = mydb.update_rows('instance_vms', UPDATE=vm_dict[vm_id], WHERE={'uuid':vm["uuid"]})
+     #                 vms_notupdated.remove(vm["uuid"])
+     #                 if updates>0:
+     #                     vms_updated.append(vm["uuid"])
+     #             except db_base_Exception as e:
+     #                 logger.error("nfvo.refresh_instance error database update: %s", str(e))
+     #         # 2.2. Update in openmano DB the interface VMs
+     #         for interface in interfaces:
+     #             #translate from vim_net_id to instance_net_id
+     #             network_id_list=[]
+     #             for net in instanceDict['nets']:
+     #                 if net["vim_net_id"] == interface["vim_net_id"]:
+     #                     network_id_list.append(net["uuid"])
+     #             if not network_id_list:
+     #                 continue
+     #             del interface["vim_net_id"]
+     #             try:
+     #                 for network_id in network_id_list:
+     #                     mydb.update_rows('instance_interfaces', UPDATE=interface, WHERE={'instance_vm_id':vm["uuid"], "instance_net_id":network_id})
+     #             except db_base_Exception as e:
+     #                 logger.error( "nfvo.refresh_instance error with vm=%s, interface_net_id=%s", vm["uuid"], network_id)
+     #
+     # # 3. Getting the status of all nets
+     # net_dict = {}
+     # for datacenter_key in myvims:
+     #     if not net_list.get(datacenter_key):
+     #         continue
+     #     failed = True
+     #     failed_message = ""
+     #     if not myvims[datacenter_key]:
+     #         failed_message = "datacenter '{}' with datacenter_tenant_id '{}' not found".format(net["datacenter_id"], net["datacenter_tenant_id"])
+     #     else:
+     #         try:
+     #             net_dict.update(myvims[datacenter_key].refresh_nets_status(net_list[datacenter_key]) )
+     #             failed = False
+     #         except vimconn.vimconnException as e:
+     #             logger.error("VIM exception %s %s", type(e).__name__, str(e))
+     #             failed_message = str(e)
+     #     if failed:
+     #         for net in net_list[datacenter_key]:
+     #             net_dict[net] = {'status': "VIM_ERROR", 'error_msg': failed_message}
+     #
+     # # 4. Update the status of nets in the instanceDict, while collects the nets whose status changed
+     # # TODO: update nets inside a vnf
+     # for net in instanceDict['nets']:
+     #     net_id = net['vim_net_id']
+     #     if net_dict[net_id].get('error_msg') and len(net_dict[net_id]['error_msg']) >= 1024:
+     #         net_dict[net_id]['error_msg'] = net_dict[net_id]['error_msg'][:516] + " ... " + net_dict[net_id]['error_msg'][-500:]
+     #     if net['status'] != net_dict[net_id]['status'] or net.get('error_msg')!=net_dict[net_id].get('error_msg') or net.get('vim_info')!=net_dict[net_id].get('vim_info'):
+     #         net['status']    = net_dict[net_id]['status']
+     #         net['error_msg'] = net_dict[net_id].get('error_msg')
+     #         net['vim_info']  = net_dict[net_id].get('vim_info')
+     #         # 5.1. Update in openmano DB the nets whose status changed
+     #         try:
+     #             updated = mydb.update_rows('instance_nets', UPDATE=net_dict[net_id], WHERE={'uuid':net["uuid"]})
+     #             nets_notupdated.remove(net["uuid"])
+     #             if updated>0:
+     #                 nets_updated.append(net["uuid"])
+     #         except db_base_Exception as e:
+     #             logger.error("nfvo.refresh_instance error database update: %s", str(e))
+     #
+     # # Returns appropriate output
+     # #print "nfvo.refresh_instance finishes"
+     # logger.debug("VMs updated in the database: %s; nets updated in the database %s; VMs not updated: %s; nets not updated: %s",
+     #             str(vms_updated), str(nets_updated), str(vms_notupdated), str(nets_notupdated))
+     instance_id = instanceDict['uuid']
+     # if len(vms_notupdated)+len(nets_notupdated)>0:
+     #     error_msg = "VMs not updated: " + str(vms_notupdated) + "; nets not updated: " + str(nets_notupdated)
+     #     return len(vms_notupdated)+len(nets_notupdated), 'Scenario instance ' + instance_id + ' refreshed but some elements could not be updated in the database: ' + error_msg
+     return 0, 'Scenario instance ' + instance_id + ' refreshed.'
+ def instance_action(mydb, nfvo_tenant, instance_id, action_dict):
+     #print "Checking that the instance_id exists and getting the instance dictionary"
+     instanceDict = mydb.get_instance_scenario(instance_id, nfvo_tenant)
+     #print yaml.safe_dump(instanceDict, indent=4, default_flow_style=False)
+     #print "Checking that nfvo_tenant_id exists and getting the VIM URI and the VIM tenant_id"
+     vims = get_vim(mydb, nfvo_tenant, instanceDict['datacenter_id'])
+     if len(vims) == 0:
+         raise NfvoException("datacenter '{}' not found".format(str(instanceDict['datacenter_id'])), httperrors.Not_Found)
+     myvim = next(iter(vims.values()))
+     vm_result = {}
+     vm_error = 0
+     vm_ok = 0
+     myvim_threads_id = {}
+     if action_dict.get("vdu-scaling"):
+         db_instance_vms = []
+         db_vim_actions = []
+         db_instance_interfaces = []
+         instance_action_id = get_task_id()
+         db_instance_action = {
+             "uuid": instance_action_id,   # same uuid for the instance and the action on create
+             "tenant_id": nfvo_tenant,
+             "instance_id": instance_id,
+             "description": "SCALE",
+         }
+         vm_result["instance_action_id"] = instance_action_id
+         vm_result["created"] = []
+         vm_result["deleted"] = []
+         task_index = 0
+         for vdu in action_dict["vdu-scaling"]:
+             vdu_id = vdu.get("vdu-id")
+             osm_vdu_id = vdu.get("osm_vdu_id")
+             member_vnf_index = vdu.get("member-vnf-index")
+             vdu_count = vdu.get("count", 1)
+             if vdu_id:
+                 target_vms = mydb.get_rows(
+                     FROM="instance_vms as vms join instance_vnfs as vnfs on vms.instance_vnf_id=vnfs.uuid",
+                     WHERE={"vms.uuid": vdu_id},
+                     ORDER_BY="vms.created_at"
+                 )
+                 if not target_vms:
+                     raise NfvoException("Cannot find the vdu with id {}".format(vdu_id), httperrors.Not_Found)
+             else:
+                 if not osm_vdu_id and not member_vnf_index:
+                     raise NfvoException("Invalid input vdu parameters. Must supply either 'vdu-id' of 'osm_vdu_id','member-vnf-index'")
+                 target_vms = mydb.get_rows(
+                     # SELECT=("ivms.uuid", "ivnfs.datacenter_id", "ivnfs.datacenter_tenant_id"),
+                     FROM="instance_vms as ivms join instance_vnfs as ivnfs on ivms.instance_vnf_id=ivnfs.uuid"\
+                          " join sce_vnfs as svnfs on ivnfs.sce_vnf_id=svnfs.uuid"\
+                          " join vms on ivms.vm_id=vms.uuid",
+                     WHERE={"vms.osm_id": osm_vdu_id, "svnfs.member_vnf_index": member_vnf_index,
+                            "ivnfs.instance_scenario_id": instance_id},
+                     ORDER_BY="ivms.created_at"
+                 )
+                 if not target_vms:
+                     raise NfvoException("Cannot find the vdu with osm_vdu_id {} and member-vnf-index {}".format(osm_vdu_id, member_vnf_index), httperrors.Not_Found)
+                 vdu_id = target_vms[-1]["uuid"]
+             target_vm = target_vms[-1]
+             datacenter = target_vm["datacenter_id"]
+             myvim_threads_id[datacenter], _ = get_vim_thread(mydb, nfvo_tenant, datacenter)
+             if vdu["type"] == "delete":
+                 for index in range(0, vdu_count):
+                     target_vm = target_vms[-1-index]
+                     vdu_id = target_vm["uuid"]
+                     # look for the vm interfaces
+                     vm_interfaces = None
+                     for sce_vnf in instanceDict['vnfs']:
+                         for vm in sce_vnf['vms']:
+                             if vm["uuid"] == vdu_id:
+                                 # TODO revise this should not be vm["uuid"]   instance_vms["vm_id"]
+                                 vm_interfaces = vm["interfaces"]
+                                 break
+                     db_vim_action = {
+                         "instance_action_id": instance_action_id,
+                         "task_index": task_index,
+                         "datacenter_vim_id": target_vm["datacenter_tenant_id"],
+                         "action": "DELETE",
+                         "status": "SCHEDULED",
+                         "item": "instance_vms",
+                         "item_id": vdu_id,
+                         "related": target_vm["related"],
+                         "extra": yaml.safe_dump({"params": vm_interfaces},
+                                                 default_flow_style=True, width=256)
+                     }
+                     task_index += 1
+                     db_vim_actions.append(db_vim_action)
+                     vm_result["deleted"].append(vdu_id)
+                     # delete from database
+                     db_instance_vms.append({"TO-DELETE": vdu_id})
+             else:  # vdu["type"] == "create":
+                 iface2iface = {}
+                 where = {"item": "instance_vms", "item_id": target_vm["uuid"], "action": "CREATE"}
+                 vim_action_to_clone = mydb.get_rows(FROM="vim_wim_actions", WHERE=where)
+                 if not vim_action_to_clone:
+                     raise NfvoException("Cannot find the vim_action at database with {}".format(where), httperrors.Internal_Server_Error)
+                 vim_action_to_clone = vim_action_to_clone[0]
+                 extra = yaml.safe_load(vim_action_to_clone["extra"])
+                 # generate a new depends_on. Convert format TASK-Y into new format TASK-ACTION-XXXX.XXXX.Y
+                 # TODO do the same for flavor and image when available
+                 task_depends_on = []
+                 task_params = extra["params"]
+                 task_params_networks = deepcopy(task_params[5])
+                 for iface in task_params[5]:
+                     if iface["net_id"].startswith("TASK-"):
+                         if "." not in iface["net_id"]:
+                             task_depends_on.append("{}.{}".format(vim_action_to_clone["instance_action_id"],
+                                                              iface["net_id"][5:]))
+                             iface["net_id"] = "TASK-{}.{}".format(vim_action_to_clone["instance_action_id"],
+                                                                   iface["net_id"][5:])
+                         else:
+                             task_depends_on.append(iface["net_id"][5:])
+                     if "mac_address" in iface:
+                         del iface["mac_address"]
+                 vm_ifaces_to_clone = mydb.get_rows(FROM="instance_interfaces", WHERE={"instance_vm_id": target_vm["uuid"]})
+                 for index in range(0, vdu_count):
+                     vm_uuid = str(uuid4())
+                     vm_name = target_vm.get('vim_name')
+                     try:
+                         suffix = vm_name.rfind("-")
+                         vm_name = vm_name[:suffix+1] + str(index + 1 + int(vm_name[suffix+1:]))
+                     except Exception:
+                         pass
+                     db_instance_vm = {
+                         "uuid": vm_uuid,
+                         'related': vm_uuid,
+                         'instance_vnf_id': target_vm['instance_vnf_id'],
+                         'vm_id': target_vm['vm_id'],
+                         'vim_name': vm_name,
+                     }
+                     db_instance_vms.append(db_instance_vm)
+                     for vm_iface in vm_ifaces_to_clone:
+                         iface_uuid = str(uuid4())
+                         iface2iface[vm_iface["uuid"]] = iface_uuid
+                         db_vm_iface = {
+                             "uuid": iface_uuid,
+                             'instance_vm_id': vm_uuid,
+                             "instance_net_id": vm_iface["instance_net_id"],
+                             'interface_id': vm_iface['interface_id'],
+                             'type': vm_iface['type'],
+                             'floating_ip': vm_iface['floating_ip'],
+                             'port_security': vm_iface['port_security']
+                         }
+                         db_instance_interfaces.append(db_vm_iface)
+                     task_params_copy = deepcopy(task_params)
+                     for iface in task_params_copy[5]:
+                         iface["uuid"] = iface2iface[iface["uuid"]]
+                         # increment ip_address
+                         if "ip_address" in iface:
+                             ip = iface.get("ip_address")
+                             i = ip.rfind(".")
+                             if i > 0:
+                                 try:
+                                     i += 1
+                                     ip = ip[:i] + str(int(ip[i:]) + 1)
+                                     iface["ip_address"] = ip
+                                 except ValueError:
+                                     iface["ip_address"] = None
+                     if vm_name:
+                         task_params_copy[0] = vm_name
+                     db_vim_action = {
+                         "instance_action_id": instance_action_id,
+                         "task_index": task_index,
+                         "datacenter_vim_id": vim_action_to_clone["datacenter_vim_id"],
+                         "action": "CREATE",
+                         "status": "SCHEDULED",
+                         "item": "instance_vms",
+                         "item_id": vm_uuid,
+                         "related": vm_uuid,
+                         # TODO review the parameters: remove or increment the MAC. Increment the IP
+                         # and set the dependencies with ACTION-asdfasd.
+                         "extra": yaml.safe_dump({"params": task_params_copy, "depends_on": task_depends_on}, default_flow_style=True, width=256)
+                     }
+                     task_index += 1
+                     db_vim_actions.append(db_vim_action)
+                     vm_result["created"].append(vm_uuid)
+         db_instance_action["number_tasks"] = task_index
+         db_tables = [
+             {"instance_vms": db_instance_vms},
+             {"instance_interfaces": db_instance_interfaces},
+             {"instance_actions": db_instance_action},
+             # TODO revise sfps
+             # {"instance_sfis": db_instance_sfis},
+             # {"instance_sfs": db_instance_sfs},
+             # {"instance_classifications": db_instance_classifications},
+             # {"instance_sfps": db_instance_sfps},
+             {"vim_wim_actions": db_vim_actions}
+         ]
+         logger.debug("create_vdu done DB tables: %s",
+                      yaml.safe_dump(db_tables, indent=4, default_flow_style=False))
+         mydb.new_rows(db_tables, [])
+         for myvim_thread in myvim_threads_id.values():
+             vim_threads["running"][myvim_thread].insert_task(db_vim_actions)
+         return vm_result
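+     # Illustrative "vdu-scaling" payload, inferred from the lookups above (values are
+     # hypothetical):
+     #   action_dict = {"vdu-scaling": [
+     #       {"vdu-id": "<instance_vms uuid>", "type": "delete", "count": 1},
+     #       {"osm_vdu_id": "dataVM", "member-vnf-index": "1", "type": "create", "count": 2}]}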
+     input_vnfs = action_dict.pop("vnfs", [])
+     input_vms = action_dict.pop("vms", [])
+     action_over_all = not input_vnfs and not input_vms
+     for sce_vnf in instanceDict['vnfs']:
+         for vm in sce_vnf['vms']:
+             if not action_over_all and sce_vnf['uuid'] not in input_vnfs and sce_vnf['vnf_name'] not in input_vnfs and \
+                     sce_vnf['member_vnf_index'] not in input_vnfs and \
+                     vm['uuid'] not in input_vms and vm['name'] not in input_vms and \
+                     sce_vnf['member_vnf_index'] + "-" + vm['vdu_osm_id'] not in input_vms:  # TODO conside vm_count_index
+                 continue
+             try:
+                 if "add_public_key" in action_dict:
+                     if sce_vnf.get('mgmt_access'):
+                         mgmt_access = yaml.load(sce_vnf['mgmt_access'], Loader=yaml.Loader)
+                         if not input_vms and mgmt_access.get("vdu-id") != vm['vdu_osm_id']:
+                             continue
+                         default_user = mgmt_access.get("default-user")
+                         password = mgmt_access.get("password")
+                         if mgmt_access.get(vm['vdu_osm_id']):
+                             default_user = mgmt_access[vm['vdu_osm_id']].get("default-user", default_user)
+                             password = mgmt_access[vm['vdu_osm_id']].get("password", password)
+                         tenant = mydb.get_rows_by_id('nfvo_tenants', nfvo_tenant)
+                         try:
+                             if 'ip_address' in vm:
+                                 mgmt_ip = vm['ip_address'].split(';')
+                                 priv_RO_key = decrypt_key(tenant[0]['encrypted_RO_priv_key'], tenant[0]['uuid'])
+                                 data = myvim.inject_user_key(mgmt_ip[0], action_dict.get('user', default_user),
+                                                              action_dict['add_public_key'],
+                                                              password=password, ro_key=priv_RO_key)
+                                 vm_result[vm['uuid']] = {"vim_result": 200,
 -
++                                                          "description": "Public key injected",
++                                                          "name": vm['name']
+                                                          }
 -            content, _ = myvim.new_network(net_name, net_type, net_ipprofile, shared=net_public, vlan=net_vlan) #, **net)
+                         except KeyError:
+                             raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
+                                                 httperrors.Internal_Server_Error)
+                     else:
+                         raise NfvoException("Unable to inject ssh key in vm: {} - Aborting".format(vm['uuid']),
+                                             httperrors.Internal_Server_Error)
+                 else:
+                     data = myvim.action_vminstance(vm['vim_vm_id'], action_dict)
+                     if "console" in action_dict:
+                         if not global_config["http_console_proxy"]:
+                             vm_result[vm['uuid']] = {"vim_result": 200,
+                                                      "description": "{protocol}//{ip}:{port}/{suffix}".format(
+                                                          protocol=data["protocol"],
+                                                          ip=data["server"],
+                                                          port=data["port"],
+                                                          suffix=data["suffix"]),
+                                                      "name": vm['name']
+                                                      }
+                             vm_ok += 1
+                         elif data["server"] == "127.0.0.1" or data["server"] == "localhost":
+                             vm_result[vm['uuid']] = {"vim_result": -httperrors.Unauthorized,
+                                                      "description": "this console is only reachable by local interface",
+                                                      "name": vm['name']
+                                                      }
+                             vm_error += 1
+                         else:
+                             # print("console data", data)
+                             try:
+                                 console_thread = create_or_use_console_proxy_thread(data["server"], data["port"])
+                                 vm_result[vm['uuid']] = {"vim_result": 200,
+                                                          "description": "{protocol}//{ip}:{port}/{suffix}".format(
+                                                              protocol=data["protocol"],
+                                                              ip=global_config["http_console_host"],
+                                                              port=console_thread.port,
+                                                              suffix=data["suffix"]),
+                                                          "name": vm['name']
+                                                          }
+                                 vm_ok += 1
+                             except NfvoException as e:
+                                 vm_result[vm['uuid']] = {"vim_result": e.http_code, "name": vm['name'], "description": str(e)}
+                                 vm_error += 1
+                     else:
+                         vm_result[vm['uuid']] = {"vim_result": 200, "description": "ok", "name": vm['name']}
+                         vm_ok += 1
+             except vimconn.vimconnException as e:
+                 vm_result[ vm['uuid'] ] = {"vim_result": e.http_code, "name":vm['name'], "description": str(e)}
+                 vm_error+=1
+     if vm_ok==0: #all goes wrong
+         return vm_result
+     else:
+         return vm_result
+ def instance_action_get(mydb, nfvo_tenant, instance_id, action_id):
+     filter_dict = {}
+     if nfvo_tenant and nfvo_tenant != "any":
+         filter_dict["tenant_id"] = nfvo_tenant
+     if instance_id and instance_id != "any":
+         filter_dict["instance_id"] = instance_id
+     if action_id:
+         filter_dict["uuid"] = action_id
+     rows = mydb.get_rows(FROM="instance_actions", WHERE=filter_dict)
+     if action_id:
+         if not rows:
+             raise NfvoException("Not found any action with this criteria", httperrors.Not_Found)
+         vim_wim_actions = mydb.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": action_id})
+         rows[0]["vim_wim_actions"] = vim_wim_actions
+         # for backward compatibility set vim_actions = vim_wim_actions
+         rows[0]["vim_actions"] = vim_wim_actions
+     return {"actions": rows}
+ def create_or_use_console_proxy_thread(console_server, console_port):
+     #look for a non-used port
+     console_thread_key = console_server + ":" + str(console_port)
+     if console_thread_key in global_config["console_thread"]:
+         #global_config["console_thread"][console_thread_key].start_timeout()
+         return global_config["console_thread"][console_thread_key]
+     for port in global_config["console_port_iterator"]():
+         #print "create_or_use_console_proxy_thread() port:", port
+         if port in global_config["console_ports"]:
+             continue
+         try:
+             clithread = cli.ConsoleProxyThread(global_config['http_host'], port, console_server, console_port)
+             clithread.start()
+             global_config["console_thread"][console_thread_key] = clithread
+             global_config["console_ports"][port] = console_thread_key
+             return clithread
+         except cli.ConsoleProxyExceptionPortUsed:
+             # port used, try with another
+             continue
+         except cli.ConsoleProxyException as e:
+             raise NfvoException(str(e), httperrors.Bad_Request)
+     raise NfvoException("Not found any free 'http_console_ports'", httperrors.Conflict)
+ def check_tenant(mydb, tenant_id):
+     '''check that tenant exists at database'''
+     tenant = mydb.get_rows(FROM='nfvo_tenants', SELECT=('uuid',), WHERE={'uuid': tenant_id})
+     if not tenant:
+         raise NfvoException("tenant '{}' not found".format(tenant_id), httperrors.Not_Found)
+     return
+ def new_tenant(mydb, tenant_dict):
+     tenant_uuid = str(uuid4())
+     tenant_dict['uuid'] = tenant_uuid
+     try:
+         pub_key, priv_key = create_RO_keypair(tenant_uuid)
+         tenant_dict['RO_pub_key'] = pub_key
+         tenant_dict['encrypted_RO_priv_key'] = priv_key
+         mydb.new_row("nfvo_tenants", tenant_dict, confidential_data=True)
+     except db_base_Exception as e:
+         raise NfvoException("Error creating the new tenant: {} ".format(tenant_dict['name']) + str(e), e.http_code)
+     return tenant_uuid
+ def delete_tenant(mydb, tenant):
+     #get nfvo_tenant info
+     tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', tenant, 'tenant')
+     mydb.delete_row_by_id("nfvo_tenants", tenant_dict['uuid'])
+     return tenant_dict['uuid'] + " " + tenant_dict["name"]
+ def new_datacenter(mydb, datacenter_descriptor):
+     sdn_port_mapping = None
+     if "config" in datacenter_descriptor:
+         sdn_port_mapping = datacenter_descriptor["config"].pop("sdn-port-mapping", None)
+         datacenter_descriptor["config"] = yaml.safe_dump(datacenter_descriptor["config"], default_flow_style=True,
+                                                          width=256)
+     # Check that datacenter-type is correct
+     datacenter_type = datacenter_descriptor.get("type", "openvim");
+     # module_info = None
+     # load plugin
+     plugin_name = "rovim_" + datacenter_type
+     if plugin_name not in plugins:
+         _load_plugin(plugin_name, type="vim")
+     datacenter_id = mydb.new_row("datacenters", datacenter_descriptor, add_uuid=True, confidential_data=True)
+     if sdn_port_mapping:
+         try:
+             datacenter_sdn_port_mapping_set(mydb, None, datacenter_id, sdn_port_mapping)
+         except Exception:
+             mydb.delete_row_by_id("datacenters", datacenter_id)   # Rollback
+             raise
+     return datacenter_id
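+ # Illustrative descriptor (hypothetical values): the "rovim_<type>" plugin is loaded
+ # for the declared type, "sdn-port-mapping" is split out of config, and the remaining
+ # config is stored as single-line YAML:
+ #     new_datacenter(mydb, {"name": "dc1", "type": "openstack",
+ #                           "vim_url": "http://10.0.0.1:5000/v3",
+ #                           "config": {"sdn-controller": "a1b2c3"}})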
+ def edit_datacenter(mydb, datacenter_id_name, datacenter_descriptor):
+     # obtain data, check that only one exist
+     datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id_name)
+     # edit data
+     datacenter_id = datacenter['uuid']
+     where = {'uuid': datacenter['uuid']}
+     remove_port_mapping = False
+     new_sdn_port_mapping = None
+     if "config" in datacenter_descriptor:
+         if datacenter_descriptor['config'] != None:
+             try:
+                 new_config_dict = datacenter_descriptor["config"]
+                 if "sdn-port-mapping" in new_config_dict:
+                     remove_port_mapping = True
+                     new_sdn_port_mapping = new_config_dict.pop("sdn-port-mapping")
+                 # delete null fields
+                 to_delete = []
+                 for k in new_config_dict:
+                     if new_config_dict[k] is None:
+                         to_delete.append(k)
+                         if k == 'sdn-controller':
+                             remove_port_mapping = True
+                 config_text = datacenter.get("config")
+                 if not config_text:
+                     config_text = '{}'
+                 config_dict = yaml.load(config_text, Loader=yaml.Loader)
+                 config_dict.update(new_config_dict)
+                 # delete null fields
+                 for k in to_delete:
+                     del config_dict[k]
+             except Exception as e:
+                 raise NfvoException("Bad format at datacenter:config " + str(e), httperrors.Bad_Request)
+         if config_dict:
+             datacenter_descriptor["config"] = yaml.safe_dump(config_dict, default_flow_style=True, width=256)
+         else:
+             datacenter_descriptor["config"] = None
+         if remove_port_mapping:
+             try:
+                 datacenter_sdn_port_mapping_delete(mydb, None, datacenter_id)
+             except ovimException as e:
+                 raise NfvoException("Error deleting datacenter-port-mapping " + str(e), httperrors.Conflict)
+     mydb.update_rows('datacenters', datacenter_descriptor, where)
+     if new_sdn_port_mapping:
+         try:
+             datacenter_sdn_port_mapping_set(mydb, None, datacenter_id, new_sdn_port_mapping)
+         except ovimException as e:
+             # Rollback
+             mydb.update_rows('datacenters', datacenter, where)
+             raise NfvoException("Error adding datacenter-port-mapping " + str(e), httperrors.Conflict)
+     return datacenter_id
+ def delete_datacenter(mydb, datacenter):
+     #get nfvo_tenant info
+     datacenter_dict = mydb.get_table_by_uuid_name('datacenters', datacenter, 'datacenter')
+     mydb.delete_row_by_id("datacenters", datacenter_dict['uuid'])
+     try:
+         datacenter_sdn_port_mapping_delete(mydb, None, datacenter_dict['uuid'])
+     except ovimException as e:
+         raise NfvoException("Error deleting datacenter-port-mapping " + str(e))
+     return datacenter_dict['uuid'] + " " + datacenter_dict['name']
+ def create_vim_account(mydb, nfvo_tenant, datacenter_id, name=None, vim_id=None, vim_tenant=None, vim_tenant_name=None,
+                        vim_username=None, vim_password=None, config=None):
+     # get datacenter info
+     try:
+         if not datacenter_id:
+             if not vim_id:
+                 raise NfvoException("You must provide 'vim_id", http_code=httperrors.Bad_Request)
+             datacenter_id = vim_id
+         datacenter_id, datacenter_name = get_datacenter_uuid(mydb, None, datacenter_id)
+         create_vim_tenant = not vim_tenant and not vim_tenant_name
+         # get nfvo_tenant info
+         tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', nfvo_tenant)
+         if vim_tenant_name is None:
+             vim_tenant_name = tenant_dict['name']
+         tenants_datacenter_dict = {"nfvo_tenant_id": tenant_dict['uuid'], "datacenter_id": datacenter_id}
+         # #check that this association does not exist before
+         # tenants_datacenters = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
+         # if len(tenants_datacenters)>0:
+         #     raise NfvoException("datacenter '{}' and tenant'{}' are already attached".format(datacenter_id, tenant_dict['uuid']), httperrors.Conflict)
+         vim_tenant_id_exist_atdb = False
+         if not create_vim_tenant:
+             where_ = {"datacenter_id": datacenter_id}
+             if vim_tenant is not None:
+                 where_["vim_tenant_id"] = vim_tenant
+             if vim_tenant_name is not None:
+                 where_["vim_tenant_name"] = vim_tenant_name
+             #check if vim_tenant_id is already at database
+             datacenter_tenants_dict = mydb.get_rows(FROM='datacenter_tenants', WHERE=where_)
+             if len(datacenter_tenants_dict) >= 1:
+                 datacenter_tenants_dict = datacenter_tenants_dict[0]
+                 vim_tenant_id_exist_atdb = True
+                 #TODO check if a field has changed and edit entry at datacenter_tenants at DB
+             else: #result=0
+                 datacenter_tenants_dict = {}
+                 #insert at table datacenter_tenants
+         else: #if vim_tenant==None:
+             #create tenant at VIM if not provided
+             try:
+                 _, myvim = get_datacenter_by_name_uuid(mydb, None, datacenter_id, vim_user=vim_username,
+                                                        vim_passwd=vim_password)
+                 datacenter_name = myvim["name"]
+                 vim_tenant = myvim.new_tenant(vim_tenant_name, "created by openmano for datacenter "+datacenter_name)
+             except vimconn.vimconnException as e:
+                 raise NfvoException("Not possible to create vim_tenant {} at VIM: {}".format(vim_tenant_id, str(e)), httperrors.Internal_Server_Error)
+             datacenter_tenants_dict = {}
+             datacenter_tenants_dict["created"]="true"
+         #fill datacenter_tenants table
+         if not vim_tenant_id_exist_atdb:
+             datacenter_tenants_dict["vim_tenant_id"] = vim_tenant
+             datacenter_tenants_dict["vim_tenant_name"] = vim_tenant_name
+             datacenter_tenants_dict["user"] = vim_username
+             datacenter_tenants_dict["passwd"] = vim_password
+             datacenter_tenants_dict["datacenter_id"] = datacenter_id
+             if name:
+                 datacenter_tenants_dict["name"] = name
+             else:
+                 datacenter_tenants_dict["name"] = datacenter_name
+             if config:
+                 datacenter_tenants_dict["config"] = yaml.safe_dump(config, default_flow_style=True, width=256)
+             id_ = mydb.new_row('datacenter_tenants', datacenter_tenants_dict, add_uuid=True, confidential_data=True)
+             datacenter_tenants_dict["uuid"] = id_
+         #fill tenants_datacenters table
+         datacenter_tenant_id = datacenter_tenants_dict["uuid"]
+         tenants_datacenter_dict["datacenter_tenant_id"] = datacenter_tenant_id
+         mydb.new_row('tenants_datacenters', tenants_datacenter_dict)
+         # create thread
+         thread_name = get_non_used_vim_name(datacenter_name, datacenter_id, tenant_dict['name'], tenant_dict['uuid'])
+         new_thread = vim_thread(task_lock, plugins, thread_name, None, datacenter_tenant_id, db=db)
+         new_thread.start()
+         thread_id = datacenter_tenants_dict["uuid"]
+         vim_threads["running"][thread_id] = new_thread
+         return thread_id
+     except vimconn.vimconnException as e:
+         raise NfvoException(str(e), httperrors.Bad_Request)
+ def edit_vim_account(mydb, nfvo_tenant, datacenter_tenant_id, datacenter_id=None, name=None, vim_tenant=None,
+                               vim_tenant_name=None, vim_username=None, vim_password=None, config=None):
+     # get vim_account; check is valid for this tenant
+     from_ = "datacenter_tenants as dt JOIN tenants_datacenters as td ON dt.uuid=td.datacenter_tenant_id"
+     where_ = {"td.nfvo_tenant_id": nfvo_tenant}
+     if datacenter_tenant_id:
+         where_["dt.uuid"] = datacenter_tenant_id
+     if datacenter_id:
+         where_["dt.datacenter_id"] = datacenter_id
+     vim_accounts = mydb.get_rows(SELECT="dt.uuid as uuid, config", FROM=from_, WHERE=where_)
+     if not vim_accounts:
+         raise NfvoException("vim_account not found for this tenant", http_code=httperrors.Not_Found)
+     elif len(vim_accounts) > 1:
+         raise NfvoException("found more than one vim_account for this tenant", http_code=httperrors.Conflict)
+     datacenter_tenant_id = vim_accounts[0]["uuid"]
+     original_config = vim_accounts[0]["config"]
+     update_ = {}
+     if config:
+         original_config_dict = yaml.load(original_config, Loader=yaml.Loader)
+         original_config_dict.update(config)
+         update["config"] = yaml.safe_dump(original_config_dict, default_flow_style=True, width=256)
+     if name:
+         update_['name'] = name
+     if vim_tenant:
+         update_['vim_tenant_id'] = vim_tenant
+     if vim_tenant_name:
+         update_['vim_tenant_name'] = vim_tenant_name
+     if vim_username:
+         update_['user'] = vim_username
+     if vim_password:
+         update_['passwd'] = vim_password
+     if update_:
+         mydb.update_rows("datacenter_tenants", UPDATE=update_, WHERE={"uuid": datacenter_tenant_id})
+     vim_threads["running"][datacenter_tenant_id].insert_task("reload")
+     return datacenter_tenant_id
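+ # Illustrative config merge (inferred from the update above): with a stored config of
+ # "{use_floating_ip: true}", passing config={"insecure": True} yields
+ # "{insecure: true, use_floating_ip: true}" and the vim_thread is asked to "reload".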
+ def delete_vim_account(mydb, tenant_id, vim_account_id, datacenter=None):
+     #get nfvo_tenant info
+     if not tenant_id or tenant_id=="any":
+         tenant_uuid = None
+     else:
+         tenant_dict = mydb.get_table_by_uuid_name('nfvo_tenants', tenant_id)
+         tenant_uuid = tenant_dict['uuid']
+     #check that this association exist before
+     tenants_datacenter_dict = {}
+     datacenter_id = None
+     if datacenter:
+         datacenter_id, _ = get_datacenter_uuid(mydb, tenant_uuid, datacenter)
+         tenants_datacenter_dict["datacenter_id"] = datacenter_id
+     elif vim_account_id:
+         tenants_datacenter_dict["datacenter_tenant_id"] = vim_account_id
+     if tenant_uuid:
+         tenants_datacenter_dict["nfvo_tenant_id"] = tenant_uuid
+     tenant_datacenter_list = mydb.get_rows(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
+     if not tenant_datacenter_list and tenant_uuid:
+         raise NfvoException("datacenter '{}' and tenant '{}' are not attached".format(datacenter_id, tenant_dict['uuid']), httperrors.Not_Found)
+     #delete this association
+     mydb.delete_row(FROM='tenants_datacenters', WHERE=tenants_datacenter_dict)
+     #get vim_tenant info and deletes
+     warning=''
+     for tenant_datacenter_item in tenant_datacenter_list:
+         vim_tenant_dict = mydb.get_table_by_uuid_name('datacenter_tenants', tenant_datacenter_item['datacenter_tenant_id'])
+         #try to delete vim:tenant
+         try:
+             mydb.delete_row_by_id('datacenter_tenants', tenant_datacenter_item['datacenter_tenant_id'])
+             if vim_tenant_dict['created']=='true':
+                 #delete tenant at VIM if created by NFVO
+                 try:
+                     datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+                     myvim.delete_tenant(vim_tenant_dict['vim_tenant_id'])
+                 except vimconn.vimconnException as e:
+                     warning = "Not possible to delete vim_tenant_id {} from VIM: {} ".format(vim_tenant_dict['vim_tenant_id'], str(e))
+                     logger.warning(warning)
+         except db_base_Exception as e:
+             logger.error("Cannot delete datacenter_tenants " + str(e))
+             pass  # the error will be caused because dependencies, vim_tenant can not be deleted
+         thread_id = tenant_datacenter_item["datacenter_tenant_id"]
+         thread = vim_threads["running"].get(thread_id)
+         if thread:
+             thread.insert_task("exit")
+             vim_threads["deleting"][thread_id] = thread
+     return "datacenter {} detached. {}".format(datacenter_id, warning)
+ def datacenter_action(mydb, tenant_id, datacenter, action_dict):
+     #DEPRECATED
+     #get datacenter info
+     datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+     if 'check-connectivity' in action_dict:
+         try:
+             myvim.check_vim_connectivity()
+         except vimconn.vimconnException as e:
+             #logger.error("nfvo.datacenter_action() Not possible to get_network_list from VIM: %s ", str(e))
+             raise NfvoException(str(e), e.http_code)
+     elif 'net-update' in action_dict:
+         try:
+             nets = myvim.get_network_list(filter_dict={'shared': True, 'admin_state_up': True, 'status': 'ACTIVE'})
+             #print content
+         except vimconn.vimconnException as e:
+             #logger.error("nfvo.datacenter_action() Not possible to get_network_list from VIM: %s ", str(e))
+             raise NfvoException(str(e), httperrors.Internal_Server_Error)
+         #update nets Change from VIM format to NFVO format
+         net_list = []
+         for net in nets:
+             net_nfvo={'datacenter_id': datacenter_id}
+             net_nfvo['name']       = net['name']
+             #net_nfvo['description']= net['name']
+             net_nfvo['vim_net_id'] = net['id']
+             net_nfvo['type']       = net['type'][0:6] #change from ('ptp','data','bridge_data','bridge_man')  to ('bridge','data','ptp')
+             net_nfvo['shared']     = net['shared']
+             net_nfvo['multipoint'] = net['type'] != 'ptp'
+             net_list.append(net_nfvo)
+         inserted, deleted = mydb.update_datacenter_nets(datacenter_id, net_list)
+         logger.info("Inserted %d nets, deleted %d old nets", inserted, deleted)
+         return inserted
+     elif 'net-edit' in action_dict:
+         net = action_dict['net-edit'].pop('net')
+         what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
+         result = mydb.update_rows('datacenter_nets', action_dict['net-edit'],
+                                 WHERE={'datacenter_id':datacenter_id, what: net})
+         return result
+     elif 'net-delete' in action_dict:
+         net = action_dict['net-delete'].get('net')
+         what = 'vim_net_id' if utils.check_valid_uuid(net) else 'name'
+         result = mydb.delete_row(FROM='datacenter_nets',
+                                 WHERE={'datacenter_id':datacenter_id, what: net})
+         return result
+     else:
+         raise NfvoException("Unknown action " + str(action_dict), httperrors.Bad_Request)
+ def datacenter_edit_netmap(mydb, tenant_id, datacenter, netmap, action_dict):
+     #get datacenter info
+     datacenter_id, _ = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+     what = 'uuid' if utils.check_valid_uuid(netmap) else 'name'
+     result = mydb.update_rows('datacenter_nets', action_dict['netmap'],
+                             WHERE={'datacenter_id':datacenter_id, what: netmap})
+     return result
+ def datacenter_new_netmap(mydb, tenant_id, datacenter, action_dict=None):
+     #get datacenter info
+     datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+     filter_dict={}
+     if action_dict:
+         action_dict = action_dict["netmap"]
+         if 'vim_id' in action_dict:
+             filter_dict["id"] = action_dict['vim_id']
+         if 'vim_name' in action_dict:
+             filter_dict["name"] = action_dict['vim_name']
+     else:
+         filter_dict["shared"] = True
+     try:
+         vim_nets = myvim.get_network_list(filter_dict=filter_dict)
+     except vimconn.vimconnException as e:
+         #logger.error("nfvo.datacenter_new_netmap() Not possible to get_network_list from VIM: %s ", str(e))
+         raise NfvoException(str(e), httperrors.Internal_Server_Error)
+     if len(vim_nets)>1 and action_dict:
+         raise NfvoException("more than two networks found, specify with vim_id", httperrors.Conflict)
+     elif len(vim_nets) == 0:  # and action_dict:
+         raise NfvoException("No network found at VIM with " + str(filter_dict), httperrors.Not_Found)
+     net_list = []
+     for net in vim_nets:
+         net_nfvo={'datacenter_id': datacenter_id}
+         if action_dict and "name" in action_dict:
+             net_nfvo['name']       = action_dict['name']
+         else:
+             net_nfvo['name']       = net['name']
+         #net_nfvo['description']= net['name']
+         net_nfvo['vim_net_id'] = net['id']
+         net_nfvo['type']       = net['type'][0:6] #change from ('ptp','data','bridge_data','bridge_man')  to ('bridge','data','ptp')
+         net_nfvo['shared']     = net['shared']
+         net_nfvo['multipoint'] = net['type'] != 'ptp'
+         try:
+             net_id = mydb.new_row("datacenter_nets", net_nfvo, add_uuid=True)
+             net_nfvo["status"] = "OK"
+             net_nfvo["uuid"] = net_id
+         except db_base_Exception as e:
+             if action_dict:
+                 raise
+             else:
+                 net_nfvo["status"] = "FAIL: " + str(e)
+         net_list.append(net_nfvo)
+     return net_list
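+ # Illustrative only: a sketch of the descriptor accepted by datacenter_new_netmap() above, based on the keys
+ # it reads ("vim_id"/"vim_name" select the VIM network, "name" overrides the netmap name; values hypothetical):
+ #   {"netmap": {"vim_name": "provider-net", "name": "mgmt"}}
+ # When called without action_dict, every VIM network flagged as shared is imported as a netmap.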
+ def get_sdn_net_id(mydb, tenant_id, datacenter, network_id):
+     # obtain all network data
+     try:
+         if utils.check_valid_uuid(network_id):
+             filter_dict = {"id": network_id}
+         else:
+             filter_dict = {"name": network_id}
+         datacenter_id, myvim = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+         network = myvim.get_network_list(filter_dict=filter_dict)
+     except vimconn.vimconnException as e:
+         raise NfvoException("Not possible to get_sdn_net_id from VIM: {}".format(str(e)), e.http_code)
+     # ensure the network is defined
+     if len(network) == 0:
+         raise NfvoException("Network {} is not present in the system".format(network_id),
+                             httperrors.Bad_Request)
+     # ensure there is only one network with the provided name
+     if len(network) > 1:
+         raise NfvoException("Multiple networks present in vim identified by {}".format(network_id), httperrors.Bad_Request)
+     # ensure it is a dataplane network
+     if network[0]['type'] != 'data':
+         return None
+     # ensure we use the id
+     network_id = network[0]['id']
+     # search in database mano_db, table instance_nets, for the sdn_net_id that corresponds to vim_net_id==network_id
+     # and with instance_scenario_id==NULL
+     #search_dict = {'vim_net_id': network_id, 'instance_scenario_id': None}
+     search_dict = {'vim_net_id': network_id}
+     try:
+         #sdn_network_id = mydb.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets', WHERE=search_dict)[0]['sdn_net_id']
+         result =  mydb.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets', WHERE=search_dict)
+     except db_base_Exception as e:
+         raise NfvoException("db_base_Exception obtaining SDN network to associated to vim network {}".format(
+             network_id) + str(e), e.http_code)
+     sdn_net_counter = 0
+     for net in result:
+         if net['sdn_net_id'] is not None:
+             sdn_net_counter += 1
+             sdn_net_id = net['sdn_net_id']
+     if sdn_net_counter == 0:
+         return None
+     elif sdn_net_counter == 1:
+         return sdn_net_id
+     else:
+         raise NfvoException("More than one SDN network is associated to vim network {}".format(
+             network_id), httperrors.Internal_Server_Error)
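+ # Illustrative only: the vim-net <-> sdn-net correspondence that get_sdn_net_id() above resolves is stored
+ # as a row of instance_nets with instance_scenario_id=NULL (see vim_action_create below); sketched shape:
+ #   {"instance_scenario_id": None, "vim_net_id": "<vim net uuid>", "sdn_net_id": "<ovim net uuid>"}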
+ def get_sdn_controller_id(mydb, datacenter):
+     # Obtain sdn controller id
+     config = mydb.get_rows(SELECT=('config',), FROM='datacenters', WHERE={'uuid': datacenter})[0].get('config', '{}')
+     if not config:
+         return None
+     return yaml.load(config, Loader=yaml.Loader).get('sdn-controller')
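+ # Illustrative only: get_sdn_controller_id() above expects the datacenter 'config' column to hold YAML with
+ # an optional 'sdn-controller' key, e.g. (hypothetical uuid):
+ #   sdn-controller: 6c9f3a1e-1111-2222-3333-444455556666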
+ def vim_net_sdn_attach(mydb, tenant_id, datacenter, network_id, descriptor):
+     try:
+         sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
+         if not sdn_network_id:
+             raise NfvoException("No SDN network is associated to vim-network {}".format(network_id), httperrors.Internal_Server_Error)
+         #Obtain sdn controller id
+         controller_id = get_sdn_controller_id(mydb, datacenter)
+         if not controller_id:
+             raise NfvoException("No SDN controller is set for datacenter {}".format(datacenter), httperrors.Internal_Server_Error)
+         #Obtain sdn controller info
+         sdn_controller = ovim.show_of_controller(controller_id)
+         port_data = {
+             'name': 'external_port',
+             'net_id': sdn_network_id,
+             'ofc_id': controller_id,
+             'switch_dpid': sdn_controller['dpid'],
+             'switch_port': descriptor['port']
+         }
+         if 'vlan' in descriptor:
+             port_data['vlan'] = descriptor['vlan']
+         if 'mac' in descriptor:
+             port_data['mac'] = descriptor['mac']
+         result = ovim.new_port(port_data)
+     except ovimException as e:
+         raise NfvoException("ovimException attaching SDN network {} to vim network {}".format(
+             sdn_network_id, network_id) + str(e), httperrors.Internal_Server_Error)
+     except db_base_Exception as e:
+         raise NfvoException("db_base_Exception attaching SDN network to vim network {}".format(
+             network_id) + str(e), e.http_code)
+     return 'Port uuid: ' + result
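+ # Illustrative only: a sketch of the descriptor consumed by vim_net_sdn_attach() above, based on the keys
+ # it reads ('port' is mandatory, 'vlan' and 'mac' optional; values hypothetical):
+ #   {"port": "eth3/12", "vlan": 400, "mac": "52:54:00:aa:bb:cc"}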
+ def vim_net_sdn_detach(mydb, tenant_id, datacenter, network_id, port_id=None):
+     if port_id:
+         port_filter = {'uuid': port_id}
+     else:
+         sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, network_id)
+         if not sdn_network_id:
+             raise NfvoException("No SDN network is associated to vim-network {}".format(network_id),
+                                 httperrors.Internal_Server_Error)
+         # in case no port_id is specified, only ports marked as 'external_port' will be detached
+         port_filter = {'name': 'external_port', 'net_id': sdn_network_id}
+     try:
+         port_list = ovim.get_ports(columns={'uuid'}, filter=port_filter)
+     except ovimException as e:
+         raise NfvoException("ovimException obtaining external ports for net {}. ".format(network_id) + str(e),
+                             httperrors.Internal_Server_Error)
+     if len(port_list) == 0:
+         raise NfvoException("No ports attached to the network {} were found with the requested criteria".format(network_id),
+                             httperrors.Bad_Request)
+     port_uuid_list = []
+     for port in port_list:
+         try:
+             port_uuid_list.append(port['uuid'])
+             ovim.delete_port(port['uuid'])
+         except ovimException as e:
+             raise NfvoException("ovimException deleting port {} for net {}. ".format(port['uuid'], network_id) + str(e), httperrors.Internal_Server_Error)
+     return 'Detached ports uuid: {}'.format(','.join(port_uuid_list))
+ def vim_action_get(mydb, tenant_id, datacenter, item, name):
+     #get datacenter info
+     datacenter_id, myvim  = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+     filter_dict={}
+     if name:
+         if utils.check_valid_uuid(name):
+             filter_dict["id"] = name
+         else:
+             filter_dict["name"] = name
+     try:
+         if item=="networks":
+             #filter_dict['tenant_id'] = myvim['tenant_id']
+             content = myvim.get_network_list(filter_dict=filter_dict)
+             if len(content) == 0:
+                 raise NfvoException("Network {} is not present in the system. ".format(name),
+                                     httperrors.Bad_Request)
+             #Update the networks with the attached ports
+             for net in content:
+                 sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, net['id'])
+                 if sdn_network_id is not None:
+                     try:
+                         #port_list = ovim.get_ports(columns={'uuid', 'switch_port', 'vlan'}, filter={'name': 'external_port', 'net_id': sdn_network_id})
+                         port_list = ovim.get_ports(columns={'uuid', 'switch_port', 'vlan','name'}, filter={'net_id': sdn_network_id})
+                     except ovimException as e:
+                         raise NfvoException("ovimException obtaining external ports for net {}. ".format(network_id) + str(e), httperrors.Internal_Server_Error)
+                     #Remove field name and if port name is external_port save it as 'type'
+                     for port in port_list:
+                         if port['name'] == 'external_port':
+                             port['type'] = "External"
+                         del port['name']
+                     net['sdn_network_id'] = sdn_network_id
+                     net['sdn_attached_ports'] = port_list
+         elif item=="tenants":
+             content = myvim.get_tenant_list(filter_dict=filter_dict)
+         elif item == "images":
+             content = myvim.get_image_list(filter_dict=filter_dict)
+         else:
+             raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
+         logger.debug("vim_action response %s", content) #update nets Change from VIM format to NFVO format
+         if name and len(content)==1:
+             return {item[:-1]: content[0]}
+         elif name and len(content)==0:
+             raise NfvoException("No {} found with ".format(item[:-1]) + " and ".join(map(lambda x: str(x[0])+": "+str(x[1]), filter_dict.items())),
+                  datacenter)
+         else:
+             return {item: content}
+     except vimconn.vimconnException as e:
+         print("vim_action Not possible to get_{}_list from VIM: {} ".format(item, str(e)))
+         raise NfvoException("Not possible to get_{}_list from VIM: {}".format(item, str(e)), e.http_code)
+ def vim_action_delete(mydb, tenant_id, datacenter, item, name):
+     #get datacenter info
+     if tenant_id == "any":
+         tenant_id=None
+     datacenter_id, myvim  = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+     #get uuid name
+     content = vim_action_get(mydb, tenant_id, datacenter, item, name)
+     logger.debug("vim_action_delete vim response: " + str(content))
+     items = next(iter(content.values()))
+     if isinstance(items, list) and len(items) == 0:
+         raise NfvoException("Not found " + item, httperrors.Not_Found)
+     elif isinstance(items, list) and len(items) > 1:
+         raise NfvoException("Found more than one {} with this name. Use uuid.".format(item), httperrors.Not_Found)
+     else:  # it is a dict
+         item_id = items["id"]
+         item_name = str(items.get("name"))
+     try:
+         if item=="networks":
+             # If there is a SDN network associated to the vim-network, proceed to clear the relationship and delete it
+             sdn_network_id = get_sdn_net_id(mydb, tenant_id, datacenter, item_id)
+             if sdn_network_id is not None:
+                 #Delete any port attachment to this network
+                 try:
+                     port_list = ovim.get_ports(columns={'uuid'}, filter={'net_id': sdn_network_id})
+                 except ovimException as e:
+                     raise NfvoException(
+                         "ovimException obtaining external ports for net {}. ".format(sdn_network_id) + str(e),
+                         httperrors.Internal_Server_Error)
+                 # By calling one by one all ports to be detached we ensure that not only the external_ports get detached
+                 for port in port_list:
+                     vim_net_sdn_detach(mydb, tenant_id, datacenter, item_id, port['uuid'])
+                 #Delete from 'instance_nets' the correspondence between the vim-net-id and the sdn-net-id
+                 try:
+                     mydb.delete_row(FROM='instance_nets', WHERE={'instance_scenario_id': None,
+                                                                  'sdn_net_id': sdn_network_id,
+                                                                  'vim_net_id': item_id})
+                 except db_base_Exception as e:
+                     raise NfvoException("Error deleting correspondence for VIM/SDN dataplane networks{}: {}".format(
+                         item_id, e), e.http_code)
+                 #Delete the SDN network
+                 try:
+                     ovim.delete_network(sdn_network_id)
+                 except ovimException as e:
+                     logger.error("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e), exc_info=True)
+                     raise NfvoException("ovimException deleting SDN network={} ".format(sdn_network_id) + str(e),
+                                         httperrors.Internal_Server_Error)
+             content = myvim.delete_network(item_id)
+         elif item=="tenants":
+             content = myvim.delete_tenant(item_id)
+         elif item == "images":
+             content = myvim.delete_image(item_id)
+         else:
+             raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
+     except vimconn.vimconnException as e:
+         #logger.error( "vim_action Not possible to delete_{} {}from VIM: {} ".format(item, name, str(e)))
+         raise NfvoException("Not possible to delete_{} {} from VIM: {}".format(item, name, str(e)), e.http_code)
+     return "{} {} {} deleted".format(item[:-1], item_id,item_name)
+ def vim_action_create(mydb, tenant_id, datacenter, item, descriptor):
+     #get datacenter info
+     logger.debug("vim_action_create descriptor %s", str(descriptor))
+     if tenant_id == "any":
+         tenant_id=None
+     datacenter_id, myvim  = get_datacenter_by_name_uuid(mydb, tenant_id, datacenter)
+     try:
+         if item=="networks":
+             net = descriptor["network"]
+             net_name = net.pop("name")
+             net_type = net.pop("type", "bridge")
+             net_public = net.pop("shared", False)
+             net_ipprofile = net.pop("ip_profile", None)
+             net_vlan = net.pop("vlan", None)
++            net_provider_network_profile = None
++            if net_vlan:
++                net_provider_network_profile = {"segmentation-id": net_vlan}
++            content, _ = myvim.new_network(net_name, net_type, net_ipprofile, shared=net_public, provider_network_profile=net_provider_network_profile) #, **net)
+             #If the datacenter has a SDN controller defined and the network is of dataplane type, then create the sdn network
+             if get_sdn_controller_id(mydb, datacenter) != None and (net_type == 'data' or net_type == 'ptp'):
+                 #obtain datacenter_tenant_id
+                 datacenter_tenant_id = mydb.get_rows(SELECT=('uuid',),
+                                                      FROM='datacenter_tenants',
+                                                      WHERE={'datacenter_id': datacenter})[0]['uuid']
+                 try:
+                     sdn_network = {}
+                     sdn_network['vlan'] = net_vlan
+                     sdn_network['type'] = net_type
+                     sdn_network['name'] = net_name
+                     sdn_network['region'] = datacenter_tenant_id
+                     ovim_content  = ovim.new_network(sdn_network)
+                 except ovimException as e:
+                     logger.error("ovimException creating SDN network={} ".format(
+                         sdn_network) + str(e), exc_info=True)
+                     raise NfvoException("ovimException creating SDN network={} ".format(sdn_network) + str(e),
+                                         httperrors.Internal_Server_Error)
+                 # Save entry in database mano_db, table instance_nets, to establish a dictionary vim_net_id <-> sdn_net_id
+                 # use instance_scenario_id=None to distinguish from real instances of nets
+                 correspondence = {'instance_scenario_id': None,
+                                   'sdn_net_id': ovim_content,
+                                   'vim_net_id': content,
+                                   'datacenter_tenant_id': datacenter_tenant_id
+                                   }
+                 try:
+                     mydb.new_row('instance_nets', correspondence, add_uuid=True)
+                 except db_base_Exception as e:
+                     raise NfvoException("Error saving correspondence for VIM/SDN dataplane networks{}: {}".format(
+                         correspondence, e), e.http_code)
+         elif item=="tenants":
+             tenant = descriptor["tenant"]
+             content = myvim.new_tenant(tenant["name"], tenant.get("description"))
+         else:
+             raise NfvoException(item + "?", httperrors.Method_Not_Allowed)
+     except vimconn.vimconnException as e:
+         raise NfvoException("Not possible to create {} at VIM: {}".format(item, str(e)), e.http_code)
+     return vim_action_get(mydb, tenant_id, datacenter, item, content)
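+ # Illustrative only: sketches of the descriptors accepted by vim_action_create() above, based on the keys
+ # it pops (values hypothetical; 'type' defaults to "bridge", 'vlan' maps to the provider segmentation-id):
+ #   {"network": {"name": "dataplane0", "type": "data", "shared": False, "vlan": 400}}
+ #   {"tenant": {"name": "osm-tenant", "description": "demo tenant"}}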
+ def sdn_controller_create(mydb, tenant_id, sdn_controller):
+     wim_id = ovim.new_of_controller(sdn_controller)
+     thread_name = get_non_used_vim_name(sdn_controller['name'], wim_id, wim_id, None)
+     new_thread = vim_thread(task_lock, plugins, thread_name, wim_id, None, db=db)
+     new_thread.start()
+     thread_id = wim_id
+     vim_threads["running"][thread_id] = new_thread
+     logger.debug('New SDN controller created with uuid {}'.format(wim_id))
+     return wim_id
+ def sdn_controller_update(mydb, tenant_id, controller_id, sdn_controller):
+     data = ovim.edit_of_controller(controller_id, sdn_controller)
+     msg = 'SDN controller {} updated'.format(data)
+     vim_threads["running"][controller_id].insert_task("reload")
+     logger.debug(msg)
+     return msg
+ def sdn_controller_list(mydb, tenant_id, controller_id=None):
+     if controller_id is None:
+         data = ovim.get_of_controllers()
+     else:
+         data = ovim.show_of_controller(controller_id)
+     msg = 'SDN controller list:\n {}'.format(data)
+     logger.debug(msg)
+     return data
+ def sdn_controller_delete(mydb, tenant_id, controller_id):
+     select_ = ('uuid', 'config')
+     datacenters = mydb.get_rows(FROM='datacenters', SELECT=select_)
+     for datacenter in datacenters:
+         if datacenter['config']:
+             config = yaml.load(datacenter['config'], Loader=yaml.Loader)
+             if 'sdn-controller' in config and config['sdn-controller'] == controller_id:
+                 raise NfvoException("SDN controller {} is in use by datacenter {}".format(controller_id, datacenter['uuid']), httperrors.Conflict)
+     data = ovim.delete_of_controller(controller_id)
+     msg = 'SDN controller {} deleted'.format(data)
+     logger.debug(msg)
+     return msg
+ def datacenter_sdn_port_mapping_set(mydb, tenant_id, datacenter_id, sdn_port_mapping):
+     controller = mydb.get_rows(FROM="datacenters", SELECT=("config",), WHERE={"uuid":datacenter_id})
+     if len(controller) < 1:
+         raise NfvoException("Datacenter {} not present in the database".format(datacenter_id), httperrors.Not_Found)
+     try:
+         sdn_controller_id = yaml.load(controller[0]["config"], Loader=yaml.Loader)["sdn-controller"]
+     except Exception:
+         raise NfvoException("The datacenter {} has no SDN controller associated".format(datacenter_id), httperrors.Bad_Request)
+     sdn_controller = ovim.show_of_controller(sdn_controller_id)
+     switch_dpid = sdn_controller["dpid"]
+     maps = list()
+     for compute_node in sdn_port_mapping:
+         #element = {"ofc_id": sdn_controller_id, "region": datacenter_id, "switch_dpid": switch_dpid}
+         element = dict()
+         element["compute_node"] = compute_node["compute_node"]
+         if compute_node["ports"]:
+             for port in compute_node["ports"]:
+                 pci = port.get("pci")
+                 element["switch_port"] = port.get("switch_port")
+                 element["switch_mac"] = port.get("switch_mac")
+                 if not element["switch_port"] and not element["switch_mac"]:
+                     raise NfvoException ("The mapping must contain 'switch_port' or 'switch_mac'", httperrors.Bad_Request)
+                 for pci_expanded in utils.expand_brackets(pci):
+                     element["pci"] = pci_expanded
+                     maps.append(dict(element))
+     out = ovim.set_of_port_mapping(maps, sdn_id=sdn_controller_id, switch_dpid=switch_dpid, vim_id=datacenter_id)
+     vim_threads["running"][sdn_controller_id].insert_task("reload")
+     return out
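+ # Illustrative only: a sketch of the sdn_port_mapping structure consumed above. Each port entry needs
+ # 'switch_port' or 'switch_mac'; 'pci' may use the bracket syntax expanded by utils.expand_brackets
+ # (compute node and port names are hypothetical):
+ #   - compute_node: compute-0
+ #     ports:
+ #       - pci: "0000:81:10.[0-3]"
+ #         switch_port: eth3/12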
+ def datacenter_sdn_port_mapping_list(mydb, tenant_id, datacenter_id):
+     maps = ovim.get_of_port_mappings(db_filter={"datacenter_id": datacenter_id})
+     result = {
+         "sdn-controller": None,
+         "datacenter-id": datacenter_id,
+         "dpid": None,
+         "ports_mapping": list()
+     }
+     datacenter = mydb.get_table_by_uuid_name('datacenters', datacenter_id)
+     if datacenter['config']:
+         config = yaml.load(datacenter['config'], Loader=yaml.Loader)
+         if 'sdn-controller' in config:
+             controller_id = config['sdn-controller']
+             sdn_controller = sdn_controller_list(mydb, tenant_id, controller_id)
+             result["sdn-controller"] = controller_id
+             result["dpid"] = sdn_controller["dpid"]
+     if result["sdn-controller"] == None:
+         raise NfvoException("SDN controller is not defined for datacenter {}".format(datacenter_id), httperrors.Bad_Request)
+     if result["dpid"] == None:
+         raise NfvoException("It was not possible to determine DPID for SDN controller {}".format(result["sdn-controller"]),
+                         httperrors.Internal_Server_Error)
+     if len(maps) == 0:
+         return result
+     ports_correspondence_dict = dict()
+     for link in maps:
+         if result["sdn-controller"] != link["wim_id"]:
+             raise NfvoException("The sdn-controller specified for different port mappings differ", httperrors.Internal_Server_Error)
+         if result["dpid"] != link["switch_dpid"]:
+             raise NfvoException("The dpid specified for different port mappings differ", httperrors.Internal_Server_Error)
+         link_config = link["service_mapping_info"]
+         element = dict()
+         element["pci"] = link.get("device_interface_id")
+         if link["switch_port"]:
+             element["switch_port"] = link["switch_port"]
+         if link_config["switch_mac"]:
+             element["switch_mac"] = link_config.get("switch_mac")
+         if not link.get("interface_id") in ports_correspondence_dict:
+             content = dict()
+             content["compute_node"] = link.get("interface_id")
+             content["ports"] = list()
+             ports_correspondence_dict[link.get("interface_id")] = content
+         ports_correspondence_dict[link["interface_id"]]["ports"].append(element)
+     for key in sorted(ports_correspondence_dict):
+         result["ports_mapping"].append(ports_correspondence_dict[key])
+     return result
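+ # Illustrative only: sketched shape of the result built above (values hypothetical):
+ #   {"sdn-controller": "<uuid>", "datacenter-id": "<uuid>", "dpid": "<switch dpid>",
+ #    "ports_mapping": [{"compute_node": "compute-0",
+ #                       "ports": [{"pci": "0000:81:10.0", "switch_port": "eth3/12"}]}]}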
+ def datacenter_sdn_port_mapping_delete(mydb, tenant_id, datacenter_id):
+     return ovim.clear_of_port_mapping(db_filter={"datacenter_id":datacenter_id})
+ def create_RO_keypair(tenant_id):
+     """
+     Creates a public/private key pair for an RO tenant and returns their values
+     Params:
+         tenant_id: ID of the tenant
+     Return:
+         public_key: Public key for the RO tenant
+         private_key: Encrypted private key for RO tenant
+     """
+     bits = 2048
+     key = RSA.generate(bits)
+     try:
+         public_key = key.publickey().exportKey('OpenSSH')
+         if isinstance(public_key, ValueError):
+             raise NfvoException("Unable to create public key: {}".format(public_key), httperrors.Internal_Server_Error)
+         private_key = key.exportKey(passphrase=tenant_id, pkcs=8)
+     except (ValueError, NameError) as e:
+         raise NfvoException("Unable to create private key: {}".format(e), httperrors.Internal_Server_Error)
+     return public_key, private_key
+ def decrypt_key(key, tenant_id):
+     """
+     Decrypts an encrypted RSA key
+     Params:
+         key: Private key to be decrypted
+         tenant_id: ID of the tenant
+     Return:
+         unencrypted_key: Unencrypted private key for RO tenant
+     """
+     try:
+         key = RSA.importKey(key, tenant_id)
+         unencrypted_key = key.exportKey('PEM')
+         if isinstance(unencrypted_key, ValueError):
+             raise NfvoException("Unable to decrypt the private key: {}".format(unencrypted_key), httperrors.Internal_Server_Error)
+     except ValueError as e:
+         raise NfvoException("Unable to decrypt the private key: {}".format(e), httperrors.Internal_Server_Error)
+     return unencrypted_key
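+ # Illustrative only: a round-trip sketch of the two helpers above. The private key is exported encrypted
+ # with tenant_id as passphrase, so the same tenant_id decrypts it back to PEM:
+ #   public_key, private_key = create_RO_keypair(tenant_id)
+ #   pem = decrypt_key(private_key, tenant_id)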
diff --combined RO/osm_ro/vim_thread.py
index 0000000,728b659..8d397c2
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1499 +1,1500 @@@
 -            vim_net_id, created_items = self.vim.new_network(*params[0:3])
+ # -*- coding: utf-8 -*-
+ ##
+ # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+ # This file is part of openvim
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+ """"
+ This is thread that interacts with a VIM. It processes TASKs sequentially against a single VIM.
+ The tasks are stored at database in table vim_wim_actions
+ Several vim_wim_actions can refer to the same element at VIM (flavor, network, ...). This is somethng to avoid if RO
+ is migrated to a non-relational database as mongo db. Each vim_wim_actions reference a different instance_Xxxxx
+ In this case "related" colunm contains the same value, to know they refer to the same vim. In case of deletion, it
+ there is related tasks using this element, it is not deleted, The vim_info needed to delete is transfered to other task
+ The task content is (M: stored at memory, D: stored at database):
+     MD  instance_action_id:  reference a global action over an instance-scenario: database instance_actions
+     MD  task_index:     index number of the task. This together with the previous forms a unique key identifier
+     MD  datacenter_vim_id:  should contain the uuid of the VIM managed by this thread
+     MD  vim_id:     id of the vm,net,etc at VIM
+     MD  item:       database table name, can be instance_vms, instance_nets, TODO: datacenter_flavors, datacenter_images
+     MD  item_id:    uuid of the referenced entry in the previous table
+     MD  action:     CREATE, DELETE, FIND
+     MD  status:     SCHEDULED: action need to be done
+                     BUILD: not used
+                     DONE: Done and it must be polled to VIM periodically to see status. ONLY for action=CREATE or FIND
+                     FAILED: It cannot be created/found/deleted
+                     FINISHED: similar to DONE, but no refresh is needed anymore. Task is maintained at database but
+                         it is never processed by any thread
+                     SUPERSEDED: similar to FINISHED, but nothing has been done to complete the task.
+     MD  extra:      text with yaml format at database, dict at memory with:
+             params:     list with the params to be sent to the VIM for CREATE or FIND. For DELETE the vim_id is taken
+                         from other related tasks
+             find:       (only for CREATE tasks) if present it should FIND before creating and use if existing. Contains
+                         the FIND params
+             depends_on: list with the 'task_index'es of tasks that must be completed before. e.g. a vm creation depends
+                         on a net creation
+                         can contain an int (single index on the same instance-action) or str (complete action ID)
+             sdn_net_id: used for net.
+             interfaces: used for VMs. Each key is the uuid of the instance_interfaces entry at database
+                 iface_id: uuid of instance_interfaces
+                 sdn_port_id:
+                 sdn_net_id:
+                 vim_info
+             created_items: dictionary with extra elements created that need to be deleted. e.g. ports, volumes,...
+             created:    False if the VIM element is not created by other actions, and it should not be deleted
+             vim_status: VIM status of the element. Stored also at database in the instance_XXX
+             vim_info:   Detailed information of a vm/net from the VIM. Stored at database in the instance_XXX but not at
+                         vim_wim_actions
+     M   depends:    dict with task_index(from depends_on) to dependency task
+     M   params:     same as extra[params]
+     MD  error_msg:  descriptive text upon an error. Also stored at database instance_XXX
+     MD  created_at: task creation time. The task of creation must be the oldest
+     MD  modified_at: next time the task needs to be processed. For example, for a refresh, it contains the next time
+                      the refresh must be done
+     MD related:     All the tasks over the same VIM element have the same "related". Note that other VIMs can contain
+                     the same value of "related", but this thread only processes the tasks of one VIM. Also, "related"
+                     can be the same among several NS or instance-scenarios
+     MD worker:      Used to lock in case of several thread workers.
+ """
+ import threading
+ import time
+ import queue
+ import logging
+ from osm_ro import vimconn
+ from osm_ro.wim.sdnconn import SdnConnectorError
+ import yaml
+ from osm_ro.db_base import db_base_Exception
+ from http import HTTPStatus
+ from copy import deepcopy
+ __author__ = "Alfonso Tierno, Pablo Montes"
+ __date__ = "$28-Sep-2017 12:07:15$"
+ def is_task_id(task_id):
+     return task_id.startswith("TASK-")
+ class VimThreadException(Exception):
+     pass
+ class VimThreadExceptionNotFound(VimThreadException):
+     pass
+ class vim_thread(threading.Thread):
+     REFRESH_BUILD = 5  # 5 seconds
+     REFRESH_ACTIVE = 60  # 1 minute
+     REFRESH_ERROR = 600
+     REFRESH_DELETE = 3600 * 10
+     def __init__(self, task_lock, plugins, name=None, wim_account_id=None, datacenter_tenant_id=None, db=None):
+         """Init a thread.
+         Arguments:
+             'id' number of thread
+             'name' name of thread
+             'host','user':  host ip or name to manage and user
+             'db', 'db_lock': database class and lock to use it in exclusion
+         """
+         threading.Thread.__init__(self)
+         self.plugins = plugins
+         self.plugin_name = "unknown"
+         self.vim = None
+         self.sdnconnector = None
+         self.sdnconn_config = None
+         self.error_status = None
+         self.wim_account_id = wim_account_id
+         self.datacenter_tenant_id = datacenter_tenant_id
+         self.port_mapping = None
+         if self.wim_account_id:
+             self.target_k = "wim_account_id"
+             self.target_v = self.wim_account_id
+         else:
+             self.target_k = "datacenter_vim_id"
+             self.target_v = self.datacenter_tenant_id
+         if not name:
+             self.name = wim_account_id or str(datacenter_tenant_id)
+         else:
+             self.name = name
+         self.vim_persistent_info = {}
+         self.my_id = self.name[:64]
+         self.logger = logging.getLogger('openmano.{}.{}'.format("vim" if self.datacenter_tenant_id else "sdn",
+                                                                 self.name))
+         self.db = db
+         self.task_lock = task_lock
+         self.task_queue = queue.Queue(2000)
+     def _proccess_sdn_exception(self, exc):
+         if isinstance(exc, SdnConnectorError):
+             raise
+         else:
+             self.logger.error("plugin={} throws a non SdnConnectorError exception {}".format(self.plugin_name, exc),
+                               exc_info=True)
+             raise SdnConnectorError(str(exc), http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value) from exc
+     def _proccess_vim_exception(self, exc):
+         if isinstance(exc, vimconn.vimconnException):
+             raise
+         else:
+             self.logger.error("plugin={} throws a non vimconnException exception {}".format(self.plugin_name, exc),
+                               exc_info=True)
+             raise vimconn.vimconnException(str(exc), http_code=HTTPStatus.INTERNAL_SERVER_ERROR.value) from exc
+     def get_vim_sdn_connector(self):
+         if self.datacenter_tenant_id:
+             try:
+                 from_ = "datacenter_tenants as dt join datacenters as d on dt.datacenter_id=d.uuid"
+                 select_ = ('type', 'd.config as config', 'd.uuid as datacenter_id', 'vim_url', 'vim_url_admin',
+                            'd.name as datacenter_name', 'dt.uuid as datacenter_tenant_id',
+                            'dt.vim_tenant_name as vim_tenant_name', 'dt.vim_tenant_id as vim_tenant_id',
+                            'user', 'passwd', 'dt.config as dt_config')
+                 where_ = {"dt.uuid": self.datacenter_tenant_id}
+                 vims = self.db.get_rows(FROM=from_, SELECT=select_, WHERE=where_)
+                 vim = vims[0]
+                 vim_config = {}
+                 if vim["config"]:
+                     vim_config.update(yaml.load(vim["config"], Loader=yaml.Loader))
+                 if vim["dt_config"]:
+                     vim_config.update(yaml.load(vim["dt_config"], Loader=yaml.Loader))
+                 vim_config['datacenter_tenant_id'] = vim.get('datacenter_tenant_id')
+                 vim_config['datacenter_id'] = vim.get('datacenter_id')
+                 # get port_mapping
+                 # vim_port_mappings = self.ovim.get_of_port_mappings(
+                 #     db_filter={"datacenter_id": vim_config['datacenter_id']})
+                 # vim_config["wim_external_ports"] = [x for x in vim_port_mappings
+                 #                                     if x["service_mapping_info"].get("wim")]
+                 self.plugin_name = "rovim_" + vim["type"]
+                 self.vim = self.plugins[self.plugin_name].vimconnector(
+                     uuid=vim['datacenter_id'], name=vim['datacenter_name'],
+                     tenant_id=vim['vim_tenant_id'], tenant_name=vim['vim_tenant_name'],
+                     url=vim['vim_url'], url_admin=vim['vim_url_admin'],
+                     user=vim['user'], passwd=vim['passwd'],
+                     config=vim_config, persistent_info=self.vim_persistent_info
+                 )
+                 self.error_status = None
+                 self.logger.info("Vim Connector loaded for vim_account={}, plugin={}".format(
+                     self.datacenter_tenant_id, self.plugin_name))
+             except Exception as e:
+                 self.logger.error("Cannot load vimconnector for vim_account={} plugin={}: {}".format(
+                     self.datacenter_tenant_id, self.plugin_name, e))
+                 self.vim = None
+                 self.error_status = "Error loading vimconnector: {}".format(e)
+         else:
+             try:
+                 wim_account = self.db.get_rows(FROM="wim_accounts", WHERE={"uuid": self.wim_account_id})[0]
+                 wim = self.db.get_rows(FROM="wims", WHERE={"uuid": wim_account["wim_id"]})[0]
+                 if wim["config"]:
+                     self.sdnconn_config = yaml.load(wim["config"], Loader=yaml.Loader)
+                 else:
+                     self.sdnconn_config = {}
+                 if wim_account["config"]:
+                     self.sdnconn_config.update(yaml.load(wim_account["config"], Loader=yaml.Loader))
+                 self.port_mappings = self.db.get_rows(FROM="wim_port_mappings", WHERE={"wim_id": wim_account["wim_id"]})
+                 if self.port_mappings:
+                     self.sdnconn_config["service_endpoint_mapping"] = self.port_mappings
+                 self.plugin_name = "rosdn_" + wim["type"]
+                 self.sdnconnector = self.plugins[self.plugin_name](
+                     wim, wim_account, config=self.sdnconn_config)
+                 self.error_status = None
+                 self.logger.info("Sdn Connector loaded for wim_account={}, plugin={}".format(
+                     self.wim_account_id, self.plugin_name))
+             except Exception as e:
+                 self.logger.error("Cannot load sdn connector for wim_account={}, plugin={}: {}".format(
+                     self.wim_account_id, self.plugin_name, e))
+                 self.sdnconnector = None
+                 self.error_status = "Error loading sdn connector: {}".format(e)
+     def _get_db_task(self):
+         """
+         Read actions from the database and load them into memory. Fill self.refresh_list, pending_list, vim_actions
+         :return: None
+         """
+         now = time.time()
+         try:
+             database_limit = 20
+             task_related = None
+             while True:
+                 # get 20 (database_limit) entries each time
+                 vim_actions = self.db.get_rows(FROM="vim_wim_actions",
+                                                WHERE={self.target_k: self.target_v,
+                                                       "status": ['SCHEDULED', 'BUILD', 'DONE'],
+                                                       "worker": [None, self.my_id], "modified_at<=": now
+                                                       },
+                                                ORDER_BY=("modified_at", "created_at",),
+                                                LIMIT=database_limit)
+                 if not vim_actions:
+                     return None, None
+                 # if vim_actions[0]["modified_at"] > now:
+                 #     return int(vim_actions[0] - now)
+                 for task in vim_actions:
+                     # block related task
+                     if task_related == task["related"]:
+                         continue  # ignore if locking has already been tried for this task set
+                     task_related = task["related"]
+                     # lock ...
+                     self.db.update_rows("vim_wim_actions", UPDATE={"worker": self.my_id}, modified_time=0,
+                                         WHERE={self.target_k: self.target_v,
+                                                "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
+                                                "worker": [None, self.my_id],
+                                                "related": task_related,
+                                                "item": task["item"],
+                                                })
+                     # ... and read all related and check if locked
+                     related_tasks = self.db.get_rows(FROM="vim_wim_actions",
+                                                      WHERE={self.target_k: self.target_v,
+                                                             "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
+                                                             "related": task_related,
+                                                             "item": task["item"],
+                                                             },
+                                                      ORDER_BY=("created_at",))
+                     # check that all related tasks have been locked. If not, release and try again. This can happen
+                     # due to race conditions if a new related task has been inserted by nfvo in the process
+                     some_tasks_locked = False
+                     some_tasks_not_locked = False
+                     creation_task = None
+                     for relate_task in related_tasks:
+                         if relate_task["worker"] != self.my_id:
+                             some_tasks_not_locked = True
+                         else:
+                             some_tasks_locked = True
+                         if not creation_task and relate_task["action"] in ("CREATE", "FIND"):
+                             creation_task = relate_task
+                     if some_tasks_not_locked:
+                         if some_tasks_locked:  # unlock
+                             self.db.update_rows("vim_wim_actions", UPDATE={"worker": None}, modified_time=0,
+                                                 WHERE={self.target_k: self.target_v,
+                                                        "worker": self.my_id,
+                                                        "related": task_related,
+                                                        "item": task["item"],
+                                                        })
+                         continue
+                     # the creation task must be the first in the list of related_tasks
+                     assert related_tasks[0]["action"] in ("CREATE", "FIND")
+                     task["params"] = None
+                     if task["extra"]:
+                         extra = yaml.load(task["extra"], Loader=yaml.Loader)
+                     else:
+                         extra = {}
+                     task["extra"] = extra
+                     if extra.get("depends_on"):
+                         task["depends"] = {}
+                     if extra.get("params"):
+                         task["params"] = deepcopy(extra["params"])
+                     return task, related_tasks
+         except Exception as e:
+             self.logger.critical("Unexpected exception at _get_db_task: " + str(e), exc_info=True)
+             return None, None
+     def _delete_task(self, task):
+         """
+         Determine if this task needs to be done or superseded
+         :return: None
+         """
+         def copy_extra_created(copy_to, copy_from):
+             copy_to["created"] = copy_from["created"]
+             if copy_from.get("sdn_net_id"):
+                 copy_to["sdn_net_id"] = copy_from["sdn_net_id"]
+             if copy_from.get("interfaces"):
+                 copy_to["interfaces"] = copy_from["interfaces"]
+             if copy_from.get("created_items"):
+                 if not copy_to.get("created_items"):
+                     copy_to["created_items"] = {}
+                 copy_to["created_items"].update(copy_from["created_items"])
+         task_create = None
+         dependency_task = None
+         deletion_needed = False
+         if task["status"] == "FAILED":
+             return   # TODO: does this need to be retried?
+         try:
+             # get all related tasks
+             related_tasks = self.db.get_rows(FROM="vim_wim_actions",
+                                              WHERE={self.target_k: self.target_v,
+                                                     "status": ['SCHEDULED', 'BUILD', 'DONE', 'FAILED'],
+                                                     "action": ["FIND", "CREATE"],
+                                                     "related": task["related"],
+                                                     },
+                                              ORDER_BY=("created_at",),
+                                              )
+             for related_task in related_tasks:
+                 if related_task["item"] == task["item"] and related_task["item_id"] == task["item_id"]:
+                     task_create = related_task
+                     # TASK_CREATE
+                     if related_task["extra"]:
+                         extra_created = yaml.load(related_task["extra"], Loader=yaml.Loader)
+                         if extra_created.get("created"):
+                             deletion_needed = True
+                         related_task["extra"] = extra_created
+                 elif not dependency_task:
+                     dependency_task = related_task
+                 if task_create and dependency_task:
+                     break
+             # mark task_create as FINISHED
+             self.db.update_rows("vim_wim_actions", UPDATE={"status": "FINISHED"},
+                                 WHERE={self.target_k: self.target_v,
+                                        "instance_action_id": task_create["instance_action_id"],
+                                        "task_index": task_create["task_index"]
+                                        })
+             if not deletion_needed:
+                 return
+             elif dependency_task:
+                 # move creation information from task_create to the dependency task
+                 extra_new_created = yaml.load(dependency_task["extra"], Loader=yaml.Loader) or {}
+                 extra_new_created["created"] = extra_created["created"]
+                 copy_extra_created(copy_to=extra_new_created, copy_from=extra_created)
+                 self.db.update_rows("vim_wim_actions",
+                                     UPDATE={"extra": yaml.safe_dump(extra_new_created, default_flow_style=True,
+                                                                     width=256),
+                                             "vim_id": task_create.get("vim_id")},
+                                     WHERE={self.target_k: self.target_v,
+                                            "instance_action_id": dependency_task["instance_action_id"],
+                                            "task_index": dependency_task["task_index"]
+                                            })
+                 return False
+             else:
+                 task["vim_id"] = task_create["vim_id"]
+                 copy_extra_created(copy_to=task["extra"], copy_from=task_create["extra"])
+                 return True
+         except Exception as e:
+             self.logger.critical("Unexpected exception at _delete_task: " + str(e), exc_info=True)
+     def _refres_vm(self, task):
+         """Call VIM to get VMs status"""
+         database_update = None
+         vim_id = task["vim_id"]
+         vm_to_refresh_list = [vim_id]
+         try:
+             vim_dict = self.vim.refresh_vms_status(vm_to_refresh_list)
+             vim_info = vim_dict[vim_id]
+         except vimconn.vimconnException as e:
+             # Mark all tasks at VIM_ERROR status
+             self.logger.error("task=several get-VM: vimconnException when trying to refresh vms " + str(e))
+             vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
+         task_id = task["instance_action_id"] + "." + str(task["task_index"])
+         self.logger.debug("task={} get-VM: vim_vm_id={} result={}".format(task_id, task["vim_id"], vim_info))
+         # check and update interfaces
+         task_warning_msg = ""
+         for interface in vim_info.get("interfaces", ()):
+             vim_interface_id = interface["vim_interface_id"]
+             if vim_interface_id not in task["extra"]["interfaces"]:
+                 self.logger.critical("task={} get-VM: Interface not found {} on task info {}".format(
+                     task_id, vim_interface_id, task["extra"]["interfaces"]), exc_info=True)
+                 continue
+             task_interface = task["extra"]["interfaces"][vim_interface_id]
+             task_vim_interface = task_interface.get("vim_info")
+             if task_vim_interface != interface:
+                 # delete old port
+                 # if task_interface.get("sdn_port_id"):
+                 #     try:
+                 #         self.ovim.delete_port(task_interface["sdn_port_id"], idempotent=True)
+                 #         task_interface["sdn_port_id"] = None
+                 #     except ovimException as e:
+                 #         error_text = "ovimException deleting external_port={}: {}".format(
+                 #             task_interface["sdn_port_id"], e)
+                 #         self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
+                 #         task_warning_msg += error_text
+                 #         # TODO Set error_msg at instance_nets instead of instance VMs
+                 # Create SDN port
+                 # sdn_net_id = task_interface.get("sdn_net_id")
+                 # if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
+                 #     sdn_port_name = sdn_net_id + "." + task["vim_id"]
+                 #     sdn_port_name = sdn_port_name[:63]
+                 #     try:
+                 #         sdn_port_id = self.ovim.new_external_port(
+                 #             {"compute_node": interface["compute_node"],
+                 #                 "pci": interface["pci"],
+                 #                 "vlan": interface.get("vlan"),
+                 #                 "net_id": sdn_net_id,
+                 #                 "region": self.vim["config"]["datacenter_id"],
+                 #                 "name": sdn_port_name,
+                 #                 "mac": interface.get("mac_address")})
+                 #         task_interface["sdn_port_id"] = sdn_port_id
+                 #     except (ovimException, Exception) as e:
+                 #         error_text = "ovimException creating new_external_port compute_node={} pci={} vlan={} {}".\
+                 #             format(interface["compute_node"], interface["pci"], interface.get("vlan"), e)
+                 #         self.logger.error("task={} get-VM: {}".format(task_id, error_text), exc_info=True)
+                 #         task_warning_msg += error_text
+                 #         # TODO Set error_msg at instance_nets instead of instance VMs
+                 self.db.update_rows('instance_interfaces',
+                                     UPDATE={"mac_address": interface.get("mac_address"),
+                                             "ip_address": interface.get("ip_address"),
+                                             "vim_interface_id": interface.get("vim_interface_id"),
+                                             "vim_info": interface.get("vim_info"),
+                                             "sdn_port_id": task_interface.get("sdn_port_id"),
+                                             "compute_node": interface.get("compute_node"),
+                                             "pci": interface.get("pci"),
+                                             "vlan": interface.get("vlan")},
+                                     WHERE={'uuid': task_interface["iface_id"]})
+                 task_interface["vim_info"] = interface
+                 # if sdn_net_id and interface.get("compute_node") and interface.get("pci"):
+                 # # TODO Send message to task SDN to update
+         # check and update task and instance_vms database
+         vim_info_error_msg = None
+         if vim_info.get("error_msg"):
+             vim_info_error_msg = self._format_vim_error_msg(vim_info["error_msg"] + task_warning_msg)
+         elif task_warning_msg:
+             vim_info_error_msg = self._format_vim_error_msg(task_warning_msg)
+         task_vim_info = task["extra"].get("vim_info")
+         task_error_msg = task.get("error_msg")
+         task_vim_status = task["extra"].get("vim_status")
+         if task_vim_status != vim_info["status"] or task_error_msg != vim_info_error_msg or \
+                 (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+             database_update = {"status": vim_info["status"], "error_msg": vim_info_error_msg}
+             if vim_info.get("vim_info"):
+                 database_update["vim_info"] = vim_info["vim_info"]
+             task["extra"]["vim_status"] = vim_info["status"]
+             task["error_msg"] = vim_info_error_msg
+             if vim_info.get("vim_info"):
+                 task["extra"]["vim_info"] = vim_info["vim_info"]
+         return database_update
+     def _refres_net(self, task):
+         """Call VIM to get network status"""
+         database_update = None
+         vim_id = task["vim_id"]
+         net_to_refresh_list = [vim_id]
+         try:
+             vim_dict = self.vim.refresh_nets_status(net_to_refresh_list)
+             vim_info = vim_dict[vim_id]
+         except vimconn.vimconnException as e:
+             # Mark all tasks at VIM_ERROR status
+             self.logger.error("task=several get-net: vimconnException when trying to refresh nets " + str(e))
+             vim_info = {"status": "VIM_ERROR", "error_msg": str(e)}
+         task_id = task["instance_action_id"] + "." + str(task["task_index"])
+         self.logger.debug("task={} get-net: vim_net_id={} result={}".format(task_id, task["vim_id"], vim_info))
+         task_vim_info = task["extra"].get("vim_info")
+         task_vim_status = task["extra"].get("vim_status")
+         task_error_msg = task.get("error_msg")
+         # task_sdn_net_id = task["extra"].get("sdn_net_id")
+         vim_info_status = vim_info["status"]
+         vim_info_error_msg = vim_info.get("error_msg")
+         # get ovim status
+         # if task_sdn_net_id:
+         #     try:
+         #         sdn_net = self.ovim.show_network(task_sdn_net_id)
+         #     except (ovimException, Exception) as e:
+         #         text_error = "ovimException getting network snd_net_id={}: {}".format(task_sdn_net_id, e)
+         #         self.logger.error("task={} get-net: {}".format(task_id, text_error), exc_info=True)
+         #         sdn_net = {"status": "ERROR", "last_error": text_error}
+         #     if sdn_net["status"] == "ERROR":
+         #         if not vim_info_error_msg:
+         #             vim_info_error_msg = str(sdn_net.get("last_error"))
+         #         else:
+         #             vim_info_error_msg = "VIM_ERROR: {} && SDN_ERROR: {}".format(
+         #                 self._format_vim_error_msg(vim_info_error_msg, 1024 // 2 - 14),
+         #                 self._format_vim_error_msg(sdn_net["last_error"], 1024 // 2 - 14))
+         #         vim_info_status = "ERROR"
+         #     elif sdn_net["status"] == "BUILD":
+         #         if vim_info_status == "ACTIVE":
+         #             vim_info_status = "BUILD"
+         # update database
+         if vim_info_error_msg:
+             vim_info_error_msg = self._format_vim_error_msg(vim_info_error_msg)
+         if task_vim_status != vim_info_status or task_error_msg != vim_info_error_msg or \
+                 (vim_info.get("vim_info") and task_vim_info != vim_info["vim_info"]):
+             task["extra"]["vim_status"] = vim_info_status
+             task["error_msg"] = vim_info_error_msg
+             if vim_info.get("vim_info"):
+                 task["extra"]["vim_info"] = vim_info["vim_info"]
+             database_update = {"status": vim_info_status, "error_msg": vim_info_error_msg}
+             if vim_info.get("vim_info"):
+                 database_update["vim_info"] = vim_info["vim_info"]
+         return database_update
+     def _proccess_pending_tasks(self, task, related_tasks):
+         old_task_status = task["status"]
+         create_or_find = False   # if as result of processing this task something is created or found
+         next_refresh = 0
+         try:
+             if task["status"] == "SCHEDULED":
+                 # check if tasks that this depends on have been completed
+                 dependency_not_completed = False
+                 dependency_modified_at = 0
+                 for task_index in task["extra"].get("depends_on", ()):
+                     task_dependency = self._look_for_task(task["instance_action_id"], task_index)
+                     if not task_dependency:
+                         raise VimThreadException(
+                             "Cannot get depending net task trying to get depending task {}.{}".format(
+                                 task["instance_action_id"], task_index))
+                     # task["depends"]["TASK-" + str(task_index)] = task_dependency #it references another object,so
+                     # database must be look again
+                     if task_dependency["status"] == "SCHEDULED":
+                         dependency_not_completed = True
+                         dependency_modified_at = task_dependency["modified_at"]
+                         break
+                     elif task_dependency["status"] == "FAILED":
+                         raise VimThreadException(
+                             "Cannot {} {}, (task {}.{}) because depends on failed {}.{}, (task{}.{}): {}".format(
+                                 task["action"], task["item"],
+                                 task["instance_action_id"], task["task_index"],
+                                 task_dependency["instance_action_id"], task_dependency["task_index"],
+                                 task_dependency["action"], task_dependency["item"], task_dependency.get("error_msg")))
+                     task["depends"]["TASK-"+str(task_index)] = task_dependency
+                     task["depends"]["TASK-{}.{}".format(task["instance_action_id"], task_index)] = task_dependency
+                 if dependency_not_completed:
+                     # Postpone this task until 10 seconds after the dependency is expected to be modified.
+                     self.db.update_rows("vim_wim_actions", modified_time=dependency_modified_at + 10,
+                                         UPDATE={"worker": None},
+                                         WHERE={self.target_k: self.target_v, "worker": self.my_id,
+                                                "related": task["related"],
+                                                })
+                     # task["extra"]["tries"] = task["extra"].get("tries", 0) + 1
+                     # if task["extra"]["tries"] > 3:
+                     #     raise VimThreadException(
+                     #         "Cannot {} {}, (task {}.{}) because timeout waiting to complete {} {}, "
+                     #         "(task {}.{})".format(task["action"], task["item"],
+                     #                               task["instance_action_id"], task["task_index"],
+                     #                               task_dependency["instance_action_id"], task_dependency["task_index"]
+                     #                               task_dependency["action"], task_dependency["item"]))
+                     return
+             database_update = None
+             if task["action"] == "DELETE":
+                 delete_needed = self._delete_task(task)
+                 if not delete_needed:
+                     task["status"] = "SUPERSEDED"  # the deletion is no longer needed
+                     task["error_msg"] = None
+             if task["status"] == "SUPERSEDED":
+                 # not needed to do anything but update database with the new status
+                 database_update = None
+             elif not self.vim and not self.sdnconnector:
+                 task["status"] = "FAILED"
+                 task["error_msg"] = self.error_status
+                 database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
+             elif task["item_id"] != related_tasks[0]["item_id"] and task["action"] in ("FIND", "CREATE"):
+                 # Do nothing, just copy values from one to another and update the database
+                 task["status"] = related_tasks[0]["status"]
+                 task["error_msg"] = related_tasks[0]["error_msg"]
+                 task["vim_id"] = related_tasks[0]["vim_id"]
+                 extra = yaml.load(related_tasks[0]["extra"], Loader=yaml.Loader)
+                 task["extra"]["vim_status"] = extra.get("vim_status")
+                 next_refresh = related_tasks[0]["modified_at"] + 0.001
+                 database_update = {"status": task["extra"].get("vim_status", "VIM_ERROR"),
+                                    "error_msg": task["error_msg"]}
+                 if task["item"] == 'instance_vms':
+                     database_update["vim_vm_id"] = task["vim_id"]
+                 elif task["item"] == 'instance_nets':
+                     database_update["vim_net_id"] = task["vim_id"]
+             elif task["item"] == 'instance_vms':
+                 if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
+                     database_update = self._refres_vm(task)
+                     create_or_find = True
+                 elif task["action"] == "CREATE":
+                     create_or_find = True
+                     database_update = self.new_vm(task)
+                 elif task["action"] == "DELETE":
+                     self.del_vm(task)
+                 else:
+                     raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+             elif task["item"] == 'instance_nets':
+                 if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
+                     database_update = self._refres_net(task)
+                     create_or_find = True
+                 elif task["action"] == "CREATE":
+                     create_or_find = True
+                     database_update = self.new_net(task)
+                 elif task["action"] == "DELETE":
+                     self.del_net(task)
+                 elif task["action"] == "FIND":
+                     database_update = self.get_net(task)
+                 else:
+                     raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+             elif task["item"] == 'instance_wim_nets':
+                 if task["status"] in ('BUILD', 'DONE') and task["action"] in ("FIND", "CREATE"):
+                     database_update = self.new_or_update_sdn_net(task)
+                     create_or_find = True
+                 elif task["action"] == "CREATE":
+                     create_or_find = True
+                     database_update = self.new_or_update_sdn_net(task)
+                 elif task["action"] == "DELETE":
+                     self.del_sdn_net(task)
+                 elif task["action"] == "FIND":
+                     database_update = self.get_sdn_net(task)
+                 else:
+                     raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+             elif task["item"] == 'instance_sfis':
+                 if task["action"] == "CREATE":
+                     create_or_find = True
+                     database_update = self.new_sfi(task)
+                 elif task["action"] == "DELETE":
+                     self.del_sfi(task)
+                 else:
+                     raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+             elif task["item"] == 'instance_sfs':
+                 if task["action"] == "CREATE":
+                     create_or_find = True
+                     database_update = self.new_sf(task)
+                 elif task["action"] == "DELETE":
+                     self.del_sf(task)
+                 else:
+                     raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+             elif task["item"] == 'instance_classifications':
+                 if task["action"] == "CREATE":
+                     create_or_find = True
+                     database_update = self.new_classification(task)
+                 elif task["action"] == "DELETE":
+                     self.del_classification(task)
+                 else:
+                     raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+             elif task["item"] == 'instance_sfps':
+                 if task["action"] == "CREATE":
+                     create_or_find = True
+                     database_update = self.new_sfp(task)
+                 elif task["action"] == "DELETE":
+                     self.del_sfp(task)
+                 else:
+                     raise vimconn.vimconnException(self.name + "unknown task action {}".format(task["action"]))
+             else:
+                 raise vimconn.vimconnException(self.name + "unknown task item {}".format(task["item"]))
+                 # TODO
+         except VimThreadException as e:
+             task["error_msg"] = str(e)
+             task["status"] = "FAILED"
+             database_update = {"status": "VIM_ERROR", "error_msg": task["error_msg"]}
+             if task["item"] == 'instance_vms':
+                 database_update["vim_vm_id"] = None
+             elif task["item"] == 'instance_nets':
+                 database_update["vim_net_id"] = None
+         task_id = task["instance_action_id"] + "." + str(task["task_index"])
+         self.logger.debug("task={} item={} action={} result={}:'{}' params={}".format(
+             task_id, task["item"], task["action"], task["status"],
+             task["vim_id"] if task["status"] == "DONE" else task.get("error_msg"), task["params"]))
+         try:
+             if not next_refresh:
+                 if task["status"] == "DONE":
+                     next_refresh = time.time()
+                     if task["extra"].get("vim_status") == "BUILD":
+                         next_refresh += self.REFRESH_BUILD
+                     elif task["extra"].get("vim_status") in ("ERROR", "VIM_ERROR"):
+                         next_refresh += self.REFRESH_ERROR
+                     elif task["extra"].get("vim_status") == "DELETED":
+                         next_refresh += self.REFRESH_DELETE
+                     else:
+                         next_refresh += self.REFRESH_ACTIVE
+                 elif task["status"] == "FAILED":
+                     next_refresh = time.time() + self.REFRESH_DELETE
+             if create_or_find:
+                 # modify all related tasks with action FIND/CREATE that are not SCHEDULED
+                 self.db.update_rows(
+                     table="vim_wim_actions", modified_time=next_refresh + 0.001,
+                     UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
+                             "error_msg": task["error_msg"],
+                             },
+                     WHERE={self.target_k: self.target_v,
+                            "worker": self.my_id,
+                            "action": ["FIND", "CREATE"],
+                            "related": task["related"],
+                            "status<>": "SCHEDULED",
+                            })
+             # modify own task
+             self.db.update_rows(
+                 table="vim_wim_actions", modified_time=next_refresh,
+                 UPDATE={"status": task["status"], "vim_id": task.get("vim_id"),
+                         "error_msg": task["error_msg"],
+                         "extra": yaml.safe_dump(task["extra"], default_flow_style=True, width=256)},
+                 WHERE={"instance_action_id": task["instance_action_id"], "task_index": task["task_index"]})
+             # Unlock tasks
+             self.db.update_rows(
+                 table="vim_wim_actions", modified_time=0,
+                 UPDATE={"worker": None},
+                 WHERE={self.target_k: self.target_v,
+                        "worker": self.my_id,
+                        "related": task["related"],
+                        })
+             # Update table instance_actions
+             if old_task_status == "SCHEDULED" and task["status"] != old_task_status:
+                 self.db.update_rows(
+                     table="instance_actions",
+                     UPDATE={("number_failed" if task["status"] == "FAILED" else "number_done"): {"INCREMENT": 1}},
+                     WHERE={"uuid": task["instance_action_id"]})
+             if database_update:
+                 where_filter = {"related": task["related"]}
+                 if task["item"] == "instance_nets" and task["datacenter_vim_id"]:
+                     where_filter["datacenter_tenant_id"] = task["datacenter_vim_id"] 
+                 self.db.update_rows(table=task["item"],
+                                     UPDATE=database_update,
+                                     WHERE=where_filter)
+         except db_base_Exception as e:
+             self.logger.error("task={} Error updating database {}".format(task_id, e), exc_info=True)
+     def insert_task(self, task):
+         try:
+             self.task_queue.put(task, False)
+             return None
+         except queue.Full:
+             raise vimconn.vimconnException(self.name + ": timeout inserting a task")
+     def del_task(self, task):
+         with self.task_lock:
+             if task["status"] == "SCHEDULED":
+                 task["status"] = "SUPERSEDED"
+                 return True
+             else:  # task["status"] == "processing"
+                 self.task_lock.release()
+                 return False
+     def run(self):
+         self.logger.debug("Starting")
+         while True:
+             self.get_vim_sdn_connector()
+             self.logger.debug("Vimconnector loaded")
+             reload_thread = False
+             while True:
+                 try:
+                     while not self.task_queue.empty():
+                         task = self.task_queue.get()
+                         if isinstance(task, list):
+                             pass
+                         elif isinstance(task, str):
+                             if task == 'exit':
+                                 return 0
+                             elif task == 'reload':
+                                 reload_thread = True
+                                 break
+                         self.task_queue.task_done()
+                     if reload_thread:
+                         break
+                     task, related_tasks = self._get_db_task()
+                     if task:
+                         self._proccess_pending_tasks(task, related_tasks)
+                     else:
+                         time.sleep(5)
+                 except Exception as e:
+                     self.logger.critical("Unexpected exception at run: " + str(e), exc_info=True)
+         self.logger.debug("Finishing")
+     def _look_for_task(self, instance_action_id, task_id):
+         """
+         Look for a concrete task in the vim_wim_actions database table
+         :param instance_action_id: The instance_action_id
+         :param task_id: Can have several formats:
+             <task index>: integer
+             TASK-<task index>: backward compatibility,
+             [TASK-]<instance_action_id>.<task index>: this instance_action_id overrides the one in the parameter
+         :return: Task dictionary or None if not found
+         """
+         if isinstance(task_id, int):
+             task_index = task_id
+         else:
+             if task_id.startswith("TASK-"):
+                 task_id = task_id[5:]
+             ins_action_id, _, task_index = task_id.rpartition(".")
+             if ins_action_id:
+                 instance_action_id = ins_action_id
+         tasks = self.db.get_rows(FROM="vim_wim_actions", WHERE={"instance_action_id": instance_action_id,
+                                                                 "task_index": task_index})
+         if not tasks:
+             return None
+         task = tasks[0]
+         task["params"] = None
+         task["depends"] = {}
+         if task["extra"]:
+             extra = yaml.load(task["extra"], Loader=yaml.Loader)
+             task["extra"] = extra
+             task["params"] = extra.get("params")
+         else:
+             task["extra"] = {}
+         return task
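+     # Illustrative examples (hypothetical ids) of the task_id formats accepted above:
+     #     self._look_for_task("uuid-1", 3)           # task_index 3 of instance action "uuid-1"
+     #     self._look_for_task("uuid-1", "TASK-3")    # same, backward-compatible format
+     #     self._look_for_task("uuid-1", "uuid-2.3")  # task_index 3 of instance action "uuid-2"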
+     @staticmethod
+     def _format_vim_error_msg(error_text, max_length=1024):
+         if error_text and len(error_text) >= max_length:
+             return error_text[:max_length // 2 - 3] + " ... " + error_text[-max_length // 2 + 3:]
+         return error_text
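+     # Illustrative sketch (not from the original source): the truncation keeps the head and
+     # tail of the text and always stays below max_length, e.g.
+     #     _format_vim_error_msg("0123456789abcdefghijkl", max_length=20)
+     #     returns "0123456 ... fghijkl"  (19 characters)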
+     def new_vm(self, task):
+         task_id = task["instance_action_id"] + "." + str(task["task_index"])
+         try:
+             params = task["params"]
+             depends = task.get("depends")
+             net_list = params[5]
+             for net in net_list:
+                 if "net_id" in net and is_task_id(net["net_id"]):  # change task_id into network_id
+                     network_id = task["depends"][net["net_id"]].get("vim_id")
+                     if not network_id:
+                         raise VimThreadException(
+                             "Cannot create VM because depends on a network not created or found: " +
+                             str(depends[net["net_id"]]["error_msg"]))
+                     net["net_id"] = network_id
+             params_copy = deepcopy(params)
+             vim_vm_id, created_items = self.vim.new_vminstance(*params_copy)
+             # fill task_interfaces. Look for sdn_net_id at the database for each interface
+             task_interfaces = {}
+             for iface in params_copy[5]:
+                 task_interfaces[iface["vim_id"]] = {"iface_id": iface["uuid"]}
+                 result = self.db.get_rows(
+                     SELECT=('sdn_net_id', 'interface_id'),
+                     FROM='instance_nets as ine join instance_interfaces as ii on ii.instance_net_id=ine.uuid',
+                     WHERE={'ii.uuid': iface["uuid"]})
+                 if result:
+                     task_interfaces[iface["vim_id"]]["sdn_net_id"] = result[0]['sdn_net_id']
+                     task_interfaces[iface["vim_id"]]["interface_id"] = result[0]['interface_id']
+                 else:
+                     self.logger.critical("task={} new-VM: instance_nets uuid={} not found at DB".format(task_id,
+                                                                                                         iface["uuid"]),
+                                          exc_info=True)
+             task["vim_info"] = {}
+             task["extra"]["interfaces"] = task_interfaces
+             task["extra"]["created"] = True
+             task["extra"]["created_items"] = created_items
+             task["extra"]["vim_status"] = "BUILD"
+             task["error_msg"] = None
+             task["status"] = "DONE"
+             task["vim_id"] = vim_vm_id
+             instance_element_update = {"status": "BUILD", "vim_vm_id": vim_vm_id, "error_msg": None}
+             return instance_element_update
+         except (vimconn.vimconnException, VimThreadException) as e:
+             self.logger.error("task={} new-VM: {}".format(task_id, e))
+             error_text = self._format_vim_error_msg(str(e))
+             task["error_msg"] = error_text
+             task["status"] = "FAILED"
+             task["vim_id"] = None
+             instance_element_update = {"status": "VIM_ERROR", "vim_vm_id": None, "error_msg": error_text}
+             return instance_element_update
+     def del_vm(self, task):
+         # task_id = task["instance_action_id"] + "." + str(task["task_index"])
+         vm_vim_id = task["vim_id"]
+         # interfaces = task["extra"].get("interfaces", ())
+         try:
+             # for iface in interfaces.values():
+             #     if iface.get("sdn_port_id"):
+             #         try:
+             #             self.ovim.delete_port(iface["sdn_port_id"], idempotent=True)
+             #         except ovimException as e:
+             #             self.logger.error("task={} del-VM: ovimException when deleting external_port={}: {} ".format(
+             #                 task_id, iface["sdn_port_id"], e), exc_info=True)
+             #             # TODO Set error_msg at instance_nets
+             self.vim.delete_vminstance(vm_vim_id, task["extra"].get("created_items"))
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["error_msg"] = None
+             return None
+         except vimconn.vimconnException as e:
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             if isinstance(e, vimconn.vimconnNotFoundException):
+                 # If not found mark as Done and fill error_msg
+                 task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                 return None
+             task["status"] = "FAILED"
+             return None
+     def _get_net_internal(self, task, filter_param):
+         """
+         Common code for get_net and new_net. It looks for a network on the VIM with the filter_param
+         :param task: task for this find or find-or-create action
+         :param filter_param: parameters to send to the vimconnector
+         :return: a dict with the content to update the instance_nets database table. Raises an exception on error, or
+             when the network is not found or more than one is found
+         """
+         vim_nets = self.vim.get_network_list(filter_param)
+         if not vim_nets:
+             raise VimThreadExceptionNotFound("Network not found with this criteria: '{}'".format(filter_param))
+         elif len(vim_nets) > 1:
+             raise VimThreadException("More than one network found with this criteria: '{}'".format(filter_param))
+         vim_net_id = vim_nets[0]["id"]
+         # Discover if this network is managed by a sdn controller
+         sdn_net_id = None
+         result = self.db.get_rows(SELECT=('sdn_net_id',), FROM='instance_nets',
+                                   WHERE={'vim_net_id': vim_net_id, 'datacenter_tenant_id': self.datacenter_tenant_id},
+                                   ORDER="instance_scenario_id")
+         if result:
+             sdn_net_id = result[0]['sdn_net_id']
+         task["status"] = "DONE"
+         task["extra"]["vim_info"] = {}
+         task["extra"]["created"] = False
+         task["extra"]["vim_status"] = "BUILD"
+         task["extra"]["sdn_net_id"] = sdn_net_id
+         task["error_msg"] = None
+         task["vim_id"] = vim_net_id
+         instance_element_update = {"vim_net_id": vim_net_id, "created": False, "status": "BUILD",
+                                    "error_msg": None, "sdn_net_id": sdn_net_id}
+         return instance_element_update
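+     # Illustrative note (assumed filter shape, not from the original source): filter_param is
+     # passed straight to vim.get_network_list(), so it is typically a dict such as
+     #     {"name": "mgmt"}  or  {"id": "<vim-net-uuid>"}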
+     def get_net(self, task):
+         task_id = task["instance_action_id"] + "." + str(task["task_index"])
+         try:
+             params = task["params"]
+             filter_param = params[0]
+             instance_element_update = self._get_net_internal(task, filter_param)
+             return instance_element_update
+         except (vimconn.vimconnException, VimThreadException) as e:
+             self.logger.error("task={} get-net: {}".format(task_id, e))
+             task["status"] = "FAILED"
+             task["vim_id"] = None
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             instance_element_update = {"vim_net_id": None, "status": "VIM_ERROR",
+                                        "error_msg": task["error_msg"]}
+             return instance_element_update
+     def new_net(self, task):
+         vim_net_id = None
+         task_id = task["instance_action_id"] + "." + str(task["task_index"])
+         action_text = ""
+         try:
+             # FIND
+             if task["extra"].get("find"):
+                 action_text = "finding"
+                 filter_param = task["extra"]["find"][0]
+                 try:
+                     instance_element_update = self._get_net_internal(task, filter_param)
+                     return instance_element_update
+                 except VimThreadExceptionNotFound:
+                     pass
+             # CREATE
+             params = task["params"]
+             action_text = "creating VIM"
 -            # if len(params) >= 4:
 -            #     wim_account_name = params[3]
++
++            vim_net_id, created_items = self.vim.new_network(*params[0:5])
+             # net_name = params[0]
+             # net_type = params[1]
+             # wim_account_name = None
++            # if len(params) >= 6:
++            #     wim_account_name = params[5]
+             # TODO fix at nfvo adding external port
+             # if wim_account_name and self.vim.config["wim_external_ports"]:
+             #     # add external port to connect WIM. Try with compute node __WIM:wim_name and __WIM
+             #     action_text = "attaching external port to ovim network"
+             #     sdn_port_name = "external_port"
+             #     sdn_port_data = {
+             #         "compute_node": "__WIM:" + wim_account_name[0:58],
+             #         "pci": None,
+             #         "vlan": network["vlan"],
+             #         "net_id": sdn_net_id,
+             #         "region": self.vim["config"]["datacenter_id"],
+             #         "name": sdn_port_name,
+             #     }
+             #     try:
+             #         sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
+             #     except ovimException:
+             #         sdn_port_data["compute_node"] = "__WIM"
+             #         sdn_external_port_id = self.ovim.new_external_port(sdn_port_data)
+             #     self.logger.debug("Added sdn_external_port {} to sdn_network {}".format(sdn_external_port_id,
+             #                                                                             sdn_net_id))
+             task["status"] = "DONE"
+             task["extra"]["vim_info"] = {}
+             # task["extra"]["sdn_net_id"] = sdn_net_id
+             task["extra"]["vim_status"] = "BUILD"
+             task["extra"]["created"] = True
+             task["extra"]["created_items"] = created_items
+             task["error_msg"] = None
+             task["vim_id"] = vim_net_id
+             instance_element_update = {"vim_net_id": vim_net_id, "status": "BUILD",
+                                        "created": True, "error_msg": None}
+             return instance_element_update
+         except vimconn.vimconnException as e:
+             self.logger.error("task={} new-net: Error {}: {}".format(task_id, action_text, e))
+             task["status"] = "FAILED"
+             task["vim_id"] = vim_net_id
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             # task["extra"]["sdn_net_id"] = sdn_net_id
+             instance_element_update = {"vim_net_id": vim_net_id, "status": "VIM_ERROR",
+                                        "error_msg": task["error_msg"]}
+             return instance_element_update
+     def del_net(self, task):
+         net_vim_id = task["vim_id"]
+         # sdn_net_id = task["extra"].get("sdn_net_id")
+         try:
+             if net_vim_id:
+                 self.vim.delete_network(net_vim_id, task["extra"].get("created_items"))
+             # if sdn_net_id:
+             #     # Delete any attached port to this sdn network. There can be ports associated to this network in case
+             #     # it was manually done using 'openmano vim-net-sdn-attach'
+             #     port_list = self.ovim.get_ports(columns={'uuid'},
+             #                                     filter={'name': 'external_port', 'net_id': sdn_net_id})
+             #     for port in port_list:
+             #         self.ovim.delete_port(port['uuid'], idempotent=True)
+             #     self.ovim.delete_network(sdn_net_id, idempotent=True)
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["error_msg"] = None
+             return None
+         except vimconn.vimconnException as e:
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             if isinstance(e, vimconn.vimconnNotFoundException):
+                 # If not found mark as Done and fill error_msg
+                 task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                 return None
+         task["status"] = "FAILED"
+         return None
+     def new_or_update_sdn_net(self, task):
+         wimconn_net_id = task["vim_id"]
+         created_items = task["extra"].get("created_items")
+         connected_ports = task["extra"].get("connected_ports", [])
+         new_connected_ports = []
+         last_update = task["extra"].get("last_update", 0)
+         sdn_status = "BUILD"
+         sdn_info = None
+         task_id = task["instance_action_id"] + "." + str(task["task_index"])
+         error_list = []
+         try:
+             # FIND
+             if task["extra"].get("find"):
+                 wimconn_id = task["extra"]["find"][0]
+                 try:
+                     # just check that the connectivity service exists; the returned status is not used here
+                     self.sdnconnector.get_connectivity_service_status(wimconn_id)
+                     wimconn_net_id = wimconn_id
+                     instance_element_update = {"wim_internal_id": wimconn_net_id, "created": False, "status": "BUILD",
+                                                "error_msg": None, }
+                     return instance_element_update
+                 except Exception as e:
+                     if isinstance(e, SdnConnectorError) and e.http_code == HTTPStatus.NOT_FOUND.value:
+                         pass
+                     else:
+                         self._proccess_sdn_exception(e)
+             params = task["params"]
+             # CREATE
+             # look for ports
+             sdn_ports = []
+             pending_ports = 0
+             ports = self.db.get_rows(FROM='instance_interfaces', WHERE={'instance_wim_net_id': task["item_id"]})
+             sdn_need_update = False
+             for port in ports:
+                 # TODO. Do not connect if already done
+                 if port.get("compute_node") and port.get("pci"):
+                     for map in self.port_mappings:
+                         if map.get("device_id") == port["compute_node"] and \
+                                 map.get("device_interface_id") == port["pci"]:
+                             break
+                     else:
+                         if self.sdnconn_config.get("mapping_not_needed"):
+                             map = {
+                                 "service_endpoint_id": "{}:{}".format(port["compute_node"], port["pci"]),
+                                 "service_endpoint_encapsulation_info": {
+                                     "vlan": port["vlan"],
+                                     "mac": port["mac_address"],
+                                     "device_id": port["compute_node"],
+                                     "device_interface_id": port["pci"]
+                                 }
+                             }
+                         else:
+                             map = None
+                             error_list.append("Port mapping not found for compute_node={} pci={}".format(
+                                 port["compute_node"], port["pci"]))
+                     if map:
+                         if port["uuid"] not in connected_ports or port["modified_at"] > last_update:
+                             sdn_need_update = True
+                         new_connected_ports.append(port["uuid"])
+                         sdn_ports.append({
+                             "service_endpoint_id": map["service_endpoint_id"],
+                             "service_endpoint_encapsulation_type": "dot1q" if port["model"] == "SR-IOV" else None,
+                             "service_endpoint_encapsulation_info": {
+                                 "vlan": port["vlan"],
+                                 "mac": port["mac_address"],
+                                 "device_id": map.get("device_id"),
+                                 "device_interface_id": map.get("device_interface_id"),
+                                 "switch_dpid": map.get("switch_dpid"),
+                                 "switch_port": map.get("switch_port"),
+                                 "service_mapping_info": map.get("service_mapping_info"),
+                             }
+                         })
+                 else:
+                     pending_ports += 1
+             if pending_ports:
+                 error_list.append("Waiting for getting interfaces location from VIM. Obtained '{}' of {}"
+                                   .format(len(ports)-pending_ports, len(ports)))
+             # if there are more ports to connect or they have been modified, call create/update
+             if sdn_need_update and len(sdn_ports) >= 2:
+                 if not wimconn_net_id:
+                     if params[0] == "data":
+                         net_type = "ELAN"
+                     elif params[0] == "ptp":
+                         net_type = "ELINE"
+                     else:
+                         net_type = "L3"
+                     wimconn_net_id, created_items = self.sdnconnector.create_connectivity_service(net_type, sdn_ports)
+                 else:
+                     created_items = self.sdnconnector.edit_connectivity_service(wimconn_net_id, conn_info=created_items,
+                                                                                 connection_points=sdn_ports)
+                 last_update = time.time()
+                 connected_ports = new_connected_ports
+             elif wimconn_net_id:
+                 try: 
+                     wim_status_dict = self.sdnconnector.get_connectivity_service_status(wimconn_net_id,
+                                                                                         conn_info=created_items)
+                     sdn_status = wim_status_dict["sdn_status"]
+                     if wim_status_dict.get("error_msg"):
+                         error_list.append(wim_status_dict.get("error_msg"))
+                     if wim_status_dict.get("sdn_info"):
+                         sdn_info = str(wim_status_dict.get("sdn_info"))
+                 except Exception as e:
+                     self._proccess_sdn_exception(e)
+             task["status"] = "DONE"
+             task["extra"]["vim_info"] = {}
+             # task["extra"]["sdn_net_id"] = sdn_net_id
+             task["extra"]["vim_status"] = "BUILD"
+             task["extra"]["created"] = True
+             task["extra"]["created_items"] = created_items
+             task["extra"]["connected_ports"] = connected_ports
+             task["extra"]["last_update"] = last_update
+             task["error_msg"] = self._format_vim_error_msg(" ; ".join(error_list))
+             task["vim_id"] = wimconn_net_id
+             instance_element_update = {"wim_internal_id": wimconn_net_id, "status": sdn_status,
+                                        "created": True, "error_msg": task["error_msg"] or None}
+         except (vimconn.vimconnException, SdnConnectorError) as e:
+             self.logger.error("task={} new-sdn-net: Error: {}".format(task_id, e))
+             task["status"] = "FAILED"
+             task["vim_id"] = wimconn_net_id
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             # task["extra"]["sdn_net_id"] = sdn_net_id
+             instance_element_update = {"wim_internal_id": wimconn_net_id, "status": "WIM_ERROR",
+                                        "error_msg": task["error_msg"]}
+         if sdn_info:
+             instance_element_update["wim_info"] = sdn_info
+         return instance_element_update
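+     # Illustrative sdn_ports entry (hypothetical values) as built above for an SR-IOV port:
+     #     {"service_endpoint_id": "compute-0:0000:5d:00.1",
+     #      "service_endpoint_encapsulation_type": "dot1q",
+     #      "service_endpoint_encapsulation_info": {"vlan": 2001, "mac": "fa:16:3e:00:00:01",
+     #                                              "device_id": "compute-0", ...}}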
+     def del_sdn_net(self, task):
+         wimconn_net_id = task["vim_id"]
+         try:
+             try:
+                 if wimconn_net_id:
+                     self.sdnconnector.delete_connectivity_service(wimconn_net_id, task["extra"].get("created_items"))
+                 task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                 task["error_msg"] = None
+                 return None
+             except Exception as e:
+                 self._proccess_sdn_exception(e)
+         except SdnConnectorError as e:
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             if e.http_code == HTTPStatus.NOT_FOUND.value:
+                 # If not found mark as Done and fill error_msg
+                 task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                 task["error_msg"] = None
+                 return None
+         task["status"] = "FAILED"
+         return None
+     # Service Function Instances
+     def new_sfi(self, task):
+         vim_sfi_id = None
+         try:
+             # Waits for interfaces to be ready (avoids failure)
+             time.sleep(1)
+             dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
+             task_id = task["instance_action_id"] + "." + str(task["task_index"])
+             error_text = ""
+             interfaces = task["depends"][dep_id]["extra"].get("interfaces")
+             ingress_interface_id = task.get("extra").get("params").get("ingress_interface_id")
+             egress_interface_id = task.get("extra").get("params").get("egress_interface_id")
+             ingress_vim_interface_id = None
+             egress_vim_interface_id = None
+             for vim_interface, interface_data in interfaces.items():
+                 if interface_data.get("interface_id") == ingress_interface_id:
+                     ingress_vim_interface_id = vim_interface
+                     break
+             if ingress_interface_id != egress_interface_id:
+                 for vim_interface, interface_data in interfaces.items():
+                     if interface_data.get("interface_id") == egress_interface_id:
+                         egress_vim_interface_id = vim_interface
+                         break
+             else:
+                 egress_vim_interface_id = ingress_vim_interface_id
+             if not ingress_vim_interface_id or not egress_vim_interface_id:
+                 error_text = "Error creating Service Function Instance, Ingress: {}, Egress: {}".format(
+                     ingress_vim_interface_id, egress_vim_interface_id)
+                 self.logger.error(error_text)
+                 task["error_msg"] = error_text
+                 task["status"] = "FAILED"
+                 task["vim_id"] = None
+                 return None
+             # At the moment, every port associated with the VM will be used both as ingress and egress ports.
+             # Bear in mind that different VIM connectors might support SFI differently. In the case of OpenStack,
+             # only the first ingress and first egress ports will be used to create the SFI (Port Pair).
+             ingress_port_id_list = [ingress_vim_interface_id]
+             egress_port_id_list = [egress_vim_interface_id]
+             name = "sfi-{}".format(task["item_id"][:8])
+             # By default no form of IETF SFC Encapsulation will be used
+             vim_sfi_id = self.vim.new_sfi(name, ingress_port_id_list, egress_port_id_list, sfc_encap=False)
+             task["extra"]["created"] = True
+             task["extra"]["vim_status"] = "ACTIVE"
+             task["error_msg"] = None
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["vim_id"] = vim_sfi_id
+             instance_element_update = {"status": "ACTIVE", "vim_sfi_id": vim_sfi_id, "error_msg": None}
+             return instance_element_update
+         except (vimconn.vimconnException, VimThreadException) as e:
+             self.logger.error("Error creating Service Function Instance, task=%s: %s", task_id, str(e))
+             error_text = self._format_vim_error_msg(str(e))
+             task["error_msg"] = error_text
+             task["status"] = "FAILED"
+             task["vim_id"] = None
+             instance_element_update = {"status": "VIM_ERROR", "vim_sfi_id": None, "error_msg": error_text}
+             return instance_element_update
+     def del_sfi(self, task):
+         sfi_vim_id = task["vim_id"]
+         try:
+             self.vim.delete_sfi(sfi_vim_id)
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["error_msg"] = None
+             return None
+         except vimconn.vimconnException as e:
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             if isinstance(e, vimconn.vimconnNotFoundException):
+                 # If not found mark as Done and fill error_msg
+                 task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                 return None
+             task["status"] = "FAILED"
+             return None
+     def new_sf(self, task):
+         vim_sf_id = None
+         try:
+             task_id = task["instance_action_id"] + "." + str(task["task_index"])
+             error_text = ""
+             depending_tasks = ["TASK-" + str(dep_id) for dep_id in task["extra"]["depends_on"]]
+             # sfis = next(iter(task.get("depends").values())).get("extra").get("params")[5]
+             sfis = [task.get("depends").get(dep_task) for dep_task in depending_tasks]
+             sfi_id_list = []
+             for sfi in sfis:
+                 sfi_id_list.append(sfi.get("vim_id"))
+             name = "sf-{}".format(task["item_id"][:8])
+             # By default no form of IETF SFC Encapsulation will be used
+             vim_sf_id = self.vim.new_sf(name, sfi_id_list, sfc_encap=False)
+             task["extra"]["created"] = True
+             task["extra"]["vim_status"] = "ACTIVE"
+             task["error_msg"] = None
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["vim_id"] = vim_sf_id
+             instance_element_update = {"status": "ACTIVE", "vim_sf_id": vim_sf_id, "error_msg": None}
+             return instance_element_update
+         except (vimconn.vimconnException, VimThreadException) as e:
+             self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
+             error_text = self._format_vim_error_msg(str(e))
+             task["error_msg"] = error_text
+             task["status"] = "FAILED"
+             task["vim_id"] = None
+             instance_element_update = {"status": "VIM_ERROR", "vim_sf_id": None, "error_msg": error_text}
+             return instance_element_update
+     def del_sf(self, task):
+         sf_vim_id = task["vim_id"]
+         try:
+             self.vim.delete_sf(sf_vim_id)
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["error_msg"] = None
+             return None
+         except vimconn.vimconnException as e:
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             if isinstance(e, vimconn.vimconnNotFoundException):
+                 # If not found mark as Done and fill error_msg
+                 task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                 return None
+             task["status"] = "FAILED"
+             return None
+     def new_classification(self, task):
+         vim_classification_id = None
+         try:
+             params = task["params"]
+             task_id = task["instance_action_id"] + "." + str(task["task_index"])
+             dep_id = "TASK-" + str(task["extra"]["depends_on"][0])
+             error_text = ""
+             interfaces = task.get("depends").get(dep_id).get("extra").get("interfaces").keys()
+             # Bear in mind that different VIM connectors might support Classifications differently.
+             # In the case of OpenStack, only the first VNF attached to the classifier will be used
+             # to create the Classification(s) (the "logical source port" of the "Flow Classifier").
+             # Since the VNFFG classifier match lacks the ethertype, classification defaults to
+             # using the IPv4 flow classifier.
+             name = "c-{}".format(task["item_id"][:8])
+             # if no CIDR is given for the IP addresses, add /32:
+             ip_proto = int(params.get("ip_proto"))
+             source_ip = params.get("source_ip")
+             destination_ip = params.get("destination_ip")
+             source_port = params.get("source_port")
+             destination_port = params.get("destination_port")
+             definition = {"logical_source_port": interfaces[0]}
+             if ip_proto:
+                 if ip_proto == 1:
+                     ip_proto = 'icmp'
+                 elif ip_proto == 6:
+                     ip_proto = 'tcp'
+                 elif ip_proto == 17:
+                     ip_proto = 'udp'
+                 definition["protocol"] = ip_proto
+             if source_ip:
+                 if '/' not in source_ip:
+                     source_ip += '/32'
+                 definition["source_ip_prefix"] = source_ip
+             if source_port:
+                 definition["source_port_range_min"] = source_port
+                 definition["source_port_range_max"] = source_port
+             if destination_port:
+                 definition["destination_port_range_min"] = destination_port
+                 definition["destination_port_range_max"] = destination_port
+             if destination_ip:
+                 if '/' not in destination_ip:
+                     destination_ip += '/32'
+                 definition["destination_ip_prefix"] = destination_ip
+             vim_classification_id = self.vim.new_classification(
+                 name, 'legacy_flow_classifier', definition)
+             task["extra"]["created"] = True
+             task["extra"]["vim_status"] = "ACTIVE"
+             task["error_msg"] = None
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["vim_id"] = vim_classification_id
+             instance_element_update = {"status": "ACTIVE", "vim_classification_id": vim_classification_id,
+                                        "error_msg": None}
+             return instance_element_update
+         except (vimconn.vimconnException, VimThreadException) as e:
+             self.logger.error("Error creating Classification, task=%s: %s", task_id, str(e))
+             error_text = self._format_vim_error_msg(str(e))
+             task["error_msg"] = error_text
+             task["status"] = "FAILED"
+             task["vim_id"] = None
+             instance_element_update = {"status": "VIM_ERROR", "vim_classification_id": None, "error_msg": error_text}
+             return instance_element_update
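+     # Illustrative sketch (hypothetical values): params with ip_proto=6, source_ip="10.0.0.1"
+     # and destination_port=80 would yield the flow-classifier definition
+     #     {"logical_source_port": <first VIM interface id>, "protocol": "tcp",
+     #      "source_ip_prefix": "10.0.0.1/32",
+     #      "destination_port_range_min": 80, "destination_port_range_max": 80}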
+     def del_classification(self, task):
+         classification_vim_id = task["vim_id"]
+         try:
+             self.vim.delete_classification(classification_vim_id)
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["error_msg"] = None
+             return None
+         except vimconn.vimconnException as e:
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             if isinstance(e, vimconn.vimconnNotFoundException):
+                 # If not found mark as Done and fill error_msg
+                 task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                 return None
+             task["status"] = "FAILED"
+             return None
+     def new_sfp(self, task):
+         vim_sfp_id = None
+         try:
+             task_id = task["instance_action_id"] + "." + str(task["task_index"])
+             depending_tasks = [task.get("depends").get("TASK-" + str(tsk_id)) for tsk_id in
+                                task.get("extra").get("depends_on")]
+             error_text = ""
+             sf_id_list = []
+             classification_id_list = []
+             for dep in depending_tasks:
+                 vim_id = dep.get("vim_id")
+                 resource = dep.get("item")
+                 if resource == "instance_sfs":
+                     sf_id_list.append(vim_id)
+                 elif resource == "instance_classifications":
+                     classification_id_list.append(vim_id)
+             name = "sfp-{}".format(task["item_id"][:8])
+             # By default no form of IETF SFC Encapsulation will be used
+             vim_sfp_id = self.vim.new_sfp(name, classification_id_list, sf_id_list, sfc_encap=False)
+             task["extra"]["created"] = True
+             task["extra"]["vim_status"] = "ACTIVE"
+             task["error_msg"] = None
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["vim_id"] = vim_sfp_id
+             instance_element_update = {"status": "ACTIVE", "vim_sfp_id": vim_sfp_id, "error_msg": None}
+             return instance_element_update
+         except (vimconn.vimconnException, VimThreadException) as e:
+             self.logger.error("Error creating Service Function, task=%s: %s", task_id, str(e))
+             error_text = self._format_vim_error_msg(str(e))
+             task["error_msg"] = error_text
+             task["status"] = "FAILED"
+             task["vim_id"] = None
+             instance_element_update = {"status": "VIM_ERROR", "vim_sfp_id": None, "error_msg": error_text}
+             return instance_element_update
+     def del_sfp(self, task):
+         sfp_vim_id = task["vim_id"]
+         try:
+             self.vim.delete_sfp(sfp_vim_id)
+             task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+             task["error_msg"] = None
+             return None
+         except vimconn.vimconnException as e:
+             task["error_msg"] = self._format_vim_error_msg(str(e))
+             if isinstance(e, vimconn.vimconnNotFoundException):
+                 # If not found mark as Done and fill error_msg
+                 task["status"] = "FINISHED"  # with FINISHED instead of DONE it will not be refreshing
+                 return None
+             task["status"] = "FAILED"
+             return None
diff --combined RO/osm_ro/vimconn.py
index 0000000,6e20654..c97370d
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,942 +1,942 @@@
 -    def new_network(self, net_name, net_type, ip_profile=None, shared=False, vlan=None):
+ # -*- coding: utf-8 -*-
+ ##
+ # Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U.
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+ """
+ vimconn implements an abstract class for the VIM connector plugins
+  with the definition of the methods to be implemented.
+ """
+ import logging
+ import paramiko
+ import socket
+ from io import StringIO
+ import yaml
+ import sys
+ from email.mime.multipart import MIMEMultipart
+ from email.mime.text import MIMEText
+ from osm_ro.utils import deprecated
+ __author__ = "Alfonso Tierno, Igor D.C."
+ __date__  = "$14-aug-2017 23:59:59$"
+ # Error variables
+ HTTP_Bad_Request = 400
+ HTTP_Unauthorized = 401 
+ HTTP_Not_Found = 404 
+ HTTP_Method_Not_Allowed = 405 
+ HTTP_Request_Timeout = 408
+ HTTP_Conflict = 409
+ HTTP_Not_Implemented = 501
+ HTTP_Service_Unavailable = 503 
+ HTTP_Internal_Server_Error = 500 
+ class vimconnException(Exception):
+     """Common and base class Exception for all vimconnector exceptions"""
+     def __init__(self, message, http_code=HTTP_Bad_Request):
+         Exception.__init__(self, message)
+         self.http_code = http_code
+ class vimconnConnectionException(vimconnException):
+     """Connectivity error with the VIM"""
+     def __init__(self, message, http_code=HTTP_Service_Unavailable):
+         vimconnException.__init__(self, message, http_code)
+ class vimconnUnexpectedResponse(vimconnException):
+     """Get an wrong response from VIM"""
+     def __init__(self, message, http_code=HTTP_Service_Unavailable):
+         vimconnException.__init__(self, message, http_code)
+ class vimconnAuthException(vimconnException):
+     """Invalid credentials or authorization to perform this action over the VIM"""
+     def __init__(self, message, http_code=HTTP_Unauthorized):
+         vimconnException.__init__(self, message, http_code)
+ class vimconnNotFoundException(vimconnException):
+     """The item is not found at VIM"""
+     def __init__(self, message, http_code=HTTP_Not_Found):
+         vimconnException.__init__(self, message, http_code)
+ class vimconnConflictException(vimconnException):
+     """There is a conflict, e.g. more item found than one"""
+     def __init__(self, message, http_code=HTTP_Conflict):
+         vimconnException.__init__(self, message, http_code)
+ class vimconnNotSupportedException(vimconnException):
+     """The request is not supported by connector"""
+     def __init__(self, message, http_code=HTTP_Service_Unavailable):
+         vimconnException.__init__(self, message, http_code)
+ class vimconnNotImplemented(vimconnException):
+     """The method is not implemented by the connected"""
+     def __init__(self, message, http_code=HTTP_Not_Implemented):
+         vimconnException.__init__(self, message, http_code)
+ class vimconnector():
+     """Abstract base class for all the VIM connector plugins
+     These plugins must implement a vimconnector class derived from this 
+     and all these privated methods
+     """ 
+     def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None,
+                  config={}, persistent_info={}):
+         """
+         Constructor of VIM. Raises an exception if some needed parameter is missing, but it must not do any connectivity
+             checking against the VIM
+         :param uuid: internal id of this VIM
+         :param name: name assigned to this VIM, can be used for logging
+         :param tenant_id: 'tenant_id': (only one of them is mandatory) VIM tenant to be used
+         :param tenant_name: 'tenant_name': (only one of them is mandatory) VIM tenant to be used
+         :param url: url used for normal operations
+         :param url_admin: (optional), url used for administrative tasks
+         :param user: user to access
+         :param passwd: password
+         :param log_level: provided if it should use a different log_level than the general one
+         :param config: dictionary with extra VIM information. This contains a consolidated version of the VIM config
+                     at VIM_ACCOUNT (attach)
+         :param persistent_info: dict where the class can store information that will be available among class
+                     destroy/creation cycles. This info is unique per VIM/credential. At first call it will contain an
+                     empty dict. Useful to store login/tokens information for speed up communication
+         """
+         self.id = uuid
+         self.name = name
+         self.url = url
+         self.url_admin = url_admin
+         self.tenant_id = tenant_id
+         self.tenant_name = tenant_name
+         self.user = user
+         self.passwd = passwd
+         self.config = config or {}
+         self.availability_zone = None
+         self.logger = logging.getLogger('openmano.vim')
+         if log_level:
+             self.logger.setLevel(getattr(logging, log_level))
+         if not self.url_admin:   # try to use normal url
+             self.url_admin = self.url
+     
+     def __getitem__(self, index):
+         if index == 'tenant_id':
+             return self.tenant_id
+         elif index == 'tenant_name':
+             return self.tenant_name
+         elif index == 'id':
+             return self.id
+         elif index == 'name':
+             return self.name
+         elif index == 'user':
+             return self.user
+         elif index == 'passwd':
+             return self.passwd
+         elif index == 'url':
+             return self.url
+         elif index == 'url_admin':
+             return self.url_admin
+         elif index == "config":
+             return self.config
+         else:
+             raise KeyError("Invalid key '{}'".format(index))
+         
+     def __setitem__(self, index, value):
+         if index == 'tenant_id':
+             self.tenant_id = value
+         elif index == 'tenant_name':
+             self.tenant_name = value
+         elif index == 'id':
+             self.id = value
+         elif index == 'name':
+             self.name = value
+         elif index == 'user':
+             self.user = value
+         elif index == 'passwd':
+             self.passwd = value
+         elif index == 'url':
+             self.url = value
+         elif index == 'url_admin':
+             self.url_admin = value
+         else:
+             raise KeyError("Invalid key '{}'".format(index))
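Since __getitem__ and __setitem__ simply route to the attributes set in the constructor, a connector instance can be read and updated like a dictionary. A minimal sketch, assuming a hypothetical instance built with illustrative arguments:

    # illustrative instance; all argument values are made up
    conn = vimconnector(uuid="0000-1111", name="vim-test", tenant_id=None,
                        tenant_name="osm_demo", url="http://vim.example:5000")
    assert conn["name"] == "vim-test"   # served by __getitem__
    conn["tenant_id"] = "2222-3333"     # routed through __setitem__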
+     @staticmethod
+     def _create_mimemultipart(content_list):
+         """Creates a MIMEmultipart text combining the content_list
+         :param content_list: list of text scripts to be combined
+         :return: str of the created MIMEmultipart. If the list is empty returns None, if the list contains only one
+         element MIMEmultipart is not created and this content is returned
+         """
+         if not content_list:
+             return None
+         elif len(content_list) == 1:
+             return content_list[0]
+         combined_message = MIMEMultipart()
+         for content in content_list:
+             if content.startswith('#include'):
+                 mime_format = 'text/x-include-url'
+             elif content.startswith('#include-once'):
+                 mime_format = 'text/x-include-once-url'
+             elif content.startswith('#!'):
+                 mime_format = 'text/x-shellscript'
+             elif content.startswith('#cloud-config'):
+                 mime_format = 'text/cloud-config'
+             elif content.startswith('#cloud-config-archive'):
+                 mime_format = 'text/cloud-config-archive'
+             elif content.startswith('#upstart-job'):
+                 mime_format = 'text/upstart-job'
+             elif content.startswith('#part-handler'):
+                 mime_format = 'text/part-handler'
+             elif content.startswith('#cloud-boothook'):
+                 mime_format = 'text/cloud-boothook'
+             else:  # by default
+                 mime_format = 'text/x-shellscript'
+             sub_message = MIMEText(content, mime_format, sys.getdefaultencoding())
+             combined_message.attach(sub_message)
+         return combined_message.as_string()
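Because _create_mimemultipart is a static method, it can be sketched in isolation (assuming the module-level MIMEMultipart/MIMEText imports that the body requires); the two scripts below are made-up examples:

    scripts = [
        "#!/bin/sh\necho hello > /tmp/hello.txt",  # detected as text/x-shellscript
        "#cloud-config\nhostname: vnf-1",          # detected as text/cloud-config
    ]
    combined = vimconnector._create_mimemultipart(scripts)
    # two or more items -> a MIME multipart string; a single item is returned
    # unchanged; an empty list yields None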
+     def _create_user_data(self, cloud_config):
+         """
+         Creates the user-data (cloud-init content) from the cloud_config info
+         :param cloud_config: dictionary with
+             'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+             'users': (optional) list of users to be inserted, each item is a dict with:
+                 'name': (mandatory) user name,
+                 'key-pairs': (optional) list of strings with the public key to be inserted to the user
+             'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                 or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+             'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                 'dest': (mandatory) string with the destination absolute path
+                 'encoding': (optional, by default text). Can be one of:
+                     'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                 'content' (mandatory): string with the content of the file
+                 'permissions': (optional) string with file permissions, typically octal notation '0644'
+                 'owner': (optional) file owner, string with the format 'owner:group'
+             'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+         :return: config_drive, userdata. The first is a boolean or None, the second a string or None
+         """
+         config_drive = None
+         userdata = None
+         userdata_list = []
+         if isinstance(cloud_config, dict):
+             if cloud_config.get("user-data"):
+                 if isinstance(cloud_config["user-data"], str):
+                     userdata_list.append(cloud_config["user-data"])
+                 else:
+                     for u in cloud_config["user-data"]:
+                         userdata_list.append(u)
+             if cloud_config.get("boot-data-drive") is not None:
+                 config_drive = cloud_config["boot-data-drive"]
+             if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
+                 userdata_dict = {}
+                 # default user
+                 if cloud_config.get("key-pairs"):
+                     userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
+                     userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"]}]
+                 if cloud_config.get("users"):
+                     if "users" not in userdata_dict:
+                         userdata_dict["users"] = ["default"]
+                     for user in cloud_config["users"]:
+                         user_info = {
+                             "name": user["name"],
+                             "sudo": "ALL = (ALL)NOPASSWD:ALL"
+                         }
+                         if "user-info" in user:
+                             user_info["gecos"] = user["user-info"]
+                         if user.get("key-pairs"):
+                             user_info["ssh-authorized-keys"] = user["key-pairs"]
+                         userdata_dict["users"].append(user_info)
+                 if cloud_config.get("config-files"):
+                     userdata_dict["write_files"] = []
+                     for file in cloud_config["config-files"]:
+                         file_info = {
+                             "path": file["dest"],
+                             "content": file["content"]
+                         }
+                         if file.get("encoding"):
+                             file_info["encoding"] = file["encoding"]
+                         if file.get("permissions"):
+                             file_info["permissions"] = file["permissions"]
+                         if file.get("owner"):
+                             file_info["owner"] = file["owner"]
+                         userdata_dict["write_files"].append(file_info)
+                 userdata_list.append("#cloud-config\n" + yaml.safe_dump(userdata_dict, indent=4,
+                                                                         default_flow_style=False))
+             userdata = self._create_mimemultipart(userdata_list)
+             self.logger.debug("userdata: %s", userdata)
+         elif isinstance(cloud_config, str):
+             userdata = cloud_config
+         return config_drive, userdata
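A hedged sketch of a cloud_config dict matching the schema documented above, reusing the illustrative conn instance from the earlier example; all values are made up:

    cloud_config = {
        "key-pairs": ["ssh-rsa AAAA... osm@ro"],   # public key for the default user
        "users": [{"name": "osm", "key-pairs": ["ssh-rsa BBBB... osm@ro"]}],
        "config-files": [{"dest": "/etc/motd", "content": "managed by OSM\n",
                          "permissions": "0644"}],
        "boot-data-drive": True,
    }
    config_drive, userdata = conn._create_user_data(cloud_config)
    # config_drive -> True; userdata -> a "#cloud-config\n..." string for cloud-init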
+     def check_vim_connectivity(self):
+         """Checks VIM can be reached and user credentials are ok.
+         Returns None if success or raises vimconnConnectionException, vimconnAuthException, ...
+         """
+         # by default no checking until each connector implements it
+         return None
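A minimal sketch of how a concrete plugin might override this default no-op; the _reload_connection helper is hypothetical and not part of this base class:

    class examplevimconnector(vimconnector):
        def check_vim_connectivity(self):
            try:
                self._reload_connection()   # hypothetical plugin helper
            except Exception as e:
                raise vimconnConnectionException("Cannot reach VIM: {}".format(e))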
+     def new_tenant(self, tenant_name, tenant_description):
+         """Adds a new tenant to VIM with this name and description, this is done using admin_url if provided
+         "tenant_name": string max lenght 64
+         "tenant_description": string max length 256
+         returns the tenant identifier or raise exception
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def delete_tenant(self, tenant_id):
+         """Delete a tenant from VIM
+         tenant_id: returned VIM tenant_id on "new_tenant"
+         Returns None on success. Raises an exception on failure. If the tenant is not found, raises vimconnNotFoundException
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def get_tenant_list(self, filter_dict={}):
+         """Obtain tenants of VIM
+         filter_dict dictionary that can contain the following keys:
+             name: filter by tenant name
+             id: filter by tenant uuid/id
+             <other VIM specific>
+         Returns the tenant list of dictionaries, and an empty list if no tenant matches all the filters:
+             [{'name': '<name>', 'id': '<id>', ...}, ...]
+         """
+         raise vimconnNotImplemented("Should have implemented this")
 -            'vlan': in case of a data or ptp net_type, the intended vlan tag to be used for the network
++    def new_network(self, net_name, net_type, ip_profile=None, shared=False, provider_network_profile=None):
+         """Adds a tenant network to VIM
+         Params:
+             'net_name': name of the network
+             'net_type': one of:
+                 'bridge': overlay isolated network
+                 'data':   underlay E-LAN network for Passthrough and SRIOV interfaces
+                 'ptp':    underlay E-LINE network for Passthrough and SRIOV interfaces.
+             'ip_profile': is a dict containing the IP parameters of the network
+                 'ip_version': can be "IPv4" or "IPv6" (Currently only IPv4 is implemented)
+                 'subnet_address': ip_prefix_schema, that is X.X.X.X/Y
+                 'gateway_address': (Optional) ip_schema, that is X.X.X.X
+                 'dns_address': (Optional) comma separated list of ip_schema, e.g. X.X.X.X[,X,X,X,X]
+                 'dhcp_enabled': True or False
+                 'dhcp_start_address': ip_schema, first IP to grant
+                 'dhcp_count': number of IPs to grant.
+             'shared': if this network can be seen/use by other tenants/organization
++            'provider_network_profile': (optional) contains {segmentation-id: vlan, provider-network: vim_network}
+         Returns a tuple with the network identifier and created_items, or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_network. Can be used to store created segments, created l2gw connections, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def get_network_list(self, filter_dict={}):
+         """Obtain tenant networks of VIM
+         Params:
+             'filter_dict' (optional) contains entries to return only networks that matches ALL entries:
+                 name: string  => returns only networks with this name
+                 id:   string  => returns networks with this VIM id, this imply returns one network at most
+                 shared: boolean => returns only networks that are (or are not) shared
+                 tenant_id: string => returns only networks that belong to this tenant/project
+                 #(not used yet) admin_state_up: boolean => returns only networks that are (or are not) in admin state active
+                 #(not used yet) status: 'ACTIVE','ERROR',... => filter networks that are on this status
+         Returns the network list of dictionaries. Each dictionary contains:
+             'id': (mandatory) VIM network id
+             'name': (mandatory) VIM network name
+             'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'network_type': (optional) can be 'vxlan', 'vlan' or 'flat'
+             'segmentation_id': (optional) in case network_type is vlan or vxlan this field contains the segmentation id
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         List can be empty if no network matches the filter_dict. Raise an exception only upon VIM connectivity,
+             authorization, or some other unspecific error
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def get_network(self, net_id):
+         """Obtain network details from the 'net_id' VIM network
+         Return a dict that contains:
+             'id': (mandatory) VIM network id, that is, net_id
+             'name': (mandatory) VIM network name
+             'status': (mandatory) can be 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         Raises an exception upon error or when network is not found
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def delete_network(self, net_id, created_items=None):
+         """
+         Removes a tenant network from VIM and its associated elements
+         :param net_id: VIM identifier of the network, provided by method new_network
+         :param created_items: dictionary with extra items to be deleted. provided by method new_network
+         Returns the network identifier or raises an exception upon error or when network is not found
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def refresh_nets_status(self, net_list):
+         """Get the status of the networks
+         Params:
+             'net_list': a list with the VIM network ids whose status is to be retrieved
+         Returns a dictionary with:
+             'net_id':         #VIM id of this network
+                 status:     #Mandatory. Text with one of:
+                     #  DELETED (not found at vim)
+                     #  VIM_ERROR (Cannot connect to VIM, authentication problems, VIM response error, ...)
+                     #  OTHER (Vim reported other status not understood)
+                     #  ERROR (VIM indicates an ERROR status)
+                     #  ACTIVE, INACTIVE, DOWN (admin down),
+                     #  BUILD (on building process)
+                 error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR
+                 vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+             'net_id2': ...
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def get_flavor(self, flavor_id):
+         """Obtain flavor details from the VIM
+         Returns the flavor dict details {'id':<>, 'name':<>, other vim specific }
+         Raises an exception upon error or if not found
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def get_flavor_id_from_data(self, flavor_dict):
+         """Obtain flavor id that match the flavor description
+         Params:
+             'flavor_dict': dictionary that contains:
+                 'disk': main hard disk in GB
+                 'ram': memory in MB
+                 'vcpus': number of virtual cpus
+                 #TODO: complete parameters for EPA
+         Returns the flavor_id or raises a vimconnNotFoundException
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def new_flavor(self, flavor_data):
+         """Adds a tenant flavor to VIM
+             flavor_data contains a dictionary with information, keys:
+                 name: flavor name
+                 ram: memory (cloud type) in MBytes
+                 vcpus: cpus (cloud type)
+                 extended: EPA parameters
+                   - numas: #items requested in same NUMA
+                         memory: number of 1G huge pages memory
+                         paired-threads|cores|threads: number of paired hyperthreads, complete cores OR individual threads
+                         interfaces: # passthrough(PT) or SRIOV interfaces attached to this numa
+                           - name: interface name
+                             dedicated: yes|no|yes:sriov;  for PT, SRIOV or only one SRIOV for the physical NIC
+                             bandwidth: X Gbps; requested guarantee bandwidth
+                             vpci: requested virtual PCI address   
+                 disk: disk size
+                 is_public:
+                  #TODO: to be made concrete
+         Returns the flavor identifier"""
+         raise vimconnNotImplemented("Should have implemented this")
+     def delete_flavor(self, flavor_id):
+         """Deletes a tenant flavor from VIM identify by its id
+         Returns the used id or raise an exception"""
+         raise vimconnNotImplemented("Should have implemented this")
+     def new_image(self, image_dict):
+         """ Adds a tenant image to VIM
+         Returns the image id or raises an exception if failed
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+     def delete_image(self, image_id):
+         """Deletes a tenant image from VIM
+         Returns the image_id if image is deleted or raises an exception on error"""
+         raise vimconnNotImplemented("Should have implemented this")
+     def get_image_id_from_path(self, path):
+         """Get the image id from image path in the VIM database.
+            Returns the image_id or raises a vimconnNotFoundException
+         """
+         raise vimconnNotImplemented("Should have implemented this")
+         
+     def get_image_list(self, filter_dict={}):
+         """Obtain tenant images from VIM
+         Filter_dict can be:
+             name: image name
+             id: image uuid
+             checksum: image checksum
+             location: image path
+         Returns the image list of dictionaries:
+             [{<the fields at Filter_dict plus some VIM specific>}, ...]
+             List can be empty
+         """
+         raise vimconnNotImplemented( "Should have implemented this" )
+     def new_vminstance(self, name, description, start, image_id, flavor_id, net_list, cloud_config=None, disk_list=None,
+         availability_zone_index=None, availability_zone_list=None):
+         """Adds a VM instance to VIM
+         Params:
+             'start': (boolean) indicates if the VM must start or be created in pause mode.
+             'image_id','flavor_id': image and flavor VIM id to use for the VM
+             'net_list': list of interfaces, each one is a dictionary with:
+                 'name': (optional) name for the interface.
+                 'net_id': VIM network id where this interface must be connected. Mandatory for type==virtual
+                 'vpci': (optional) virtual vPCI address to assign at the VM. Can be ignored depending on VIM capabilities
+                 'model': (optional and only have sense for type==virtual) interface model: virtio, e1000, ...
+                 'mac_address': (optional) mac address to assign to this interface
+                 'ip_address': (optional) IP address to assign to this interface
+                 #TODO: CHECK if an optional 'vlan' parameter is needed for VIMs when type if VF and net_id is not provided,
+                     the VLAN tag to be used. In case net_id is provided, the internal network vlan is used for tagging VF
+                 'type': (mandatory) can be one of:
+                     'virtual', in this case always connected to a network of type 'net_type=bridge'
+                      'PCI-PASSTHROUGH' or 'PF' (passthrough): depending on VIM capabilities it can be connected to a data/ptp network or it
+                            can be created unconnected
+                      'SR-IOV' or 'VF' (SRIOV with VLAN tag): same as PF for network connectivity.
+                      'VFnotShared'(SRIOV without VLAN tag) same as PF for network connectivity. VF where no other VFs
+                             are allocated on the same physical NIC
+                 'bw': (optional) only for PF/VF/VFnotShared. Minimal Bandwidth required for the interface in GBPS
+                 'port_security': (optional) If False it must avoid any traffic filtering at this interface. If missing
+                                 or True, it must apply the default VIM behaviour
+                 After execution the method will add the key:
+                 'vim_id': must be filled/added by this method with the VIM identifier generated by the VIM for this
+                         interface. 'net_list' is modified
+             'cloud_config': (optional) dictionary with:
+                 'key-pairs': (optional) list of strings with the public key to be inserted to the default user
+                 'users': (optional) list of users to be inserted, each item is a dict with:
+                     'name': (mandatory) user name,
+                     'key-pairs': (optional) list of strings with the public key to be inserted to the user
+                 'user-data': (optional) can be a string with the text script to be passed directly to cloud-init,
+                     or a list of strings, each one contains a script to be passed, usually with a MIMEmultipart file
+                 'config-files': (optional). List of files to be transferred. Each item is a dict with:
+                     'dest': (mandatory) string with the destination absolute path
+                     'encoding': (optional, by default text). Can be one of:
+                         'b64', 'base64', 'gz', 'gz+b64', 'gz+base64', 'gzip+b64', 'gzip+base64'
+                     'content' (mandatory): string with the content of the file
+                     'permissions': (optional) string with file permissions, typically octal notation '0644'
+                     'owner': (optional) file owner, string with the format 'owner:group'
+                 'boot-data-drive': boolean to indicate if user-data must be passed using a boot drive (hard disk)
+             'disk_list': (optional) list with additional disks to the VM. Each item is a dict with:
+                 'image_id': (optional). VIM id of an existing image. If not provided an empty disk must be mounted
+                 'size': (mandatory) string with the size of the disk in GB
+             availability_zone_index: Index of availability_zone_list to use for this VM. None if no availability zone is required
+             availability_zone_list: list of availability zones given by user in the VNFD descriptor.  Ignore if
+                 availability_zone_index is None
+         Returns a tuple with the instance identifier and created_items or raises an exception on error
+             created_items can be None or a dictionary where this method can include key-values that will be passed to
+             the method delete_vminstance and action_vminstance. Can be used to store created ports, volumes, etc.
+             Format is vimconnector dependent, but do not use nested dictionaries and a value of None should be the same
+             as not present.
+         """
+         raise vimconnNotImplemented( "Should have implemented this" )
+         
+     def get_vminstance(self, vm_id):
+         """Returns the VM instance information from VIM"""
+         raise vimconnNotImplemented( "Should have implemented this" )
+         
+     def delete_vminstance(self, vm_id, created_items=None):
+         """
+         Removes a VM instance from VIM and its associated elements
+         :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+         :param created_items: dictionary with extra items to be deleted. provided by method new_vminstance and/or method
+             action_vminstance
+         :return: None or the same vm_id. Raises an exception on fail
+         """
+         raise vimconnNotImplemented( "Should have implemented this" )
+     def refresh_vms_status(self, vm_list):
+         """Get the status of the virtual machines and their interfaces/ports
+            Params: the list of VM identifiers
+            Returns a dictionary with:
+                 vm_id:          #VIM id of this Virtual Machine
+                     status:     #Mandatory. Text with one of:
+                                 #  DELETED (not found at vim)
+                                 #  VIM_ERROR (Cannot connect to VIM, VIM response error, ...) 
+                                 #  OTHER (Vim reported other status not understood)
+                                 #  ERROR (VIM indicates an ERROR status)
+                                 #  ACTIVE, PAUSED, SUSPENDED, INACTIVE (not running), 
+                                 #  BUILD (on building process), ERROR
+                                 #  ACTIVE:NoMgmtIP (active but none of its interfaces has an IP address)
+                                 #
+                     error_msg:  #Text with VIM error message, if any. Or the VIM connection ERROR 
+                     vim_info:   #Text with plain information obtained from vim (yaml.safe_dump)
+                     interfaces: list with interface info. Each item a dictionary with:
+                         vim_info:         #Text with plain information obtained from vim (yaml.safe_dump)
+                         mac_address:      #Text format XX:XX:XX:XX:XX:XX
+                         vim_net_id:       #network id where this interface is connected, if provided at creation
+                         vim_interface_id: #interface/port VIM id
+                         ip_address:       #null, or text with IPv4, IPv6 address
+                         compute_node:     #identification of compute node where PF,VF interface is allocated
+                         pci:              #PCI address of the NIC that hosts the PF,VF
+                         vlan:             #physical VLAN used for VF
+         """
+         raise vimconnNotImplemented( "Should have implemented this" )
+     
+     def action_vminstance(self, vm_id, action_dict, created_items={}):
+         """
+         Send an action to a VM instance. Returns created_items if the action was successfully sent to the VIM.
+         :param vm_id: VIM identifier of the VM, provided by method new_vminstance
+         :param action_dict: dictionary with the action to perform
+         :param created_items: provided by method new_vminstance is a dictionary with key-values that will be passed to
+             the method delete_vminstance. Can be used to store created ports, volumes, etc. Format is vimconnector
+             dependent, but do not use nested dictionaries and a value of None should be the same as not present. This
+             method can modify this value
+         :return: None, or a console dict
+         """
+         raise vimconnNotImplemented( "Should have implemented this" )
+     
+     def get_vminstance_console(self, vm_id, console_type="vnc"):
+         """
+         Get a console for the virtual machine
+         Params:
+             vm_id: uuid of the VM
+             console_type, can be:
+                 "novnc" (by default), "xvpvnc" for VNC types, 
+                 "rdp-html5" for RDP types, "spice-html5" for SPICE types
+         Returns dict with the console parameters:
+                 protocol: ssh, ftp, http, https, ...
+                 server:   usually ip address 
+                 port:     the http, ssh, ... port 
+                 suffix:   extra text, e.g. the http path and query string   
+         """
+         raise vimconnNotImplemented( "Should have implemented this" )
+     def inject_user_key(self, ip_addr=None, user=None, key=None, ro_key=None, password=None):
+         """
+         Inject a ssh public key in a VM
+         Params:
+             ip_addr: ip address of the VM
+             user: username (default-user) to enter in the VM
+             key: public key to be injected in the VM
+             ro_key: private key of the RO, used to enter in the VM if the password is not provided
+             password: password of the user to enter in the VM
+         The function does not return a value.
+         """
+         if not ip_addr or not user:
+             raise vimconnNotSupportedException("Both 'ip_addr' and 'user' must be provided")
+         elif not ro_key and not password:
+             raise vimconnNotSupportedException("Either 'ro_key' or 'password' must be provided")
+         else:
+             # note: an ordered list is required so that mkdir runs before the other commands
+             commands = ['mkdir -p ~/.ssh/', 'echo "{}" >> ~/.ssh/authorized_keys'.format(key),
+                         'chmod 644 ~/.ssh/authorized_keys', 'chmod 700 ~/.ssh/']
+             client = paramiko.SSHClient()
+             try:
+                 if ro_key:
+                     pkey = paramiko.RSAKey.from_private_key(StringIO(ro_key))
+                 else:
+                     pkey = None
+                 client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+                 client.connect(ip_addr, username=user, password=password, pkey=pkey, timeout=10)
+                 for command in commands:
+                     (i, o, e) = client.exec_command(command, timeout=10)
+                     returncode = o.channel.recv_exit_status()
+                     output = o.read()
+                     outerror = e.read()
+                     if returncode != 0:
+                         text = "run_command='{}' Error='{}'".format(command, outerror)
+                         raise vimconnUnexpectedResponse("Cannot inject ssh key in VM: '{}'".format(text))
+             except (socket.error, paramiko.AuthenticationException, paramiko.SSHException) as message:
+                 raise vimconnUnexpectedResponse(
+                     "Cannot inject ssh key in VM: '{}' - {}".format(ip_addr, str(message)))
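A hedged usage sketch of inject_user_key; the address, user, and key material are illustrative:

    conn.inject_user_key(ip_addr="10.10.0.15", user="ubuntu",
                         key="ssh-rsa AAAA... operator@osm", password="s3cret")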
+     # Optional methods
+     def new_classification(self, name, ctype, definition):
+         """Creates a traffic classification in the VIM
+         Params:
+             'name': name of this classification
+             'ctype': type of this classification
+             'definition': definition of this classification (type-dependent free-form text)
+         Returns the VIM's classification ID on success or raises an exception on failure
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def get_classification(self, classification_id):
+         """Obtain classification details of the VIM's classification with ID='classification_id'
+         Return a dict that contains:
+             'id': VIM's classification ID (same as classification_id)
+             'name': VIM's classification name
+             'type': type of this classification
+             'definition': definition of the classification
+             'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible
+         Raises an exception upon error or when classification is not found
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def get_classification_list(self, filter_dict={}):
+         """Obtain classifications from the VIM
+         Params:
+             'filter_dict' (optional): contains the entries to filter the classifications on and only return those that match ALL:
+                 id:   string => returns classifications with this VIM's classification ID, which implies a return of one classification at most
+                 name: string => returns only classifications with this name
+                 type: string => returns classifications of this type
+                 definition: string => returns classifications that have this definition
+                 tenant_id: string => returns only classifications that belong to this tenant/project
+         Returns a list of classification dictionaries, each dictionary contains:
+             'id': (mandatory) VIM's classification ID
+             'name': (mandatory) VIM's classification name
+             'type': type of this classification
+             'definition': definition of the classification
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         List can be empty if no classification matches the filter_dict. Raise an exception only upon VIM connectivity,
+             authorization, or some other unspecific error
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def delete_classification(self, classification_id):
+         """Deletes a classification from the VIM
+         Returns the classification ID (classification_id) or raises an exception upon error or when classification is not found
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def new_sfi(self, name, ingress_ports, egress_ports, sfc_encap=True):
+         """Creates a service function instance in the VIM
+         Params:
+             'name': name of this service function instance
+             'ingress_ports': set of ingress ports (VIM's port IDs)
+             'egress_ports': set of egress ports (VIM's port IDs)
+             'sfc_encap': boolean stating whether this specific instance supports IETF SFC Encapsulation
+         Returns the VIM's service function instance ID on success or raises an exception on failure
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def get_sfi(self, sfi_id):
+         """Obtain service function instance details of the VIM's service function instance with ID='sfi_id'
+         Return a dict that contains:
+             'id': VIM's sfi ID (same as sfi_id)
+             'name': VIM's sfi name
+             'ingress_ports': set of ingress ports (VIM's port IDs)
+             'egress_ports': set of egress ports (VIM's port IDs)
+             'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible
+         Raises an exception upon error or when service function instance is not found
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def get_sfi_list(self, filter_dict={}):
+         """Obtain service function instances from the VIM
+         Params:
+             'filter_dict' (optional): contains the entries to filter the sfis on and only return those that match ALL:
+                 id:   string  => returns sfis with this VIM's sfi ID, which implies a return of one sfi at most
+                 name: string  => returns only service function instances with this name
+                 tenant_id: string => returns only service function instances that belong to this tenant/project
+         Returns a list of service function instance dictionaries, each dictionary contains:
+             'id': (mandatory) VIM's sfi ID
+             'name': (mandatory) VIM's sfi name
+             'ingress_ports': set of ingress ports (VIM's port IDs)
+             'egress_ports': set of egress ports (VIM's port IDs)
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         List can be empty if no sfi matches the filter_dict. Raise an exception only upon VIM connectivity,
+             authorization, or some other unspecific error
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def delete_sfi(self, sfi_id):
+         """Deletes a service function instance from the VIM
+         Returns the service function instance ID (sfi_id) or raises an exception upon error or when sfi is not found
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def new_sf(self, name, sfis, sfc_encap=True):
+         """Creates (an abstract) service function in the VIM
+         Params:
+             'name': name of this service function
+             'sfis': set of service function instances of this (abstract) service function
+             'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
+         Returns the VIM's service function ID on success or raises an exception on failure
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def get_sf(self, sf_id):
+         """Obtain service function details of the VIM's service function with ID='sf_id'
+         Return a dict that contains:
+             'id': VIM's sf ID (same as sf_id)
+             'name': VIM's sf name
+             'sfis': VIM's sf's set of VIM's service function instance IDs
+             'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
+             'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible
+         Raises an exception upon error or when sf is not found
+         """
+         raise vimconnNotImplemented("SFC support not implemented")
+     def get_sf_list(self, filter_dict={}):
+         """Obtain service functions from the VIM
+         Params:
+             'filter_dict' (optional): contains the entries to filter the sfs on and only return those that match ALL:
+                 id:   string  => returns sfs with this VIM's sf ID, which implies a return of one sf at most
+                 name: string  => returns only service functions with this name
+                 tenant_id: string => returns only service functions that belong to this tenant/project
+         Returns a list of service function dictionaries, each dictionary contains:
+             'id': (mandatory) VIM's sf ID
+             'name': (mandatory) VIM's sf name
+             'sfis': VIM's sf's set of VIM's service function instance IDs
+             'sfc_encap': boolean stating whether this service function supports IETF SFC Encapsulation
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         List can be empty if no sf matches the filter_dict. Raise an exception only upon VIM connectivity,
+             authorization, or some other unspecific error
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def delete_sf(self, sf_id):
+         """Deletes (an abstract) service function from the VIM
+         Returns the service function ID (sf_id) or raises an exception upon error or when sf is not found
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def new_sfp(self, name, classifications, sfs, sfc_encap=True, spi=None):
+         """Creates a service function path
+         Params:
+             'name': name of this service function path
+             'classifications': set of traffic classifications that should be matched on to get into this sfp
+             'sfs': list of every service function that constitutes this path , from first to last
+             'sfc_encap': whether this is an SFC-Encapsulated chain (i.e using NSH), True by default
+             'spi': (optional) the Service Function Path identifier (SPI: Service Path Identifier) for this path
+         Returns the VIM's sfp ID on success or raises an exception on failure
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def get_sfp(self, sfp_id):
+         """Obtain service function path details of the VIM's sfp with ID='sfp_id'
+         Return a dict that contains:
+             'id': VIM's sfp ID (same as sfp_id)
+             'name': VIM's sfp name
+             'classifications': VIM's sfp's list of VIM's classification IDs
+             'sfs': VIM's sfp's list of VIM's service function IDs
+             'status': 'ACTIVE', 'INACTIVE', 'DOWN', 'BUILD', 'ERROR', 'VIM_ERROR', 'OTHER'
+             'error_msg': (optional) text that explains the ERROR status
+             other VIM specific fields: (optional) whenever possible
+         Raises an exception upon error or when sfp is not found
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def get_sfp_list(self, filter_dict={}):
+         """Obtain service function paths from VIM
+         Params:
+             'filter_dict' (optional): contains the entries to filter the sfps on, and only return those that match ALL:
+                 id:   string  => returns sfps with this VIM's sfp ID , which implies a return of one sfp at most
+                 name: string  => returns only sfps with this name
+                 tenant_id: string => returns only sfps that belong to this tenant/project
+         Returns a list of service function path dictionaries, each dictionary contains:
+             'id': (mandatory) VIM's sfp ID
+             'name': (mandatory) VIM's sfp name
+             'classifications': VIM's sfp's list of VIM's classification IDs
+             'sfs': VIM's sfp's list of VIM's service function IDs
+             other VIM specific fields: (optional) whenever possible using the same naming of filter_dict param
+         List can be empty if no sfp matches the filter_dict. Raise an exception only upon VIM connectivity,
+             authorization, or some other unspecific error
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+     def delete_sfp(self, sfp_id):
+         """Deletes a service function path from the VIM
+         Returns the sfp ID (sfp_id) or raises an exception upon error or when sf is not found
+         """
+         raise vimconnNotImplemented( "SFC support not implemented" )
+ # NOT USED METHODS in current version. Deprecated
+     @deprecated
+     def host_vim2gui(self, host, server_dict):
+         """Transform host dictionary from VIM format to GUI format,
+         and append to the server_dict
+         """
+         raise vimconnNotImplemented( "Should have implemented this" )
+     @deprecated
+     def get_hosts_info(self):
+         """Get the information of deployed hosts
+         Returns the hosts content"""
+         raise vimconnNotImplemented( "Should have implemented this" )
+     @deprecated
+     def get_hosts(self, vim_tenant):
+         """Get the hosts and deployed instances
+         Returns the hosts content"""
+         raise vimconnNotImplemented( "Should have implemented this" )
+     @deprecated
+     def get_processor_rankings(self):
+         """Get the processor rankings in the VIM database"""
+         raise vimconnNotImplemented( "Should have implemented this" )
+     
+     @deprecated
+     def new_host(self, host_data):
+         """Adds a new host to VIM"""
+         """Returns status code of the VIM response"""
+         raise vimconnNotImplemented( "Should have implemented this" )
+     
+     @deprecated
+     def new_external_port(self, port_data):
+         """Adds a external port to VIM"""
+         """Returns the port identifier"""
+         raise vimconnNotImplemented( "Should have implemented this" )
+         
+     @deprecated
+     def new_external_network(self, net_name, net_type):
+         """Adds an external network to VIM (shared)
+         Returns the network identifier"""
+         raise vimconnNotImplemented("Should have implemented this")
+     @deprecated
+     def connect_port_network(self, port_id, network_id, admin=False):
+         """Connects an external port to a network
+         Returns status code of the VIM response"""
+         raise vimconnNotImplemented("Should have implemented this")
+     @deprecated
+     def new_vminstancefromJSON(self, vm_data):
+         """Adds a VM instance to VIM"""
+         """Returns the instance identifier"""
+         raise vimconnNotImplemented( "Should have implemented this" )
diff --combined RO/test/test_RO.py
index 0000000,5d17087..030dfe0
mode 000000,100755..100755
--- /dev/null
@@@ -1,0 -1,2549 +1,2690 @@@
 -        elif test_config['vimtype'] == 'openstack':
+ #!/usr/bin/env python2
+ # -*- coding: utf-8 -*-
+ ##
+ # Copyright 2017
+ # This file is part of openmano
+ # All Rights Reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ ##
++# DEBUG WITH PDB
++from os import getenv
++if getenv('OSMRO_PDB_DEBUG'):
++    import sys
++    print(sys.path)
++    import pdb
++    pdb.set_trace()
++
++
+ """
+ Module for testing openmano functionality. It uses openmanoclient.py for invoking openmano
+ """
+ import logging
+ import os
+ import argcomplete
+ import unittest
+ import string
+ import inspect
+ import random
+ # import traceback
+ import glob
+ import yaml
+ import sys
+ import time
+ import uuid
+ from argparse import ArgumentParser
+ __author__ = "Pablo Montes, Alfonso Tierno"
+ __date__ = "$16-Feb-2017 17:08:16$"
+ __version__ = "0.1.0"
+ version_date = "Oct 2017"
+ test_config = {}    # used for global variables with the test configuration
+ class test_base(unittest.TestCase):
+     test_index = 1
+     test_text = None
+     @classmethod
+     def setUpClass(cls):
+         logger.info("{}. {}".format(test_config["test_number"], cls.__name__))
+     @classmethod
+     def tearDownClass(cls):
+         test_config["test_number"] += 1
+     def tearDown(self):
+         exec_info = sys.exc_info()
+         if exec_info == (None, None, None):
+             logger.info(self.__class__.test_text + " -> TEST OK")
+         else:
+             logger.warning(self.__class__.test_text + " -> TEST NOK")
+             logger.critical("Traceback error", exc_info=True)
+ def check_instance_scenario_active(uuid):
+     instance = test_config["client"].get_instance(uuid=uuid)
+     for net in instance['nets']:
+         status = net['status']
+         if status != 'ACTIVE':
+             return (False, status)
+     for vnf in instance['vnfs']:
+         for vm in vnf['vms']:
+             status = vm['status']
+             if status != 'ACTIVE':
+                 return (False, status)
+     return (True, None)
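A hedged usage sketch of this helper: poll until the instance becomes ACTIVE or give up; instance_uuid is illustrative:

    for _ in range(30):
        ok, status = check_instance_scenario_active(instance_uuid)
        if ok:
            break
        time.sleep(10)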
+ '''
+ IMPORTANT NOTE
+ All unittest classes for code based tests must have prefix 'test_' in order to be taken into account for tests
+ '''
+ class test_VIM_datacenter_tenant_operations(test_base):
+     tenant_name = None
+     def test_000_create_RO_tenant(self):
+         self.__class__.tenant_name = _get_random_string(20)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
++        logger.debug("Test create tenant")
+         tenant = test_config["client"].create_tenant(name=self.__class__.tenant_name,
+                                                      description=self.__class__.tenant_name)
+         logger.debug("{}".format(tenant))
+         self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.tenant_name)
+     def test_010_list_RO_tenant(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         tenant = test_config["client"].get_tenant(name=self.__class__.tenant_name)
+         logger.debug("{}".format(tenant))
+         self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.tenant_name)
+     def test_020_delete_RO_tenant(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         tenant = test_config["client"].delete_tenant(name=self.__class__.tenant_name)
+         logger.debug("{}".format(tenant))
+         assert('deleted' in tenant.get('result',""))
+ class test_VIM_datacenter_operations(test_base):
+     datacenter_name = None
+     def test_000_create_datacenter(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.datacenter_name = _get_random_string(20)
+         self.__class__.test_index += 1
+         self.datacenter = test_config["client"].create_datacenter(name=self.__class__.datacenter_name,
+                                                                   vim_url="http://fakeurl/fake")
+         logger.debug("{}".format(self.datacenter))
+         self.assertEqual(self.datacenter.get('datacenter', {}).get('name', ''), self.__class__.datacenter_name)
+     def test_010_list_datacenter(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         self.datacenter = test_config["client"].get_datacenter(all_tenants=True, name=self.__class__.datacenter_name)
+         logger.debug("{}".format(self.datacenter))
+         self.assertEqual(self.datacenter.get('datacenter', {}).get('name', ''), self.__class__.datacenter_name)
+     def test_020_attach_datacenter(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         self.datacenter = test_config["client"].attach_datacenter(name=self.__class__.datacenter_name,
+                                                                   vim_tenant_name='fake')
+         logger.debug("{}".format(self.datacenter))
+         assert ('uuid' in self.datacenter.get('datacenter', {}))
+     def test_030_list_attached_datacenter(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         self.datacenter = test_config["client"].get_datacenter(all_tenants=False, name=self.__class__.datacenter_name)
+         logger.debug("{}".format(self.datacenter))
+         self.assertEqual(self.datacenter.get('datacenter', {}).get('name', ''), self.__class__.datacenter_name)
+     def test_040_detach_datacenter(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         self.datacenter = test_config["client"].detach_datacenter(name=self.__class__.datacenter_name)
+         logger.debug("{}".format(self.datacenter))
+         assert ('detached' in self.datacenter.get('result', ""))
+     def test_050_delete_datacenter(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         self.datacenter = test_config["client"].delete_datacenter(name=self.__class__.datacenter_name)
+         logger.debug("{}".format(self.datacenter))
+         assert('deleted' in self.datacenter.get('result',""))
+ class test_VIM_network_operations(test_base):
+     vim_network_name = None
+     vim_network_uuid = None
+     def test_000_create_VIM_network(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.vim_network_name = _get_random_string(20)
+         self.__class__.test_index += 1
+         network = test_config["client"].vim_action("create", "networks", name=self.__class__.vim_network_name)
+         logger.debug("{}".format(network))
+         self.__class__.vim_network_uuid = network["network"]["id"]
+         self.assertEqual(network.get('network', {}).get('name', ''), self.__class__.vim_network_name)
+     def test_010_list_VIM_networks(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         networks = test_config["client"].vim_action("list", "networks")
+         logger.debug("{}".format(networks))
+     def test_020_get_VIM_network_by_uuid(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         network = test_config["client"].vim_action("show", "networks", uuid=self.__class__.vim_network_uuid)
+         logger.debug("{}".format(network))
+         self.assertEqual(network.get('network', {}).get('name', ''), self.__class__.vim_network_name)
+     def test_030_delete_VIM_network_by_uuid(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         network = test_config["client"].vim_action("delete", "networks", uuid=self.__class__.vim_network_uuid)
+         logger.debug("{}".format(network))
+         assert ('deleted' in network.get('result', ""))
+ class test_VIM_image_operations(test_base):
+     def test_000_list_VIM_images(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         images = test_config["client"].vim_action("list", "images")
+         logger.debug("{}".format(images))
+ '''
+ The following is a non-critical test that will fail most of the time.
+ In the case of an OpenStack datacenter these tests will only succeed if RO has access to the admin endpoint.
+ This test will only be executed if specifically requested by the user.
+ '''
+ class test_VIM_tenant_operations(test_base):
+     vim_tenant_name = None
+     vim_tenant_uuid = None
+     @classmethod
+     def setUpClass(cls):
+         test_base.setUpClass(cls)
+         logger.warning("In case of OpenStack datacenter these tests will only success "
+                        "if RO has access to the admin endpoint")
+     def test_000_create_VIM_tenant(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.vim_tenant_name = _get_random_string(20)
+         self.__class__.test_index += 1
+         tenant = test_config["client"].vim_action("create", "tenants", name=self.__class__.vim_tenant_name)
+         logger.debug("{}".format(tenant))
+         self.__class__.vim_tenant_uuid = tenant["tenant"]["id"]
+         self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.vim_tenant_name)
+     def test_010_list_VIM_tenants(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         tenants = test_config["client"].vim_action("list", "tenants")
+         logger.debug("{}".format(tenants))
+     def test_020_get_VIM_tenant_by_uuid(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         tenant = test_config["client"].vim_action("show", "tenants", uuid=self.__class__.vim_tenant_uuid)
+         logger.debug("{}".format(tenant))
+         self.assertEqual(tenant.get('tenant', {}).get('name', ''), self.__class__.vim_tenant_name)
+     def test_030_delete_VIM_tenant_by_uuid(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         tenant = test_config["client"].vim_action("delete", "tenants", uuid=self.__class__.vim_tenant_uuid)
+         logger.debug("{}".format(tenant))
+         assert ('deleted' in tenant.get('result', ""))
+ class test_vimconn_connect(test_base):
+     def test_000_connect(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         if test_config['vimtype'] == 'vmware':
+             vca_object = test_config["vim_conn"].connect()
+             logger.debug("{}".format(vca_object))
+             self.assertIsNotNone(vca_object)
 -        logger.debug("{}".format(network))
++        elif test_config['vimtype'] in ('openstack', 'azure'):
+             test_config["vim_conn"]._reload_connection()
+             network_list = test_config["vim_conn"].get_network_list()
+             logger.debug("{}".format(network_list))
+             self.assertIsNotNone(network_list)
++
+ class test_vimconn_new_network(test_base):
+     network_name = None
+     def test_000_new_network(self):
+         self.__class__.network_name = _get_random_string(20)
+         network_type = 'bridge'
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                      self.__class__.test_index, inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                           net_type=network_type)
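++        # new_network returns a tuple; the first element is the network id, the second is not needed by these tests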
+         self.__class__.network_id = network
 -            internal_connections_list = vnf_descriptor['vnf']['internal-connections']
++        logger.debug("Created network {}".format(network))
+         network_list = test_config["vim_conn"].get_network_list()
++        logger.debug("Network list {}".format(network_list))
+         for net in network_list:
+             if self.__class__.network_name in net.get('name'):
+                 self.assertIn(self.__class__.network_name, net.get('name'))
+                 self.assertEqual(net.get('type'), network_type)
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.__class__.network_id))
++        network_list = test_config["vim_conn"].get_network_list()
++        logger.debug("Network list after deletion {}".format(network_list))
++
+     def test_010_new_network_by_types(self):
+         delete_net_ids = []
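++        # create one network per vimconn type and check the type reported back by the VIM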
+         network_types = ['data', 'bridge', 'mgmt']
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
++        network_list = test_config["vim_conn"].get_network_list()
++        logger.debug("Network list at start {}".format(network_list))
+         self.__class__.test_index += 1
+         for net_type in network_types:
+             self.__class__.network_name = _get_random_string(20)
+             network_id, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                                 net_type=net_type)
+             delete_net_ids.append(network_id)
+             logger.debug("{}".format(network_id))
+             network_list = test_config["vim_conn"].get_network_list()
+             for net in network_list:
+                 if self.__class__.network_name in net.get('name'):
+                     self.assertIn(self.__class__.network_name, net.get('name'))
+                 if net_type in net.get('type'):
+                     self.assertEqual(net.get('type'), net_type)
+                 else:
+                     self.assertNotEqual(net.get('type'), net_type)
+         # Deleting created network
+         for net_id in delete_net_ids:
+             result = test_config["vim_conn"].delete_network(net_id)
+             if result:
+                 logger.info("Network id {} sucessfully deleted".format(net_id))
+             else:
+                 logger.info("Failed to delete network id {}".format(net_id))
++        network_list = test_config["vim_conn"].get_network_list()
++        logger.debug("Network list after test {}".format(network_list))
+     def test_020_new_network_by_ipprofile(self):
+         test_directory_content = os.listdir(test_config["test_directory"])
+         for dir_name in test_directory_content:
+             if dir_name == 'simple_multi_vnfc':
+                 self.__class__.scenario_test_path = test_config["test_directory"] + '/'+ dir_name
+                 vnfd_files = glob.glob(self.__class__.scenario_test_path+'/vnfd_*.yaml')
+                 break
+         for vnfd in vnfd_files:
+             with open(vnfd, 'r') as stream:
+                 vnf_descriptor = yaml.load(stream, Loader=yaml.Loader)
 -                if 'ip-profile' in item:
 -                    version = item['ip-profile']['ip-version']
 -                    dhcp_count = item['ip-profile']['dhcp']['count']
 -                    dhcp_enabled = item['ip-profile']['dhcp']['enabled']
 -                    dhcp_start_address = item['ip-profile']['dhcp']['start-address']
 -                    subnet_address = item['ip-profile']['subnet-address']
 -
++            #internal_connections_list = vnf_descriptor['vnf']['internal-connections']
++            internal_connections_list = vnf_descriptor['vnfd-catalog']['vnfd'][0]['ip-profiles']
+             for item in internal_connections_list:
 -        if test_config['vimtype'] == 'openstack':
++                version = item['ip-version']
++                dhcp_count = item['dhcp-params']['count']
++                dhcp_enabled = item['dhcp-params']['enabled']
++                dhcp_start_address = item['dhcp-params']['start-address']
++                subnet_address = item['subnet-address']
+         self.__class__.network_name = _get_random_string(20)
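++        # map the values read from the descriptor's ip-profiles entry onto the vimconn ip_profile keys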
+         ip_profile = {'dhcp_count': dhcp_count,
+                       'dhcp_enabled': dhcp_enabled,
+                       'dhcp_start_address': dhcp_start_address,
+                       'ip_version': version,
+                       'subnet_address': subnet_address
+                      }
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                            net_type='mgmt',
+                                                                      ip_profile=ip_profile)
+         self.__class__.network_id = network
+         logger.debug("{}".format(network))
+         network_list = test_config["vim_conn"].get_network_list()
++        logger.debug("Created network by ip_profile {}".format(network_list))
+         for net in network_list:
+             if self.__class__.network_name in net.get('name'):
+                 self.assertIn(self.__class__.network_name, net.get('name'))
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+     def test_030_new_network_by_isshared(self):
+         self.__class__.network_name = _get_random_string(20)
+         shared = True
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                          net_type='bridge',
+                                                                              shared=shared)
+         self.__class__.network_id = network
+         logger.debug("{}".format(network))
+         network_list = test_config["vim_conn"].get_network_list()
+         for net in network_list:
+             if self.__class__.network_name in net.get('name'):
+                 self.assertIn(self.__class__.network_name, net.get('name'))
+                 self.assertEqual(net.get('shared'), shared)
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+     def test_040_new_network_by_negative(self):
+         self.__class__.network_name = _get_random_string(20)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                     net_type='unknowntype')
+         self.__class__.network_id = network
+         logger.debug("{}".format(network))
+         network_list = test_config["vim_conn"].get_network_list()
+         for net in network_list:
+             if self.__class__.network_name in net.get('name'):
+                 self.assertIn(self.__class__.network_name, net.get('name'))
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+     def test_050_refresh_nets_status(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # creating new network
+         network_name = _get_random_string(20)
+         net_type = 'bridge'
+         network_id, _ = test_config["vim_conn"].new_network(net_name=network_name,
+                                                           net_type=net_type)
+         # refresh net status
+         net_dict = test_config["vim_conn"].refresh_nets_status([network_id])
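++        # refresh_nets_status returns a dict indexed by net id; a freshly created net is expected to reach ACTIVE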
+         for attr in net_dict[network_id]:
+             if attr == 'status':
+                 self.assertEqual(net_dict[network_id][attr], 'ACTIVE')
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(network_id))
+     def test_060_refresh_nets_status_negative(self):
+         unknown_net_id = str(uuid.uuid4())
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # refresh net status
++        # for azure, the network id must be the full resource path, built with the following format
++        if test_config['vimtype'] == 'azure':
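++            # /subscriptions/<subscription_id>/resourceGroups/<resource_group>/providers/Microsoft.Network/virtualNetworks/<vnet_name>/subnets/<subnet>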
++            unknown_net_id = "/" + "/".join(["subscriptions", test_config["vim_conn"].subscription_id,
++                                      "resourceGroups", test_config["vim_conn"].resource_group,
++                                      "providers", "Microsoft.Network",
++                                      "virtualNetworks", test_config["vim_conn"].vnet_name,
++                                      "subnets", unknown_net_id])
++        #unknown_net_id = "/subscriptions/ca3d18ab-d373-4afb-a5d6-7c44f098d16a/resourceGroups/osmRG/providers/Microsoft.Network/virtualNetworks/osm_vnet/subnets/unnkown_net"
++
+         net_dict = test_config["vim_conn"].refresh_nets_status([unknown_net_id])
 -                self.assertEqual(net.get('shared'), False)
++        if test_config['vimtype'] in ('openstack', 'azure'):
+             self.assertEqual(net_dict[unknown_net_id]['status'], 'DELETED')
+         else:
+             # TODO : Fix vmware connector to return status DELETED as per vimconn.py
+             self.assertEqual(net_dict, {})
+ class test_vimconn_get_network_list(test_base):
+     network_name = None
+     def setUp(self):
+         # creating new network
+         self.__class__.network_name = _get_random_string(20)
+         self.__class__.net_type = 'bridge'
+         network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                           net_type=self.__class__.net_type)
+         self.__class__.network_id = network
+         logger.debug("{}".format(network))
+     def tearDown(self):
+         test_base.tearDown(self)
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+     def test_000_get_network_list(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         network_list = test_config["vim_conn"].get_network_list()
+         for net in network_list:
+             if self.__class__.network_name in net.get('name'):
+                 self.assertIn(self.__class__.network_name, net.get('name'))
+                 self.assertEqual(net.get('type'), self.__class__.net_type)
+                 self.assertEqual(net.get('status'), 'ACTIVE')
 -        if test_config['vimtype'] == 'openstack':
++                if test_config['vimtype'] != 'azure':
++                    self.assertEqual(net.get('shared'), False)
+     def test_010_get_network_list_by_name(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        if test_config['vimtype'] == 'openstack':
++        if test_config['vimtype'] in ('openstack', 'azure'):
+             network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
+         else:
+             network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
+         # find network from list by its name
+         new_network_list = test_config["vim_conn"].get_network_list({'name': network_name})
+         for list_item in new_network_list:
+             if self.__class__.network_name in list_item.get('name'):
+                 self.assertEqual(network_name, list_item.get('name'))
+                 self.assertEqual(list_item.get('type'), self.__class__.net_type)
+                 self.assertEqual(list_item.get('status'), 'ACTIVE')
+     def test_020_get_network_list_by_id(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # find network from list by its id
+         new_network_list = test_config["vim_conn"].get_network_list({'id':self.__class__.network_id})
+         for list_item in new_network_list:
+             if self.__class__.network_id in list_item.get('id'):
+                 self.assertEqual(self.__class__.network_id, list_item.get('id'))
+                 self.assertEqual(list_item.get('type'), self.__class__.net_type)
+                 self.assertEqual(list_item.get('status'), 'ACTIVE')
+     def test_030_get_network_list_by_shared(self):
+         Shared = False
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        if test_config['vimtype'] == 'openstack':
++        if test_config['vimtype'] in ('openstack', 'azure'):
+             network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
+         else:
+             network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
+         # find network from list by its shared value
+         new_network_list = test_config["vim_conn"].get_network_list({'shared':Shared,
+                                                                 'name':network_name})
+         for list_item in new_network_list:
+             if list_item.get('shared') == Shared:
+                 self.assertEqual(list_item.get('shared'), Shared)
+                 self.assertEqual(list_item.get('type'), self.__class__.net_type)
+                 self.assertEqual(network_name, list_item.get('name'))
+     def test_040_get_network_list_by_tenant_id(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         tenant_list = test_config["vim_conn"].get_tenant_list()
 -        if test_config['vimtype'] == 'openstack':
++        if test_config['vimtype'] in ('openstack', 'azure'):
+             network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
+         else:
+             network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
+         for tenant_item in tenant_list:
+             if test_config['tenant'] == tenant_item.get('name'):
+                 # find network from list by its tenant id
+                 tenant_id = tenant_item.get('id')
+                 new_network_list = test_config["vim_conn"].get_network_list({'tenant_id':tenant_id,
+                                                                               'name':network_name})
+                 for list_item in new_network_list:
+                     self.assertEqual(tenant_id, list_item.get('tenant_id'))
+                     self.assertEqual(network_name, list_item.get('name'))
+                     self.assertEqual(list_item.get('type'), self.__class__.net_type)
+                     self.assertEqual(list_item.get('status'), 'ACTIVE')
+     def test_050_get_network_list_by_status(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         status = 'ACTIVE'
 -        flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vpcus': 1, 'disk': 10}
++        if test_config['vimtype'] in ('openstack', 'azure'):
+             network_name = test_config['vim_conn'].get_network(self.__class__.network_id)['name']
+         else:
+             network_name = test_config['vim_conn'].get_network_name_by_id(self.__class__.network_id)
+         # find network from list by it's status
+         new_network_list = test_config["vim_conn"].get_network_list({'status':status,
+                                                                'name': network_name})
+         for list_item in new_network_list:
+             self.assertIn(self.__class__.network_name, list_item.get('name'))
+             self.assertEqual(list_item.get('type'), self.__class__.net_type)
+             self.assertEqual(list_item.get('status'), status)
+     def test_060_get_network_list_by_negative(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         network_list = test_config["vim_conn"].get_network_list({'name': 'unknown_name'})
+         self.assertEqual(network_list, [])
+ class test_vimconn_get_network(test_base):
+     network_name = None
+     def setUp(self):
+         # creating new network
+         self.__class__.network_name = _get_random_string(20)
+         self.__class__.net_type = 'bridge'
+         network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                           net_type=self.__class__.net_type)
+         self.__class__.network_id = network
+         logger.debug("{}".format(network))
+     def tearDown(self):
+         test_base.tearDown(self)
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+     def test_000_get_network(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         network_info = test_config["vim_conn"].get_network(self.__class__.network_id)
+         self.assertEqual(network_info.get('status'), 'ACTIVE')
+         self.assertIn(self.__class__.network_name, network_info.get('name'))
+         self.assertEqual(network_info.get('type'), self.__class__.net_type)
+         self.assertEqual(network_info.get('id'), self.__class__.network_id)
+     def test_010_get_network_negative(self):
+         Non_exist_id = str(uuid.uuid4())
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
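++        # vimconn exceptions carry an http_code attribute; asking for an unknown network must map to 404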
+         with self.assertRaises(Exception) as context:
+             test_config["vim_conn"].get_network(Non_exist_id)
+         self.assertEqual((context.exception).http_code, 404)
+ class test_vimconn_delete_network(test_base):
+     network_name = None
+     def test_000_delete_network(self):
+         # Creating network
+         self.__class__.network_name = _get_random_string(20)
+         self.__class__.net_type = 'bridge'
+         network, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                           net_type=self.__class__.net_type)
+         self.__class__.network_id = network
+         logger.debug("{}".format(network))
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+         time.sleep(5)
+         # after deleting network we check in network list
+         network_list = test_config["vim_conn"].get_network_list({ 'id':self.__class__.network_id })
+         self.assertEqual(network_list, [])
+     def test_010_delete_network_negative(self):
+         Non_exist_id = str(uuid.uuid4())
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         with self.assertRaises(Exception) as context:
+             test_config["vim_conn"].delete_network(Non_exist_id)
+         self.assertEqual((context.exception).http_code, 404)
+ class test_vimconn_get_flavor(test_base):
+     def test_000_get_flavor(self):
+         test_directory_content = os.listdir(test_config["test_directory"])
+         for dir_name in test_directory_content:
+             if dir_name == 'simple_linux':
+                 self.__class__.scenario_test_path = test_config["test_directory"] + '/'+ dir_name
+                 vnfd_files = glob.glob(self.__class__.scenario_test_path+'/vnfd_*.yaml')
+                 break
+         for vnfd in vnfd_files:
+             with open(vnfd, 'r') as stream:
+                 vnf_descriptor = yaml.load(stream, Loader=yaml.Loader)
+             vnfc_list = vnf_descriptor['vnf']['VNFC']
+             for item in vnfc_list:
+                 if 'ram' in item and 'vcpus' in item and 'disk' in item:
+                     ram = item['ram']
+                     vcpus = item['vcpus']
+                     disk = item['disk']
+         flavor_data = {
+                       'name' : _get_random_string(20),
+                       'ram': ram,
+                       'vcpus': vcpus,
+                       'disk': disk
+                     }
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # get flavor by id
+         result = test_config["vim_conn"].get_flavor(flavor_id)
+         self.assertEqual(ram, result['ram'])
+         self.assertEqual(vcpus, result['vcpus'])
+         self.assertEqual(disk, result['disk'])
+         # delete flavor
+         result = test_config["vim_conn"].delete_flavor(flavor_id)
+         if result:
+             logger.info("Flavor id {} sucessfully deleted".format(result))
+         else:
+             logger.info("Failed to delete flavor id {}".format(result))
+     def test_010_get_flavor_negative(self):
+         Non_exist_flavor_id = str(uuid.uuid4())
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         with self.assertRaises(Exception) as context:
+             test_config["vim_conn"].get_flavor(Non_exist_flavor_id)
+         self.assertEqual((context.exception).http_code, 404)
+ class test_vimconn_new_flavor(test_base):
+     flavor_id = None
+     def test_000_new_flavor(self):
 -        # create new flavor
 -        self.__class__.flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
 -        self.assertIsInstance(self.__class__.flavor_id, (str, unicode))
 -        self.assertIsInstance(uuid.UUID(self.__class__.flavor_id), uuid.UUID)
++        flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        result = test_config["vim_conn"].delete_flavor(self.__class__.flavor_id)
 -        if result:
 -            logger.info("Flavor id {} sucessfully deleted".format(result))
++        if test_config['vimtype'] == 'azure':
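++            # azure only offers predefined VM sizes, so new_flavor is expected to fail (the connector reports http_code 401)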
++            with self.assertRaises(Exception) as context:
++                test_config["vim_conn"].new_flavor(flavor_data)
++
++            self.assertEqual((context.exception).http_code, 401)
++        else:
++            # create new flavor
++            self.__class__.flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
++            self.assertIsInstance(self.__class__.flavor_id, (str, unicode))
++            self.assertIsInstance(uuid.UUID(self.__class__.flavor_id), uuid.UUID)
+     def test_010_delete_flavor(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # delete flavor
 -            logger.error("Failed to delete flavor id {}".format(result))
 -            raise Exception ("Failed to delete created flavor")
++        if test_config['vimtype'] == 'azure':
++            with self.assertRaises(Exception) as context:
++                test_config["vim_conn"].delete_flavor(self.__class__.flavor_id)
++
++            self.assertEqual((context.exception).http_code, 401)
+         else:
 -
 -        self.assertEqual((context.exception).http_code, 400)
++            result = test_config["vim_conn"].delete_flavor(self.__class__.flavor_id)
++            if result:
++                logger.info("Flavor id {} sucessfully deleted".format(result))
++            else:
++                logger.error("Failed to delete flavor id {}".format(result))
++                raise Exception ("Failed to delete created flavor")
+     def test_020_new_flavor_negative(self):
+         Invalid_flavor_data = {'ram': '1024', 'vcpus': 2.0, 'disk': 2.0}
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         with self.assertRaises(Exception) as context:
+             test_config["vim_conn"].new_flavor(Invalid_flavor_data)
 -        self.assertEqual((context.exception).http_code, 404)
++        if test_config['vimtype'] != 'azure':
++            self.assertEqual((context.exception).http_code, 400)
++        else:
++            self.assertEqual((context.exception).http_code, 401)
+     def test_030_delete_flavor_negative(self):
+         Non_exist_flavor_id = str(uuid.uuid4())
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         with self.assertRaises(Exception) as context:
+             test_config["vim_conn"].delete_flavor(Non_exist_flavor_id)
 -        image_list = test_config["vim_conn"].get_image_list()
++        if test_config['vimtype'] != 'azure':
++            self.assertEqual((context.exception).http_code, 404)
++        else:
++            self.assertEqual((context.exception).http_code, 401)
+ # class test_vimconn_new_image(test_base):
+ #
+ #     def test_000_new_image(self):
+ #         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ #                                                             self.__class__.test_index,
+ #                                                 inspect.currentframe().f_code.co_name)
+ #         self.__class__.test_index += 1
+ #
+ #         image_path = test_config['image_path']
+ #         if image_path:
+ #             self.__class__.image_id = test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : image_path, 'metadata': {'upload_location':None} })
+ #             time.sleep(20)
+ #
+ #             self.assertIsInstance(self.__class__.image_id, (str, unicode))
+ #             self.assertIsInstance(uuid.UUID(self.__class__.image_id), uuid.UUID)
+ #         else:
+ #             self.skipTest("Skipping test as image file not present at RO container")
+ #
+ #     def test_010_new_image_negative(self):
+ #         Non_exist_image_path = '/temp1/cirros.ovf'
+ #
+ #         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ #                                                             self.__class__.test_index,
+ #                                                 inspect.currentframe().f_code.co_name)
+ #         self.__class__.test_index += 1
+ #
+ #         with self.assertRaises(Exception) as context:
+ #             test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path})
+ #
+ #         self.assertEqual((context.exception).http_code, 400)
+ #
+ #     def test_020_delete_image(self):
+ #         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ #                                                             self.__class__.test_index,
+ #                                                 inspect.currentframe().f_code.co_name)
+ #         self.__class__.test_index += 1
+ #
+ #         image_id = test_config["vim_conn"].delete_image(self.__class__.image_id)
+ #
+ #         self.assertIsInstance(image_id, (str, unicode))
+ #
+ #     def test_030_delete_image_negative(self):
+ #         Non_exist_image_id = str(uuid.uuid4())
+ #
+ #         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ #                                                             self.__class__.test_index,
+ #                                                 inspect.currentframe().f_code.co_name)
+ #         self.__class__.test_index += 1
+ #
+ #         with self.assertRaises(Exception) as context:
+ #             test_config["vim_conn"].delete_image(Non_exist_image_id)
+ #
+ #         self.assertEqual((context.exception).http_code, 404)
+ # class test_vimconn_get_image_id_from_path(test_base):
+ #
+ #     def test_000_get_image_id_from_path(self):
+ #         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ #                                                             self.__class__.test_index,
+ #                                                 inspect.currentframe().f_code.co_name)
+ #         self.__class__.test_index += 1
+ #
+ #         image_path = test_config['image_path']
+ #         if image_path:
+ #             image_id = test_config["vim_conn"].get_image_id_from_path( image_path )
+ #             self.assertEqual(type(image_id),str)
+ #         else:
+ #             self.skipTest("Skipping test as image file not present at RO container")
+ #
+ #     def test_010_get_image_id_from_path_negative(self):
+ #         Non_exist_image_path = '/temp1/cirros.ovf'
+ #
+ #         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+ #                                                             self.__class__.test_index,
+ #                                                 inspect.currentframe().f_code.co_name)
+ #         self.__class__.test_index += 1
+ #
+ #         with self.assertRaises(Exception) as context:
+ #             test_config["vim_conn"].new_image({ 'name': 'TestImage', 'location' : Non_exist_image_path })
+ #
+ #         self.assertEqual((context.exception).http_code, 400)
+ class test_vimconn_get_image_list(test_base):
+     image_name = None
+     image_id = None
+     def test_000_get_image_list(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        for item in image_list:
 -            if 'name' in item:
 -                self.__class__.image_name = item['name']
 -                self.__class__.image_id = item['id']
 -                self.assertIsInstance(self.__class__.image_name, (str, unicode))
 -                self.assertIsInstance(self.__class__.image_id, (str, unicode))
 -            self.assertEqual(item['id'], self.__class__.image_id)
++        if test_config['vimtype'] != 'azure':
++            image_list = test_config["vim_conn"].get_image_list()
++            logger.debug("{}: Result image list: {}".format(self.__class__.test_text, image_list))
++
++            for item in image_list:
++                if 'name' in item:
++                    self.__class__.image_name = item['name']
++                    self.__class__.image_id = item['id']
++                    self.assertIsInstance(self.__class__.image_name, (str, unicode))
++                    self.assertIsInstance(self.__class__.image_id, (str, unicode))
++        else:
++            with self.assertRaises(Exception) as context:
++                image_list = test_config["vim_conn"].get_image_list()
++            # assertions must run after the with-block; statements after the raising call inside it never execute
++            self.assertEqual((context.exception).http_code, 401)
++            logger.debug("{} Exception unauthorized: {}".format(self.__class__.test_text, context.exception))
+     def test_010_get_image_list_by_name(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
++        self.__class__.image_name = test_config['image_name']
++        logger.debug("{}: Image name: {}".format(self.__class__.test_text, self.__class__.image_name))
+         image_list = test_config["vim_conn"].get_image_list({'name': self.__class__.image_name})
++        logger.debug("{}: Result image list: {}".format(self.__class__.test_text, image_list))
+         for item in image_list:
+             self.assertIsInstance(item['id'], (str, unicode))
+             self.assertIsInstance(item['name'], (str, unicode))
 -        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
++            #self.assertEqual(item['id'], self.__class__.image_id)
+             self.assertEqual(item['name'], self.__class__.image_name)
+     def test_020_get_image_list_by_id(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         filter_image_list = test_config["vim_conn"].get_image_list({'id': self.__class__.image_id})
+         for item1 in filter_image_list:
+             self.assertIsInstance(item1['id'], (str, unicode))
+             self.assertIsInstance(item1['name'], (str, unicode))
+             self.assertEqual(item1['id'], self.__class__.image_id)
+             self.assertEqual(item1['name'], self.__class__.image_name)
+     def test_030_get_image_list_negative(self):
+         Non_exist_image_id = uuid.uuid4()
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         image_list = test_config["vim_conn"].get_image_list({'name': 'Unknown_name', 'id': Non_exist_image_id})
+         self.assertIsNotNone(image_list)
+         self.assertEqual(image_list, [])
+ class test_vimconn_new_vminstance(test_base):
+     network_name = None
+     net_type = None
+     network_id = None
+     image_id = None
+     instance_id = None
+     def setUp(self):
+         # create network
+         self.__class__.network_name = _get_random_string(20)
+         self.__class__.net_type = 'bridge'
+         self.__class__.network_id, _ = test_config["vim_conn"].new_network(net_name=self.__class__.network_name,
+                                                                             net_type=self.__class__.net_type)
+         # find image name and image id
+         if test_config['image_name']:
+             image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
+             if len(image_list) == 0:
+                 raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
+             else:
+                 self.__class__.image_id = image_list[0]['id']
+         else:
+             image_list = test_config['vim_conn'].get_image_list()
+             if len(image_list) == 0:
+                 raise Exception("Not found any image at VIM")
+             else:
+                 self.__class__.image_id = image_list[0]['id']
+     def tearDown(self):
+         test_base.tearDown(self)
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.__class__.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.__class__.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.__class__.network_id))
+     def test_000_new_vminstance(self):
+         vpci = "0000:00:11.0"
+         name = "eth0"
+         flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        self.__class__.instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
++        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci,
++                     'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
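++        # a single virtual interface bound to the setUp network; 'vpci' suggests the PCI address the guest should use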
 -        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'model': model_name, 'type': 'virtual', 'net_id': self.__class__.network_id}]
++        self.__class__.instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='',
++                                                                               start=False,
++                                                                               image_id=self.__class__.image_id,
++                                                                               flavor_id=flavor_id, net_list=net_list)
+         self.assertIsInstance(self.__class__.instance_id, (str, unicode))
+     def test_010_new_vminstance_by_model(self):
+         flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+         model_name = 'e1000'
+         name = 'eth0'
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id,flavor_id=flavor_id,net_list=net_list)
++        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
++                     'model': model_name, 'type': 'virtual', 'net_id': self.__class__.network_id}]
 -        net_list = [{'use': net_use, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
++        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
++                                                                image_id=self.__class__.image_id,
++                                                                flavor_id=flavor_id, net_list=net_list)
+         self.assertIsInstance(instance_id, (str, unicode))
+         # Deleting created vm instance
+         logger.info("Deleting created vm intance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+     def test_020_new_vminstance_by_net_use(self):
+         flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+         net_use = 'data'
+         name = 'eth0'
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id,disk_list=None,
 -                                                                                           flavor_id=flavor_id,
 -                                                                                             net_list=net_list)
++        net_list = [{'use': net_use, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
++                     'net_id': self.__class__.network_id}]
 -        if test_config['vimtype'] == 'openstack':
++        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
++                                                                image_id=self.__class__.image_id, disk_list=None,
++                                                                flavor_id=flavor_id, net_list=net_list)
+         self.assertIsInstance(instance_id, (str, unicode))
+         # Deleting created vm instance
+         logger.info("Deleting created vm intance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+     def test_030_new_vminstance_by_net_type(self):
+         flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+         _type = 'VF'
+         name = 'eth0'
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         if test_config['vimtype'] == 'vmware':
+             net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+                          'type': _type, 'net_id': self.__class__.network_id}]
+             instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=self.__class__.image_id,
+                                                                     flavor_id=flavor_id,
+                                                                     net_list=net_list)
+             self.assertEqual(type(instance_id),str)
 -                                                                    image_id=self.__class__.image_id, disk_list=None,
 -                                                                    flavor_id=flavor_id,
 -                                                                    net_list=net_list)
++        if test_config['vimtype'] in ('openstack', 'azure'):
+             # create network of type data
+             network_name = _get_random_string(20)
+             net_type = 'data'
+             network_id, _ = test_config["vim_conn"].new_network(net_name=network_name,
+                                                                             net_type=net_type)
+             net_list = [{'use': net_type, 'name': name, 'floating_ip': False, 'port_security': True,
+                          'type': _type, 'net_id': network_id}]
+             instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
 -        key_pairs = ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com']
++                                                                   image_id=self.__class__.image_id, disk_list=None,
++                                                                   flavor_id=flavor_id,
++                                                                   net_list=net_list)
+             self.assertEqual(type(instance_id), unicode)
+             # delete created network
+             result = test_config["vim_conn"].delete_network(network_id)
+             if result:
+                 logger.info("Network id {} sucessfully deleted".format(network_id))
+             else:
+                 logger.info("Failed to delete network id {}".format(network_id))
+         # Deleting created vm instance
+         logger.info("Deleting created vm intance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+     def test_040_new_vminstance_by_cloud_config(self):
+         flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+         name = 'eth0'
+         user_name = 'test_user'
 -        users_data = [{'key-pairs': ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCy2w9GHMKKNkpCmrDK2ovc3XBYDETuLWwaW24S+feHhLBQiZlzh3gSQoINlA+2ycM9zYbxl4BGzEzpTVyCQFZv5PidG4m6ox7LR+KYkDcITMyjsVuQJKDvt6oZvRt6KbChcCi0n2JJD/oUiJbBFagDBlRslbaFI2mmqmhLlJ5TLDtmYxzBLpjuX4m4tv+pdmQVfg7DYHsoy0hllhjtcDlt1nn05WgWYRTu7mfQTWfVTavu+OjIX3e0WN6NW7yIBWZcE/Q9lC0II3W7PZDE3QaT55se4SPIO2JTdqsx6XGbekdG1n6adlduOI27sOU5m4doiyJ8554yVbuDB/z5lRBD alfonso.tiernosepulveda@telefonica.com'], 'name': user_name}]
++        key_pairs = ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDtAjl5R+GSKP3gFrdFxgizKEUzhXKQbyjaxJH9thsK0/fDiYlaNEjvijgPgiVZkfwvqgWeLprPcpzL2j4jvmmSJ3+7C8ihCwObWP0VUiuewmbIINBPAR0RqusjMRyPsa+q0asFBPOoZLx3Cv3vzmC1AA3mKuCNeTEuA0rlWhDIOVwMcU5sP1grnmuexQB8HcR7BdKcA9y08pTwnCQR8vmtW77SRkaxEGXm4Gnw5qw8Z27mHdk2wWS2SnbVH7aFwWvDXc6jjf5TpEWypdr/EAPC+eJipeS2Oa4FsntEqAu3Fz6gp/9ub8uNqgCgHfMzs6FhYpZpipwS0hXYyF6eVsSx osm@osm']
 -        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
++        users_data = [{'key-pairs': ['ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDtAjl5R+GSKP3gFrdFxgizKEUzhXKQbyjaxJH9thsK0/fDiYlaNEjvijgPgiVZkfwvqgWeLprPcpzL2j4jvmmSJ3+7C8ihCwObWP0VUiuewmbIINBPAR0RqusjMRyPsa+q0asFBPOoZLx3Cv3vzmC1AA3mKuCNeTEuA0rlWhDIOVwMcU5sP1grnmuexQB8HcR7BdKcA9y08pTwnCQR8vmtW77SRkaxEGXm4Gnw5qw8Z27mHdk2wWS2SnbVH7aFwWvDXc6jjf5TpEWypdr/EAPC+eJipeS2Oa4FsntEqAu3Fz6gp/9ub8uNqgCgHfMzs6FhYpZpipwS0hXYyF6eVsSx osm@osm'], 'name': 'cloudinit'}]
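++        # cloud_config combines config-files (written inside the guest), key-pairs (ssh keys) and users (extra accounts)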
+         cloud_data = {'config-files': [{'content': 'auto enp0s3\niface enp0s3 inet dhcp\n',
+                                         'dest': '/etc/network/interfaces.d/enp0s3.cfg',
+                                         'owner': 'root:root', 'permissions': '0644'},
+                                        {'content': '#! /bin/bash\nls -al >> /var/log/osm.log\n',
+                                         'dest': '/etc/rc.local', 'permissions': '0755'},
+                                        {'content': 'file content', 'dest': '/etc/test_delete'}],
+                       'boot-data-drive': True, 'key-pairs': key_pairs, 'users': users_data}
++        #cloud_data = {'users': users_data }
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -                                                                image_id=self.__class__.image_id, flavor_id=flavor_id,net_list=net_list,cloud_config=cloud_data)
++        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
++                     'type': 'virtual', 'net_id': self.__class__.network_id}]
+         instance_id, _ = test_config["vim_conn"].new_vminstance(name='Cloud_vm', description='', start=False,
 -        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
++                                                                image_id=self.__class__.image_id,
++                                                                flavor_id=flavor_id,net_list=net_list,
++                                                                cloud_config=cloud_data)
+         self.assertIsInstance(instance_id, str)
+         # Deleting created vm instance
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+     def test_050_new_vminstance_by_disk_list(self):
+         flavor_data = {'name':_get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+         name = 'eth0'
+         device_data = [{'image_id': self.__class__.image_id, 'size': '10'}]
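+         # each disk_list entry asks the VIM for an extra volume; here a 10 GB disk
+         # (sizes are given in GB) created from image_id and attached at deploy time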
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        instance_id, _ = test_config["vim_conn"].new_vminstance(name='VM_test1', description='', start=False, image_id=self.__class__.image_id,
 -                                                                                           flavor_id=flavor_id,
 -                                                                                             net_list=net_list,
 -                                                                                         disk_list=device_data)
++        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
++                     'type': 'virtual', 'net_id': self.__class__.network_id}]
 -        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
++        instance_id, _ = test_config["vim_conn"].new_vminstance(name='VM_test1', description='', start=False,
++                                                                image_id=self.__class__.image_id,
++                                                                flavor_id=flavor_id, net_list=net_list,
++                                                                disk_list=device_data)
+         self.assertIsInstance(instance_id, str)
+         # Deleting created vm instance
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+     def test_060_new_vminstance_negative(self):
+         unknown_flavor_id = str(uuid.uuid4())
+         unknown_image_id = str(uuid.uuid4())
+         name = 'eth2'
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -            test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=unknown_image_id,
 -                                                                  flavor_id=unknown_flavor_id,
 -                                                                            net_list=net_list)
++        net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'port_security': True,
++                     'type': 'virtual', 'net_id': self.__class__.network_id}]
+         with self.assertRaises(Exception) as context:
 -        if test_config['vimtype'] == 'openstack':
++            test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
++                                                   image_id=unknown_image_id,
++                                                   flavor_id=unknown_flavor_id,
++                                                   net_list=net_list)
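+         # an unknown image/flavor id is reported as 400 (bad request) or 404 (not
+         # found) depending on the VIM type, so both codes are accepted here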
+         self.assertIn((context.exception).http_code, (400, 404))
+     def test_070_get_vminstance(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # Get instance by its id
+         vm_info = test_config["vim_conn"].get_vminstance(self.__class__.instance_id)
+         if test_config['vimtype'] == 'vmware':
+             for attr in vm_info:
+                 if attr == 'status':
+                     self.assertEqual(vm_info[attr], 'ACTIVE')
+                 if attr == 'hostId':
+                     self.assertEqual(type(vm_info[attr]), str)
+                 if attr == 'interfaces':
+                     self.assertEqual(type(vm_info[attr]), list)
+                     self.assertEqual(vm_info[attr][0]['IsConnected'], 'true')
+                 if attr == 'IsEnabled':
+                     self.assertEqual(vm_info[attr], 'true')
+     def test_080_get_vminstance_negative(self):
+         unknown_instance_id = str(uuid.uuid4())
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         with self.assertRaises(Exception) as context:
+             test_config["vim_conn"].get_vminstance(unknown_instance_id)
+         self.assertEqual((context.exception).http_code, 404)
+     def test_090_refresh_vms_status(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         if test_config['vimtype'] == 'vmware':
+             vm_list = []
+             vm_list.append(self.__class__.instance_id)
+             # refresh vm status
+             vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
+             for attr in vm_info[self.__class__.instance_id]:
+                 if attr == 'status':
+                     self.assertEqual(vm_info[self.__class__.instance_id][attr], 'ACTIVE')
+                 if attr == 'interfaces':
+                     self.assertEqual(type(vm_info[self.__class__.instance_id][attr]), list)
 -            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
++        if test_config['vimtype'] in ('openstack', 'azure'):
+             vpci = "0000:00:11.0"
+             name = "eth0"
+             flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+             # create new flavor
+             flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+             # create new vm instance
 -            instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
++            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci,
++                         'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
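++            # 'vpci' pins the virtual PCI address the NIC gets inside the guest;
++            # it is only honoured by VIMs that support PCI placement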
 -        if test_config['vimtype'] == 'openstack':
++            instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
++                                                                    image_id=self.__class__.image_id,
++                                                                    flavor_id=flavor_id, net_list=net_list)
+             time.sleep(30)
+             vm_list = []
+             vm_list.append(instance_id)
+             # refresh vm status
+             vm_info = test_config["vim_conn"].refresh_vms_status(vm_list)
+             for attr in vm_info[instance_id]:
+                 if attr == 'status':
+                     self.assertEqual(vm_info[instance_id][attr], 'ACTIVE')
+                 if attr == 'interfaces':
+                     self.assertEqual(type(vm_info[instance_id][attr]), list)
+             # Deleting created vm instance
+             logger.info("Deleting created vm instance")
+             test_config["vim_conn"].delete_vminstance(instance_id)
+             time.sleep(10)
+     def test_100_refresh_vms_status_negative(self):
+         unknown_id = str(uuid.uuid4())
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         vm_dict = test_config["vim_conn"].refresh_vms_status([unknown_id])
+         if test_config['vimtype'] == 'vmware':
+             self.assertEqual(vm_dict, {})
 -        if test_config['vimtype'] == 'openstack':
++        if test_config['vimtype'] in ('openstack', 'azure'):
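+             # these connectors do not raise for an unknown id on refresh; they
+             # report the VM as DELETED instead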
+             self.assertEqual(vm_dict[unknown_id]['status'], 'DELETED')
+     def test_110_action_vminstance(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         if test_config['vimtype'] == 'vmware':
+             action_list = ['shutdown', 'start', 'shutoff', 'rebuild', 'pause', 'resume']
+             # various actions on vminstance
+             for action in action_list:
+                 instance_id = test_config["vim_conn"].action_vminstance(self.__class__.instance_id,
+                                                                         {action: None})
+                 self.assertEqual(instance_id, self.__class__.instance_id)
 -            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci, 'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
++        if test_config['vimtype'] in ('openstack', 'azure'):
+             # create new vm instance
+             vpci = "0000:00:11.0"
+             name = "eth0"
+             flavor_data = {'name': _get_random_string(20), 'ram': 1024, 'vcpus': 1, 'disk': 10}
+             # create new flavor
+             flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
 -            new_instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
++            net_list = [{'use': self.__class__.net_type, 'name': name, 'floating_ip': False, 'vpci': vpci,
++                         'port_security': True, 'type': 'virtual', 'net_id': self.__class__.network_id}]
 -            action_list =  ['shutdown','start','shutoff','rebuild','start','pause','start']
++            new_instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='',
++                                                                        start=False, image_id=self.__class__.image_id,
++                                                                        flavor_id=flavor_id, net_list=net_list)
 -        net_list = [{'use': 'data', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'VF', 'net_id': self.__class__.sriov_network_id}]
++            if test_config['vimtype'] == 'openstack':
++                action_list = ['shutdown', 'start', 'shutoff', 'rebuild', 'start', 'pause', 'start']
++            else:
++                action_list = ['shutdown', 'start', 'stop', 'start', 'shutoff', 'start', 'reboot']
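+             # action_vminstance takes a single-key dict, e.g. {'start': None}; the
+             # value can carry parameters for actions that need them (None here)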
+             # various actions on vminstance
+             for action in action_list:
+                 # sleep for some time until the status changes
+                 time.sleep(25)
+                 instance_id = test_config["vim_conn"].action_vminstance(new_instance_id,
+                                                                         {action: None})
+             self.assertIsNone(instance_id)
+             # Deleting created vm instance
+             logger.info("Deleting created vm intance")
+             test_config["vim_conn"].delete_vminstance(new_instance_id)
+             time.sleep(10)
+     def test_120_action_vminstance_negative(self):
+         non_exist_id = str(uuid.uuid4())
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         action = 'start'
+         with self.assertRaises(Exception) as context:
+             test_config["vim_conn"].action_vminstance(non_exist_id, { action: None})
+         self.assertEqual((context.exception).http_code, 404)
+     def test_130_delete_vminstance(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # Deleting created vm instance
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(self.__class__.instance_id)
+         time.sleep(10)
+     def test_140_new_vminstance_sriov(self):
+         logger.info("Testing creation of sriov vm instance using {}".format(test_config['sriov_net_name']))
+         flavor_data = {'name': _get_random_string(20),'ram': 1024, 'vcpus': 2, 'disk': 10}
+         name = 'eth0'
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         sriov_net_name = test_config['sriov_net_name']
+         new_network_list = test_config["vim_conn"].get_network_list({'name': sriov_net_name})
+         for list_item in new_network_list:
+             self.assertEqual(sriov_net_name, list_item.get('name'))
+             self.__class__.sriov_network_id = list_item.get('id')
 -        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_sriov_vm', description='', start=False, image_id=self.__class__.image_id, flavor_id=flavor_id, net_list=net_list)
++        net_list = [{'use': 'data', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'VF',
++                     'net_id': self.__class__.sriov_network_id}]
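+         # type 'VF' requests an SR-IOV virtual function on the 'data' network, so
+         # this test only passes on VIMs/hosts with SR-IOV capable NICs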
 -        self.__class__.tenant_id = test_config["vim_conn"].new_tenant(tenant_name, "")
 -        time.sleep(15)
++        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_sriov_vm', description='', start=False,
++                                                                image_id=self.__class__.image_id, flavor_id=flavor_id,
++                                                                net_list=net_list)
+         self.assertIsInstance(instance_id, str)
+         logger.info("Waiting for created sriov-vm instance")
+         time.sleep(10)
+         # Deleting created vm instance
+         logger.info("Deleting created sriov-vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+ class test_vimconn_get_tenant_list(test_base):
+     tenant_id = None
+     def test_000_get_tenant_list(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # Getting tenant list
+         tenant_list = test_config["vim_conn"].get_tenant_list()
++        logger.debug(self.__class__.test_text + "Tenant list: " + str(tenant_list))
+         for item in tenant_list:
+             if test_config['tenant'] == item['name']:
+                 self.__class__.tenant_id = item['id']
+                 self.assertIsInstance(item['name'], str)
+                 self.assertIsInstance(item['id'], str)
+     def test_010_get_tenant_list_by_id(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # Getting filter tenant list by its id
+         filter_tenant_list = test_config["vim_conn"].get_tenant_list({'id': self.__class__.tenant_id})
++        logger.debug(self.__class__.test_text + "Tenant list: " + str(filter_tenant_list))
+         for item in filter_tenant_list:
+             self.assertIsInstance(item['id'], str)
+             self.assertEqual(item['id'], self.__class__.tenant_id)
+     def test_020_get_tenant_list_by_name(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # Getting filter tenant list by its name
+         filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant']})
++        logger.debug(self.__class__.test_text + "Tenant list: " + str(filter_tenant_list))
+         for item in filter_tenant_list:
+             self.assertIsInstance(item['name'], str)
+             self.assertEqual(item['name'], test_config['tenant'])
+     def test_030_get_tenant_list_by_name_and_id(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         # Getting filter tenant list by its name and id
+         filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': test_config['tenant'],
+                                                                     'id': self.__class__.tenant_id})
++        logger.debug(self.__class__.test_text + "Tenant list: " + str(filter_tenant_list))
+         for item in filter_tenant_list:
+             self.assertIsInstance(item['name'], str)
+             self.assertIsInstance(item['id'], str)
+             self.assertEqual(item['name'], test_config['tenant'])
+             self.assertEqual(item['id'], self.__class__.tenant_id)
+     def test_040_get_tenant_list_negative(self):
+         non_exist_tenant_name = "Tenant_123"
+         non_exist_tenant_id = "kjhgrt456-45345kjhdfgnbdk-34dsfjdfg"
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         filter_tenant_list = test_config["vim_conn"].get_tenant_list({'name': non_exist_tenant_name,
+                                                                          'id': non_exist_tenant_id})
++        logger.debug(self.__class__.test_text + "Tenant list: " + str(filter_tenant_list))
+         self.assertEqual(filter_tenant_list, [])
+ class test_vimconn_new_tenant(test_base):
+     tenant_id = None
+     def test_000_new_tenant(self):
+         tenant_name = _get_random_string(20)
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        self.assertIsInstance(self.__class__.tenant_id, (str, unicode))
++        if test_config['vimtype'] != 'azure':
++            self.__class__.tenant_id = test_config["vim_conn"].new_tenant(tenant_name, "")
++            time.sleep(15)
 -        self.assertEqual((context.exception).http_code, 400)
++            self.assertIsInstance(self.__class__.tenant_id, str)
++        else:
++            with self.assertRaises(Exception) as context:
++                test_config["vim_conn"].new_tenant(tenant_name, "")
++            self.assertEqual((context.exception).http_code, 401)
++            logger.debug(self.__class__.test_text + " Exception unauthorized: " + str(context.exception))
+     def test_010_new_tenant_negative(self):
+         invalid_tenant_name = 10121
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         with self.assertRaises(Exception) as context:
+             test_config["vim_conn"].new_tenant(Invalid_tenant_name, "")
 -        tenant_id = test_config["vim_conn"].delete_tenant(self.__class__.tenant_id)
 -
 -        self.assertIsInstance(tenant_id, (str, unicode))
++        if test_config['vimtype'] != 'azure':
++            self.assertEqual((context.exception).http_code, 400)
++        else:
++            self.assertEqual((context.exception).http_code, 401)
++            logger.debug(self.__class__.test_text + "Exception unauthorized: " + str(context.exception))
+     def test_020_delete_tenant(self):
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
 -        Non_exist_tenant_name = 'Test_30_tenant'
++        if test_config['vimtype'] != 'azure':
++            tenant_id = test_config["vim_conn"].delete_tenant(self.__class__.tenant_id)
++            self.assertIsInstance(tenant_id, str)
++        else:
++            with self.assertRaises(Exception) as context:
++                test_config["vim_conn"].delete_tenant(self.__class__.tenant_id)
++            self.assertEqual((context.exception).http_code, 401)
++            logger.debug(self.__class__.test_text + "Exception unauthorized: " + str(context.exception))
+     def test_030_delete_tenant_negative(self):
 -            test_config["vim_conn"].delete_tenant(Non_exist_tenant_name)
++        non_exist_tenant_name = 'Test_30_tenant'
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         with self.assertRaises(Exception) as context:
 -        self.assertEqual((context.exception).http_code, 404)
++            test_config["vim_conn"].delete_tenant(non_exist_tenant_name)
 -        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
++        if test_config['vimtype'] != 'azure':
++            self.assertEqual((context.exception).http_code, 404)
++        else:
++            self.assertEqual((context.exception).http_code, 401)
++            logger.debug(self.__class__.test_text + "Exception unauthorized: " + str(context.exception))
+ def get_image_id():
+     if test_config['image_name']:
+         image_list = test_config['vim_conn'].get_image_list({'name': test_config['image_name']})
+         if len(image_list) == 0:
+             raise Exception("Image {} is not found at VIM".format(test_config['image_name']))
+         else:
+             image_id = image_list[0]['id']
+     else:
+         image_list = test_config['vim_conn'].get_image_list()
+         if len(image_list) == 0:
+             raise Exception("Not found any image at VIM")
+         else:
+             image_id = image_list[0]['id']
+     return image_id
+ class test_vimconn_vminstance_by_ip_address(test_base):
+     network_name = None
+     network_id = None
+     def setUp(self):
+         # create network
+         self.network_name = _get_random_string(20)
+         self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
+                                                                        net_type='bridge')
+     def tearDown(self):
+         test_base.tearDown(self)
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.network_id))
+     def test_000_vminstance_by_ip_address(self):
+         """
+            This test case will deploy a VM with the provided IP address
+            Pre-requisite: the provided IP address must come from the IP pool range used for network creation
+         """
+         name = "eth0"
+         # provide ip address
+         ip_address = ''
+         flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image id
+         image_id = get_image_id()
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+                                                     'net_id': self.network_id, 'ip_address': ip_address}]
+         instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                             flavor_id=flavor_id, net_list=net_list)
+         self.assertIsInstance(instance_id, str)
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+     def test_010_vminstance_by_ip_address_negative(self):
+         name = "eth1"
+         # IP address not from subnet range
+         invalid_ip_address = '10.10.12.1'
+         flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image name and image id
+         image_id = get_image_id()
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+                                                       'net_id': self.network_id, 'ip_address': invalid_ip_address}]
+         with self.assertRaises(Exception) as context:
+             test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                                   flavor_id=flavor_id,
+                                                                     net_list=net_list)
+         self.assertEqual((context.exception).http_code, 400)
+     def test_020_vminstance_by_floating_ip(self):
+         name = "eth1"
+         flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image name and image id
+         image_id = get_image_id()
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = [{'use': 'bridge', 'name': name, 'floating_ip': True, 'port_security': True, 'type': 'virtual',
+                                                                                        'net_id': self.network_id}]
+         instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                             flavor_id=flavor_id, net_list=net_list)
+         self.assertIsInstance(instance_id, str)
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+     def test_030_vminstance_by_mac_address(self):
+         name = "eth1"
+         mac_address = "74:54:2f:21:da:8c" 
+         flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image name and image id
+         image_id = get_image_id()
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True, 'type': 'virtual',
+                                                              'net_id': self.network_id,'mac_address': mac_address}]
+         instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                             flavor_id=flavor_id, net_list=net_list)
+         self.assertIsInstance(instance_id, str)
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+ class test_vimconn_vminstance_by_adding_10_nics(test_base):
+     network_name = None
+     net_ids = [] 
+     def setUp(self):
+         # create 10 networks
+         for _ in range(10):
+             self.network_name = _get_random_string(20)
+             network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
+                                                                       net_type='bridge')
+             self.net_ids.append(network_id)
+     def tearDown(self):
+         test_base.tearDown(self)
+         # Deleting created network
+         for net_id in self.net_ids:
+             result = test_config["vim_conn"].delete_network(net_id)
+             if result:
+                 logger.info("Network id {} sucessfully deleted".format(net_id))
+             else:
+                 logger.info("Failed to delete network id {}".format(net_id))
+     def test_000_vminstance_by_adding_10_nics(self):
+         flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image name and image id
+         image_id = get_image_id()
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = []
+         for c, net_id in enumerate(self.net_ids, start=1):
+             name = "eth{}".format(c)
+             net_list.append({'use': 'bridge', 'name': name, 'floating_ip': False,
+                                     'port_security': True, 'type': 'virtual', 'net_id': net_id})
+         instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
+                                                             flavor_id=flavor_id, net_list=net_list)
+         self.assertIsInstance(instance_id, str)
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+ class test_vimconn_vminstance_by_existing_disk(test_base):
+     network_name = None
+     network_id = None
+     def setUp(self):
+         # create network
+         self.network_name = _get_random_string(20)
+         self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
+                                                                        net_type='bridge')
+     def tearDown(self):
+         test_base.tearDown(self)
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.network_id))
+     def test_000_vminstance_by_existing_disk(self):
+         """ This testcase will add existing disk only if given catalog/image is free 
+             means not used by any other VM
+         """
+         flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+         name = "eth10"
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image name and image id
+         image_id = get_image_id()
+         cirros_image = test_config["vim_conn"].get_image_list({'name': 'cirros'})
+         disk_list = [{'image_id': cirros_image[0]['id'],'size': 5}]
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+                                         'type': 'virtual', 'net_id': self.network_id}]
 -        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
++        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, 
++                                        image_id=image_id,
+                                         flavor_id=flavor_id, net_list=net_list,disk_list=disk_list)
+         self.assertIsInstance(instance_id, str)
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+     def test_010_vminstance_by_new_disk(self):
+         flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+         name = "eth10"
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image name and image id
+         image_id = get_image_id()
+         disk_list = [{'size': '5'}]
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+                                                   'type': 'virtual', 'net_id': self.network_id}]
 -        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
++        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, 
++                                        image_id=image_id,
+                                         flavor_id=flavor_id, net_list=net_list,disk_list=disk_list)
+         self.assertIsInstance(instance_id, str)
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+     def test_020_vminstance_by_CDROM(self):
+         """ This testcase will insert media file only if provided catalog
+             has pre-created ISO media file into vCD
+         """
+         flavor_data ={'ram': 1024, 'vcpus': 1, 'disk': 10}
+         name = "eth10"
+         image_list = test_config["vim_conn"].get_image_list({'name':'Ubuntu'})
+         disk_list = [{'image_id':image_list[0]['id'],'device_type':'cdrom'}]
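+         # 'device_type': 'cdrom' attaches the image as ISO media instead of a disk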
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image name and image id
+         image_id = get_image_id()
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+                                                   'type': 'virtual', 'net_id': self.network_id}]
 -        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
++        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
++                                       image_id=image_id,
+                                        flavor_id=flavor_id, net_list=net_list,disk_list=disk_list )
+         self.assertIsInstance(instance_id, str)
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+ class test_vimconn_vminstance_by_affinity_anti_affinity(test_base):
+     network_name = None
+     network_id = None
+     def setUp(self):
+         # create network
+         self.network_name = _get_random_string(20)
+         self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
+                                                                        net_type='bridge')
+     def tearDown(self):
+         test_base.tearDown(self)
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.network_id))
+     def test_000_vminstance_by_affinity_anti_affinity(self):
+         """ This testcase will deploy VM into provided HOSTGROUP in VIM config
+             Pre-requisites: User has created Hosh Groups in vCenter with respective Hosts to be used
+             While creating VIM account user has to pass the Host Group names in availability_zone list
+         """
+         flavor_data = {'ram': 1024, 'vcpus': 1, 'disk': 10}
+         name = "eth10"
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image name and image id
+         image_id = get_image_id()
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+                                         'type': 'virtual', 'net_id': self.network_id}]
 -                                                                        ' paired-threads': 2,                                                                                                                                  'memory': 1}]},
 -                                                         'ram': 1024, 'vcpus': 1, 'disk': 10}
++        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False,
++                                  image_id=image_id,
+                                   flavor_id=flavor_id, net_list=net_list,availability_zone_index=1,
+                                                         availability_zone_list=['HG_174','HG_175'])
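+         # availability_zone_index selects one entry of availability_zone_list; for
+         # vCD these names ('HG_174', 'HG_175') must match existing Host Groups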
+         self.assertIsInstance(instance_id, str)
+         time.sleep(10)
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+ class test_vimconn_vminstance_by_numa_affinity(test_base):
+     network_name = None
+     network_id = None
+     def setUp(self):
+         # create network
+         self.network_name = _get_random_string(20)
+         self.network_id, _ = test_config["vim_conn"].new_network(net_name=self.network_name,
+                                                                        net_type='bridge')
+     def tearDown(self):
+         test_base.tearDown(self)
+         # Deleting created network
+         result = test_config["vim_conn"].delete_network(self.network_id)
+         if result:
+             logger.info("Network id {} sucessfully deleted".format(self.network_id))
+         else:
+             logger.info("Failed to delete network id {}".format(self.network_id))
+     def test_000_vminstance_by_numa_affinity(self):
+         flavor_data = {'extended': {'numas': [{'paired-threads-id': [['1', '3'], ['2', '4']],
 -        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', image_id=image_id,
++                                               'paired-threads': 2, 'memory': 1}]},
++                                               'ram': 1024, 'vcpus': 1, 'disk': 10}
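+         # the 'extended' block carries EPA hints: 'paired-threads-id' pins vCPU pairs
+         # to hyperthread siblings and 'memory' is the NUMA node memory (in GB, as the
+         # connectors interpret it)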
+         name = "eth10"
+         # create new flavor
+         flavor_id = test_config["vim_conn"].new_flavor(flavor_data)
+         # find image name and image id
+         image_id = get_image_id()
+         self.__class__.test_text = "{}.{}. TEST {}".format(test_config["test_number"],
+                                                             self.__class__.test_index,
+                                                 inspect.currentframe().f_code.co_name)
+         self.__class__.test_index += 1
+         net_list = [{'use': 'bridge', 'name': name, 'floating_ip': False, 'port_security': True,
+                                         'type': 'virtual', 'net_id': self.network_id}]
 -        test_config['vim_conn'] = vim.vimconnector(name=org_name, tenant_name=tenant_name, user=org_user,passwd=org_passwd, url=vim_url, config=config_params)
++        instance_id, _ = test_config["vim_conn"].new_vminstance(name='Test1_vm', description='', start=False, 
++                                                            image_id=image_id,
+                                                             flavor_id=flavor_id, net_list=net_list)
+         self.assertIsInstance(instance_id, str)
+         logger.info("Deleting created vm instance")
+         test_config["vim_conn"].delete_vminstance(instance_id)
+         time.sleep(10)
+ '''
+ IMPORTANT NOTE
+ The following unittest class does not have the 'test_' prefix on purpose. This test is the one used for the
+ scenario-based tests.
+ '''
+ class descriptor_based_scenario_test(test_base):
+     test_index = 0
+     scenario_test_path = None
+     @classmethod
+     def setUpClass(cls):
+         cls.test_index = 1
+         cls.to_delete_list = []
+         cls.scenario_uuids = []
+         cls.instance_scenario_uuids = []
+         cls.scenario_test_path = test_config["test_directory"] + '/' + test_config["test_folder"]
+         logger.info("{}. {} {}".format(test_config["test_number"], cls.__name__, test_config["test_folder"]))
+     @classmethod
+     def tearDownClass(cls):
+          test_config["test_number"] += 1
+     def test_000_load_scenario(self):
+         self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name,
+                                                            test_config["test_folder"])
+         self.__class__.test_index += 1
+         # load VNFD and NSD
+         descriptor_files = glob.glob(self.__class__.scenario_test_path+'/*.yaml')
+         vnf_descriptors = []
+         scenario_descriptors = []
+         for descriptor_file in descriptor_files:
+             with open(descriptor_file, 'r') as stream:
+                 descriptor = yaml.load(stream, Loader=yaml.Loader)
+                 if "vnf" in descriptor or "vnfd:vnfd-catalog" in descriptor or "vnfd-catalog" in descriptor:
+                     vnf_descriptors.append(descriptor)
+                 else:
+                     scenario_descriptors.append(descriptor)
+         scenario_file = glob.glob(self.__class__.scenario_test_path + '/scenario_*.yaml')
+         if not vnf_descriptors or not scenario_descriptors or len(scenario_descriptors) > 1:
+             raise Exception("Test '{}' not valid. It must contain an scenario file and at least one vnfd file'".format(
+                 test_config["test_folder"]))
+         # load all vnfd
+         for vnf_descriptor in vnf_descriptors:
+             logger.debug("VNF descriptor: {}".format(vnf_descriptor))
+             vnf = test_config["client"].create_vnf(descriptor=vnf_descriptor, image_name=test_config["image_name"])
+             logger.debug(vnf)
+             if 'vnf' in vnf:
+                 vnf_uuid = vnf['vnf']['uuid']
+             else:
+                 vnf_uuid = vnf['vnfd'][0]['uuid']
+             self.__class__.to_delete_list.insert(0, {"item": "vnf", "function": test_config["client"].delete_vnf,
+                                                      "params": {"uuid": vnf_uuid}})
+         # load the scenario definition
+         for scenario_descriptor in scenario_descriptors:
+             # networks = scenario_descriptor['scenario']['networks']
+             # networks[test_config["mgmt_net"]] = networks.pop('mgmt')
+             logger.debug("Scenario descriptor: {}".format(scenario_descriptor))
+             scenario = test_config["client"].create_scenario(descriptor=scenario_descriptor)
+             logger.debug(scenario)
+             if 'scenario' in scenario:
+                 scenario_uuid = scenario['scenario']['uuid']
+             else:
+                 scenario_uuid = scenario['nsd'][0]['uuid']
+             self.__class__.to_delete_list.insert(0, {"item": "scenario",
+                                                      "function": test_config["client"].delete_scenario,
+                                                      "params": {"uuid": scenario_uuid}})
+             self.__class__.scenario_uuids.append(scenario_uuid)
+     def test_010_instantiate_scenario(self):
+         self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name,
+                                                            test_config["test_folder"])
+         self.__class__.test_index += 1
+         for scenario_uuid in self.__class__.scenario_uuids:
+             instance_descriptor = {
+                 "instance":{
+                     "name": self.__class__.test_text,
+                     "scenario": scenario_uuid,
+                     "networks":{
+                         "mgmt": {"sites": [ { "netmap-use": test_config["mgmt_net"]} ]}
+                     }
+                 }
+             }
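+             # 'netmap-use' maps the scenario 'mgmt' network onto the pre-existing
+             # VIM network named by test_config["mgmt_net"] instead of creating one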
+             instance = test_config["client"].create_instance(instance_descriptor)
+             self.__class__.instance_scenario_uuids.append(instance['uuid'])
+             logger.debug(instance)
+             self.__class__.to_delete_list.insert(0, {"item": "instance",
+                                                      "function": test_config["client"].delete_instance,
+                                                      "params": {"uuid": instance['uuid']}})
+     def test_020_check_deployment(self):
+         self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
+                                                            inspect.currentframe().f_code.co_name,
+                                                            test_config["test_folder"])
+         self.__class__.test_index += 1
+         if test_config["manual"]:
+             input('Scenario has been deployed. Perform manual check and press Enter to resume')
+             return
+         keep_waiting = test_config["timeout"]
+         pending_instance_scenario_uuids = list(self.__class__.instance_scenario_uuids)   # make a copy
+         while pending_instance_scenario_uuids:
+             index = 0
+             while index < len(pending_instance_scenario_uuids):
+                 result = check_instance_scenario_active(pending_instance_scenario_uuids[index])
+                 if result[0]:
+                     del pending_instance_scenario_uuids[index]
+                     break
+                 elif 'ERROR' in result[1]:
+                     msg = 'Got error while waiting for the instance to get active: '+result[1]
+                     logging.error(msg)
+                     raise Exception(msg)
+                 index += 1
+             if keep_waiting >= 5:
+                 time.sleep(5)
+                 keep_waiting -= 5
+             elif keep_waiting > 0:
+                 time.sleep(keep_waiting)
+                 keep_waiting = 0
+             else:
+                 msg = 'Timeout reached while waiting for the instance scenario to get active'
+                 logging.error(msg)
+                 raise Exception(msg)
+     def test_030_clean_deployment(self):
+         self.__class__.test_text = "{}.{}. TEST {} {}".format(test_config["test_number"], self.__class__.test_index,
+                                                               inspect.currentframe().f_code.co_name,
+                                                               test_config["test_folder"])
+         self.__class__.test_index += 1
+         # At the moment, if you delete a scenario right after creating it, in openstack datacenters
+         # sometimes scenario ports get orphaned. This sleep is just a dirty workaround
+         time.sleep(5)
+         for item in self.__class__.to_delete_list:
+             response = item["function"](**item["params"])
+             logger.debug(response)
+ def _get_random_string(maxLength):
+     '''generates a string of random characters (string.ascii_letters and string.digits)
+     with a random length up to maxLength characters. If maxLength is <15 it will be changed automatically to 15
+     '''
+     prefix = 'testing_'
+     min_string = 15
+     minLength = min_string - len(prefix)
+     if maxLength < min_string:
+         maxLength = min_string
+     maxLength -= len(prefix)
+     length = random.randint(minLength, maxLength)
+     return prefix + "".join([random.choice(string.ascii_letters + string.digits) for i in range(length)])
+ def test_vimconnector(args):
+     global test_config
+     sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
+     test_config['vimtype'] = args.vimtype
+     if args.vimtype == "vmware":
+         import vimconn_vmware as vim
+         test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
+         tenant_name = args.tenant_name
+         test_config['tenant'] = tenant_name
+         config_params = yaml.load(args.config_param, Loader=yaml.Loader)
+         org_name = config_params.get('orgname')
+         org_user = config_params.get('user')
+         org_passwd = config_params.get('passwd')
+         vim_url = args.endpoint_url
+         test_config['image_path'] = args.image_path
+         test_config['image_name'] = args.image_name
+         test_config['sriov_net_name'] = args.sriov_net_name
+         # vmware connector obj
 -    mandatory_arguments.add_argument('--vimtype', choices=['vmware', 'aws', 'openstack', 'openvim'], required=True,
++        test_config['vim_conn'] = vim.vimconnector(name=org_name, tenant_name=tenant_name, user=org_user,
++                                                   passwd=org_passwd, url=vim_url, config=config_params)
+     elif args.vimtype == "aws":
+         import vimconn_aws as vim
+     elif args.vimtype == "openstack":
+         import vimconn_openstack as vim
+         test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
+         tenant_name = args.tenant_name
+         test_config['tenant'] = tenant_name
+         config_params = yaml.load(args.config_param, Loader=yaml.Loader)
+         os_user = config_params.get('user')
+         os_passwd = config_params.get('passwd')
+         vim_url = args.endpoint_url
+         test_config['image_path'] = args.image_path
+         test_config['image_name'] = args.image_name
+         test_config['sriov_net_name'] = args.sriov_net_name
+         # openstack connector obj
+         vim_persistent_info = {}
+         test_config['vim_conn'] = vim.vimconnector(
+             uuid="test-uuid-1", name="VIO-openstack",
+             tenant_id=None, tenant_name=tenant_name,
+             url=vim_url, url_admin=None,
+             user=os_user, passwd=os_passwd,
+             config=config_params, persistent_info=vim_persistent_info
+         )
+         test_config['vim_conn'].debug = "true"
+     elif args.vimtype == "openvim":
+         import vimconn_openvim as vim
++    elif args.vimtype == "azure":
++        import vimconn_azure as vim
++
++        test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
++
++        tenant_name = args.tenant_name
++        test_config['tenant'] = tenant_name
++        config_params = yaml.load(args.config_param, Loader=yaml.Loader)
++        os_user = config_params.get('user')
++        os_passwd = config_params.get('passwd')
++        vim_url = args.endpoint_url
++        test_config['image_path'] = args.image_path
++        test_config['image_name'] = args.image_name
++        #test_config['sriov_net_name'] = args.sriov_net_name
++        args_log_level = "DEBUG" if args.debug else "INFO"
++
++        # azure connector obj
++        vim_persistent_info = {}
++        test_config['vim_conn'] = vim.vimconnector(
++            uuid="test-uuid-1", name="VIO-azure",
++            tenant_id=None, tenant_name=tenant_name,
++            url=vim_url, url_admin=None,
++            user=os_user, passwd=os_passwd, log_level=args_log_level,
++            config=config_params, persistent_info=vim_persistent_info
++        )
++        test_config['vim_conn'].debug = "true"
++
+     else:
+         logger.critical("vimtype '{}' not supported".format(args.vimtype))
+         sys.exit(1)
+     executed = 0
+     failed = 0
+     clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
+     # If the user only wants to obtain a list of tests, print it and exit
+     if args.list_tests:
+         tests_names = []
+         for cls in clsmembers:
+             if cls[0].startswith('test_vimconn'):
+                 tests_names.append(cls[0])
+         msg = "The 'vimconn' set tests are:\n\t" + ', '.join(sorted(tests_names))
+         print(msg)
+         logger.info(msg)
+         sys.exit(0)
+     # Create the list of tests to be run
+     code_based_tests = []
+     if args.tests:
+         for test in args.tests:
+             for t in test.split(','):
+                 matches_code_based_tests = [item for item in clsmembers if item[0] == t]
+                 if len(matches_code_based_tests) > 0:
+                     code_based_tests.append(matches_code_based_tests[0][1])
+                 else:
+                     logger.critical("Test '{}' is not among the possible ones".format(t))
+                     sys.exit(1)
+     if not code_based_tests:
+         # include all tests
+         for cls in clsmembers:
+             # Include every code-based test whose class name starts with 'test_vimconn'
+             if cls[0].startswith('test_vimconn'):
+                 code_based_tests.append(cls[1])
+     logger.debug("tests to be executed: {}".format(code_based_tests))
+     # TextTestRunner stream is set to /dev/null so that the runner does not print test results directly;
+     # results are reported from within the tests using logging.
+     stream = open('/dev/null', 'w')
+     # Run code based tests
+     basic_tests_suite = unittest.TestSuite()
+     for test in code_based_tests:
+         basic_tests_suite.addTest(unittest.makeSuite(test))
+     result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
+     executed += result.testsRun
+     failed += len(result.failures) + len(result.errors)
+     if failfast and failed:
+         sys.exit(1)
+     if len(result.failures) > 0:
+         logger.debug("failures : {}".format(result.failures))
+     if len(result.errors) > 0:
+         logger.debug("errors : {}".format(result.errors))
+     return executed, failed
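+ # Illustration only (hypothetical values, not part of this change): the vimconn test set
+ # with the new 'azure' type could be invoked as below. Only the option names come from
+ # the argument parser defined at the bottom of this file; the URL, tenant and credential
+ # values are placeholders.
+ #
+ #   python3 test_RO.py vimconn --vimtype azure --debug \
+ #       -u https://management.azure.com -t osm \
+ #       -c "{user: '<client-id>', passwd: '<client-secret>'}"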
+ def test_vim(args):
+     global test_config
+     sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
+     import openmanoclient
+     executed = 0
+     failed = 0
+     test_config["client"] = openmanoclient.openmanoclient(
+         endpoint_url=args.endpoint_url,
+         tenant_name=args.tenant_name,
+         datacenter_name=args.datacenter,
+         debug=args.debug, logger=test_config["logger_name"])
+     clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
+     # If the user only wants to obtain a list of tests, print it and exit
+     if args.list_tests:
+         tests_names = []
+         for cls in clsmembers:
+             if cls[0].startswith('test_VIM'):
+                 tests_names.append(cls[0])
+         msg = "The 'vim' set tests are:\n\t" + ', '.join(sorted(tests_names)) +\
+               "\nNOTE: The test test_VIM_tenant_operations will fail in case the used datacenter is type OpenStack " \
+               "unless RO has access to the admin endpoint. Therefore this test is excluded by default"
+         print(msg)
+         logger.info(msg)
+         sys.exit(0)
+     # Create the list of tests to be run
+     code_based_tests = []
+     if args.tests:
+         for test in args.tests:
+             for t in test.split(','):
+                 matches_code_based_tests = [item for item in clsmembers if item[0] == t]
+                 if len(matches_code_based_tests) > 0:
+                     code_based_tests.append(matches_code_based_tests[0][1])
+                 else:
+                     logger.critical("Test '{}' is not among the possible ones".format(t))
+                     sys.exit(1)
+     if not code_based_tests:
+         # include all tests
+         for cls in clsmembers:
+             # We exclude 'test_VIM_tenant_operations' unless it is specifically requested by the user
+             if cls[0].startswith('test_VIM') and cls[0] != 'test_VIM_tenant_operations':
+                 code_based_tests.append(cls[1])
+     logger.debug("tests to be executed: {}".format(code_based_tests))
+     # TextTestRunner stream is set to /dev/null so that the runner does not print test results directly;
+     # results are reported from within the tests using logging.
+     stream = open('/dev/null', 'w')
+     # Run code based tests
+     basic_tests_suite = unittest.TestSuite()
+     for test in code_based_tests:
+         basic_tests_suite.addTest(unittest.makeSuite(test))
+     result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
+     executed += result.testsRun
+     failed += len(result.failures) + len(result.errors)
+     if failfast and failed:
+         sys.exit(1)
+     if len(result.failures) > 0:
+         logger.debug("failures : {}".format(result.failures))
+     if len(result.errors) > 0:
+         logger.debug("errors : {}".format(result.errors))
+     return executed, failed
+ def test_wim(args):
+     global test_config
+     sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
+     import openmanoclient
+     executed = 0
+     failed = 0
+     test_config["client"] = openmanoclient.openmanoclient(
+         endpoint_url=args.endpoint_url,
+         tenant_name=args.tenant_name,
+         datacenter_name=args.datacenter,
+         debug=args.debug, logger=test_config["logger_name"])
+     clsmembers = inspect.getmembers(sys.modules[__name__], inspect.isclass)
+     # If the user only wants to obtain a list of tests, print it and exit
+     if args.list_tests:
+         tests_names = []
+         for cls in clsmembers:
+             if cls[0].startswith('test_WIM'):
+                 tests_names.append(cls[0])
+         msg = "The 'wim' set tests are:\n\t" + ', '.join(sorted(tests_names))
+         print(msg)
+         logger.info(msg)
+         sys.exit(0)
+     # Create the list of tests to be run
+     code_based_tests = []
+     if args.tests:
+         for test in args.tests:
+             for t in test.split(','):
+                 matches_code_based_tests = [item for item in clsmembers if item[0] == t]
+                 if len(matches_code_based_tests) > 0:
+                     code_based_tests.append(matches_code_based_tests[0][1])
+                 else:
+                     logger.critical("Test '{}' is not among the possible ones".format(t))
+                     sys.exit(1)
+     if not code_based_tests:
+         # include all tests
+         for cls in clsmembers:
+             # Include every code-based test whose class name starts with 'test_WIM'
+             if cls[0].startswith('test_WIM'):
+                 code_based_tests.append(cls[1])
+     logger.debug("tests to be executed: {}".format(code_based_tests))
+     # TextTestRunner stream is set to /dev/null so that the runner does not print test results directly;
+     # results are reported from within the tests using logging.
+     stream = open('/dev/null', 'w')
+     # Run code based tests
+     basic_tests_suite = unittest.TestSuite()
+     for test in code_based_tests:
+         basic_tests_suite.addTest(unittest.makeSuite(test))
+     result = unittest.TextTestRunner(stream=stream, failfast=failfast).run(basic_tests_suite)
+     executed += result.testsRun
+     failed += len(result.failures) + len(result.errors)
+     if failfast and failed:
+         sys.exit(1)
+     if len(result.failures) > 0:
+         logger.debug("failures : {}".format(result.failures))
+     if len(result.errors) > 0:
+         logger.debug("errors : {}".format(result.errors))
+     return executed, failed
+ def test_deploy(args):
+     global test_config
+     sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + "/osm_ro")
+     import openmanoclient
+     executed = 0
+     failed = 0
+     test_config["test_directory"] = os.path.dirname(__file__) + "/RO_tests"
+     test_config["image_name"] = args.image_name
+     test_config["mgmt_net"] = args.mgmt_net
+     test_config["manual"] = args.manual
+     test_directory_content = os.listdir(test_config["test_directory"])
+     # If the user only wants to obtain a list of tests, print it and exit
+     if args.list_tests:
+         msg = "The 'deploy' set tests are:\n\t" + ', '.join(sorted(test_directory_content))
+         print(msg)
+         # logger.info(msg)
+         sys.exit(0)
+     descriptor_based_tests = []
+     # Create the list of tests to be run
+     code_based_tests = []
+     if args.tests:
+         for test in args.tests:
+             for t in test.split(','):
+                 if t in test_directory_content:
+                     descriptor_based_tests.append(t)
+                 else:
+                     logger.critical("Test '{}' is not among the possible ones".format(t))
+                     sys.exit(1)
+     if not descriptor_based_tests:
+         # include all tests
+         descriptor_based_tests = test_directory_content
+     logger.debug("tests to be executed: {}".format(descriptor_based_tests))
+     # import openmanoclient from relative path
+     test_config["client"] = openmanoclient.openmanoclient(
+         endpoint_url=args.endpoint_url,
+         tenant_name=args.tenant_name,
+         datacenter_name=args.datacenter,
+         debug=args.debug, logger=test_config["logger_name"])
+     # TextTestRunner stream is set to /dev/null so that the runner does not print test results directly;
+     # results are reported from within the tests using logging.
+     stream = open('/dev/null', 'w')
+     # These scenario-based tests are defined as directories inside the directory set in 'test_directory'
+     for test in descriptor_based_tests:
+         test_config["test_folder"] = test
+         test_suite = unittest.TestSuite()
+         test_suite.addTest(unittest.makeSuite(descriptor_based_scenario_test))
+         result = unittest.TextTestRunner(stream=stream, failfast=False).run(test_suite)
+         executed += result.testsRun
+         failed += len(result.failures) + len(result.errors)
+         if failfast and failed:
+             sys.exit(1)
+         if len(result.failures) > 0:
+             logger.debug("failures : {}".format(result.failures))
+         if len(result.errors) > 0:
+             logger.debug("errors : {}".format(result.errors))
+     return executed, failed
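+ # Usage sketch for the 'deploy' set (the datacenter, image and network names below are
+ # placeholders; -d, -i and -n are the mandatory options of the 'deploy' subparser):
+ #
+ #   python3 test_RO.py deploy -d my-datacenter -i ubuntu16.04 -n mgmt --failfast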
+ if __name__=="__main__":
+     parser = ArgumentParser(description='Test RO module')
+     parser.add_argument('-v', '--version', action='version', help="Show current version",
+                         version='%(prog)s version ' + __version__ + ' ' + version_date)
+     # Common parameters
+     parent_parser = ArgumentParser(add_help=False)
+     parent_parser.add_argument('--failfast', help='Stop when a test fails rather than execute all tests',
+                       dest='failfast', action="store_true", default=False)
+     parent_parser.add_argument('--failed', help='Set logs to show only failed tests. --debug disables this option',
+                       dest='failed', action="store_true", default=False)
+     default_logger_file = os.path.dirname(__file__)+'/'+os.path.splitext(os.path.basename(__file__))[0]+'.log'
+     parent_parser.add_argument('--list-tests', help='List all available tests', dest='list_tests', action="store_true",
+                       default=False)
+     parent_parser.add_argument('--logger_file', dest='logger_file', default=default_logger_file,
+                                help='Set the logger file. By default '+default_logger_file)
+     parent_parser.add_argument("-t", '--tenant', dest='tenant_name', default="osm",
+                                help="Set the openmano tenant to use for the test. By default 'osm'")
+     parent_parser.add_argument('--debug', help='Set logs to debug level', dest='debug', action="store_true")
+     parent_parser.add_argument('--timeout', help='Specify the instantiation timeout in seconds. By default 300',
+                           dest='timeout', type=int, default=300)
+     parent_parser.add_argument('--test', '--tests', help='Specify the tests to run', dest='tests', action="append")
+     subparsers = parser.add_subparsers(help='test sets')
+     # Deployment test set
+     # -------------------
+     deploy_parser = subparsers.add_parser('deploy', parents=[parent_parser],
+                                           help="test deployment using descriptors in the RO_tests folder")
+     deploy_parser.set_defaults(func=test_deploy)
+     # Mandatory arguments
+     mandatory_arguments = deploy_parser.add_argument_group('mandatory arguments')
+     mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')
+     mandatory_arguments.add_argument("-i", '--image-name', required=True, dest="image_name",
+                                      help='Image name available at datacenter used for the tests')
+     mandatory_arguments.add_argument("-n", '--mgmt-net-name', required=True, dest='mgmt_net',
+                                      help='Set the vim management network to use for tests')
+     # Optional arguments
+     deploy_parser.add_argument('-m', '--manual-check', dest='manual', action="store_true", default=False,
+                                help='Pause execution once deployed to allow manual checking of the '
+                                     'deployed instance scenario')
+     deploy_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
+                                help="Set the openmano server url. By default 'http://localhost:9090/openmano'")
+     # Vimconn test set
+     # -------------------
+     vimconn_parser = subparsers.add_parser('vimconn', parents=[parent_parser], help="test vimconnector plugin")
+     vimconn_parser.set_defaults(func=test_vimconnector)
+     # Mandatory arguments
+     mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
++    mandatory_arguments.add_argument('--vimtype', choices=['vmware', 'aws', 'openstack', 'openvim', 'azure'], required=True,
+                                      help='Set the vimconnector type to test')
+     mandatory_arguments.add_argument('-c', '--config', dest='config_param', required=True,
+                                      help='Set the vimconnector specific config parameters in dictionary format')
+     mandatory_arguments.add_argument('-u', '--url', dest='endpoint_url', required=True,
+                                      help="Set the vim connector url or Host IP")
+     # Optional arguments
+     vimconn_parser.add_argument('-i', '--image-path', dest='image_path', help="Provide image path present at RO container")
+     vimconn_parser.add_argument('-n', '--image-name', dest='image_name', help="Provide image name for test")
+     # TODO add optional arguments for vimconn tests
+     # vimconn_parser.add_argument("-i", '--image-name', dest='image_name', help='<HELP>'))
+     vimconn_parser.add_argument('-s', '--sriov-net-name', dest='sriov_net_name', help="Provide SRIOV network name for test")
+     # Datacenter test set
+     # -------------------
+     vimconn_parser = subparsers.add_parser('vim', parents=[parent_parser], help="test vim")
+     vimconn_parser.set_defaults(func=test_vim)
+     # Mandatory arguments
+     mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
+     mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')
+     # Optional arguments
+     vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
+                                help="Set the openmano server url. By default 'http://localhost:9090/openmano'")
+     # WIM test set
+     # -------------------
+     vimconn_parser = subparsers.add_parser('wim', parents=[parent_parser], help="test wim")
+     vimconn_parser.set_defaults(func=test_wim)
+     # Mandatory arguments
+     mandatory_arguments = vimconn_parser.add_argument_group('mandatory arguments')
+     mandatory_arguments.add_argument('-d', '--datacenter', required=True, help='Set the datacenter to test')
+     # Optional arguments
+     vimconn_parser.add_argument('-u', '--url', dest='endpoint_url', default='http://localhost:9090/openmano',
+                                 help="Set the openmano server url. By default 'http://localhost:9090/openmano'")
+     argcomplete.autocomplete(parser)
+     args = parser.parse_args()
+     # print str(args)
+     test_config = {}
+     # The default logger level is INFO. Options --debug and --failed override it, with --debug taking precedence
+     logger_level = 'INFO'
+     if args.debug:
+         logger_level = 'DEBUG'
+     elif args.failed:
+         logger_level = 'WARNING'
+     logger_name = os.path.basename(__file__)
+     test_config["logger_name"] = logger_name
+     logger = logging.getLogger(logger_name)
+     logger.setLevel(logger_level)
+     failfast = args.failfast
+     # Configure a logging handler to store in a logging file
+     if args.logger_file:
+         fileHandler = logging.FileHandler(args.logger_file)
+         formatter_fileHandler = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
+         fileHandler.setFormatter(formatter_fileHandler)
+         logger.addHandler(fileHandler)
+     # Configure a handler to print to stdout
+     consoleHandler = logging.StreamHandler(sys.stdout)
+     formatter_consoleHandler = logging.Formatter('%(asctime)s %(name)s %(levelname)s: %(message)s')
+     consoleHandler.setFormatter(formatter_consoleHandler)
+     logger.addHandler(consoleHandler)
+     logger.debug('Program started with the following arguments: ' + str(args))
+     # set test config parameters
+     test_config["timeout"] = args.timeout
+     test_config["test_number"] = 1
+     executed, failed = args.func(args)
+     # Log summary
+     logger.warning("Total number of tests: {}; Total number of failures/errors: {}".format(executed, failed))
+     sys.exit(1 if failed else 0)
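+ # Usage sketches for the 'vim' and 'wim' sets (datacenter/WIM names are placeholders):
+ #
+ #   python3 test_RO.py vim -d my-datacenter --list-tests
+ #   python3 test_RO.py wim -d my-wim -t osm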
index 0000000,3f79787..3d04c1f
mode 000000,100755..100755
--- /dev/null
@@@ -1,0 -1,65 +1,66 @@@
+ #!/bin/bash
+ ##
+ # Licensed under the Apache License, Version 2.0 (the "License"); you may
+ # not use this file except in compliance with the License. You may obtain
+ # a copy of the License at
+ #
+ #         http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ # License for the specific language governing permissions and limitations
+ # under the License.
+ #
+ # For those usages not covered by the Apache License, Version 2.0 please
+ # contact with: nfvlabs@tid.es
+ ##
+ # Generates the debian packages, then builds a docker image based on Dockerfile-devops and updates a
+ # running docker stack with the generated image
+ HERE=$(dirname $(readlink -f ${BASH_SOURCE[0]}))
+ export RO_BASE=$(dirname $HERE)
+ # clean
+ docker rm -f ro_pkg 2>/dev/null && echo docker ro_pkg removed
+ rm -rf $HERE/temp/*
++find $RO_BASE  -name "*.pyc" -exec rm {} ";"
+ mkdir -p $HERE/temp
+ echo -e "\n\n[STAGE 1] Building the dockerfile used for the package generation"
+ docker build $RO_BASE -f $RO_BASE/Dockerfile  -t opensourcemano/ro_pkg
+ sleep 2
+ echo "[STAGE 1.1] Generating packages inside docker ro_pkg"
+ docker run -d --name ro_pkg opensourcemano/ro_pkg bash -c 'sleep 3600'
+ docker cp $RO_BASE ro_pkg:/RO
+ docker exec ro_pkg bash -c 'cd /RO;  ./devops-stages/stage-build.sh'
+ deb_files=`docker exec ro_pkg bash -c 'ls /RO/deb_dist/'`
+ [ -z "$deb_files" ] && echo "No packages generated" >&2 && exit 1
+ echo $deb_files
+ echo -e "\n\n[STAGE 1.2] Print package information and copy to '$HERE/temp/'"
+ # print package information and copy to "$HERE/temp/"
+ for deb_file in $deb_files ; do
+    echo; echo; echo
+    echo $deb_file info:
+    echo "==========================="
+    docker cp ro_pkg:/RO/deb_dist/$deb_file $HERE/temp/
+    dpkg -I $HERE/temp/$(basename $deb_file)
+ done
+ # docker rm -f ro_pkg
+ echo -e "\n\n[STAGE 2] Building docker image opensourcemano/ro:py3_devops based on debian packages"
+ docker build $HERE -f $HERE/Dockerfile-devops -t opensourcemano/ro:py3_devops ||
+     ! echo "error building devops docker image" >&2 || exit 1
+ sleep 2
+ # docker run -d --name ro_devops opensourcemano/ro:py3_devops
+ # docker run -ti exec ro_devops ro tenant-list  || ! echo "Cannot exec ro client to get server tenants" >&2 || exit 1
+ echo -e "\n\n[STAGE 3] Update service osm_ro with generated docker image"
+ docker service update osm_ro --force --image opensourcemano/ro:py3_devops
+ sleep 2
+ docker container prune -f
+ docker service logs osm_ro
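+ # Usage sketch (assumes a docker swarm on this host with a running 'osm_ro' service,
+ # which is what the 'docker service update' above expects):
+ #
+ #   cd test-docker && ./test-gen-devops.sh
+ #   docker service ps osm_ro    # check that the service now runs ro:py3_devops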