New README.rst and requirements.txt files, setup.py and Makefile updated
osm/RO.git: vimconn_openstack.py
index 6396b77..b501d9d 100644
@@ -24,7 +24,7 @@
 '''
 osconnector implements all the methods to interact with openstack using the python-client.
 '''
-__author__="Alfonso Tierno, Gerardo Garcia"
+__author__="Alfonso Tierno, Gerardo Garcia, Pablo Montes, xFlow Research"
 __date__ ="$22-jun-2014 11:19:29$"
 
 import vimconn
@@ -32,15 +32,23 @@ import json
 import yaml
 import logging
 import netaddr
+import time
+import yaml
+import random
 
-from novaclient import client as nClient, exceptions as nvExceptions
-import keystoneclient.v2_0.client as ksClient
+from novaclient import client as nClient_v2, exceptions as nvExceptions
+from novaclient import api_versions
+import keystoneclient.v2_0.client as ksClient_v2
+from novaclient.v2.client import Client as nClient
+import keystoneclient.v3.client as ksClient
 import keystoneclient.exceptions as ksExceptions
 import glanceclient.v2.client as glClient
 import glanceclient.client as gl1Client
 import glanceclient.exc as gl1Exceptions
+import cinderclient.v2.client as cClient_v2
 from httplib import HTTPException
-from neutronclient.neutron import client as neClient
+from neutronclient.neutron import client as neClient_v2
+from neutronclient.v2_0 import client as neClient
 from neutronclient.common import exceptions as neExceptions
 from requests.exceptions import ConnectionError
 
@@ -55,16 +63,28 @@ vmStatus2manoFormat={'ACTIVE':'ACTIVE',
 netStatus2manoFormat={'ACTIVE':'ACTIVE','PAUSED':'PAUSED','INACTIVE':'INACTIVE','BUILD':'BUILD','ERROR':'ERROR','DELETED':'DELETED'
                      }
 
+#global var to have a timeout creating and deleting volumes
+volume_timeout = 60
+server_timeout = 60
+
 class vimconnector(vimconn.vimconnector):
-    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None, log_level=None, config={}):
-        '''using common constructor parameters. In this case 
+    def __init__(self, uuid, name, tenant_id, tenant_name, url, url_admin=None, user=None, passwd=None,
+                 log_level=None, config={}, persistent_info={}):
+        '''using common constructor parameters. In this case
         'url' is the keystone authorization url,
         'url_admin' is not use
         '''
+        self.osc_api_version = 'v2.0'
+        if config.get('APIversion') == 'v3.3':
+            self.osc_api_version = 'v3.3'
         vimconn.vimconnector.__init__(self, uuid, name, tenant_id, tenant_name, url, url_admin, user, passwd, log_level, config)
-        
+
+        self.persistent_info = persistent_info
         self.k_creds={}
         self.n_creds={}
+        if self.config.get("insecure"):
+            self.k_creds["insecure"] = True
+            self.n_creds["insecure"] = True
         if not url:
             raise TypeError, 'url param can not be NoneType'
         self.k_creds['auth_url'] = url
@@ -81,6 +101,13 @@ class vimconnector(vimconn.vimconnector):
         if passwd:
             self.k_creds['password'] = passwd
             self.n_creds['api_key']  = passwd
+        if self.osc_api_version == 'v3.3':
+            self.k_creds['project_name'] = tenant_name
+            self.k_creds['project_id'] = tenant_id
+        if config.get('region_name'):
+            self.k_creds['region_name'] = config.get('region_name')
+            self.n_creds['region_name'] = config.get('region_name')
+
         self.reload_client       = True
         self.logger = logging.getLogger('openmano.vim.openstack')
         if log_level:
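For context, a minimal sketch of the extra VIM 'config' options the constructor above now understands (values below are illustrative only, not taken from this repository):

    # hypothetical example of the connector 'config' dict
    config = {
        "APIversion": "v3.3",        # selects the keystone v3 / versioned nova client path
        "insecure": True,            # copied into k_creds/n_creds to skip TLS verification
        "region_name": "RegionOne",  # copied into both keystone and nova credentials
    }
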
@@ -93,21 +120,37 @@ class vimconnector(vimconn.vimconnector):
         if index=='tenant_id':
             self.reload_client=True
             self.tenant_id = value
-            if value:
-                self.k_creds['tenant_id'] = value
-                self.n_creds['tenant_id']  = value
+            if self.osc_api_version == 'v3.3':
+                if value:
+                    self.k_creds['project_id'] = value
+                    self.n_creds['project_id']  = value
+                else:
+                    del self.k_creds['project_id']
+                    del self.n_creds['project_id']
             else:
-                del self.k_creds['tenant_name']
-                del self.n_creds['project_id']
+                if value:
+                    self.k_creds['tenant_id'] = value
+                    self.n_creds['tenant_id']  = value
+                else:
+                    del self.k_creds['tenant_id']
+                    del self.n_creds['tenant_id']
         elif index=='tenant_name':
             self.reload_client=True
             self.tenant_name = value
-            if value:
-                self.k_creds['tenant_name'] = value
-                self.n_creds['project_id']  = value
+            if self.osc_api_version == 'v3.3':
+                if value:
+                    self.k_creds['project_name'] = value
+                    self.n_creds['project_name']  = value
+                else:
+                    del self.k_creds['project_name']
+                    del self.n_creds['project_name']
             else:
-                del self.k_creds['tenant_name']
-                del self.n_creds['project_id']
+                if value:
+                    self.k_creds['tenant_name'] = value
+                    self.n_creds['project_id']  = value
+                else:
+                    del self.k_creds['tenant_name']
+                    del self.n_creds['project_id']
         elif index=='user':
             self.reload_client=True
             self.user = value
@@ -146,14 +189,23 @@ class vimconnector(vimconn.vimconnector):
             #test valid params
             if len(self.n_creds) <4:
                 raise ksExceptions.ClientException("Not enough parameters to connect to openstack")
-            self.nova = nClient.Client(2, **self.n_creds)
-            self.keystone = ksClient.Client(**self.k_creds)
+            if self.osc_api_version == 'v3.3':
+                self.nova = nClient(api_version=api_versions.APIVersion(version_str='2.0'), **self.n_creds)
+                #TODO To be updated for v3
+                #self.cinder = cClient.Client(**self.n_creds)
+                self.keystone = ksClient.Client(**self.k_creds)
+                self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
+                self.neutron = neClient.Client(api_version=api_versions.APIVersion(version_str='2.0'), endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
+            else:
+                self.nova = nClient_v2.Client(version='2', **self.n_creds)
+                self.cinder = cClient_v2.Client(**self.n_creds)
+                self.keystone = ksClient_v2.Client(**self.k_creds)
+                self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
+                self.neutron = neClient_v2.Client('2.0', endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
             self.glance_endpoint = self.keystone.service_catalog.url_for(service_type='image', endpoint_type='publicURL')
             self.glance = glClient.Client(self.glance_endpoint, token=self.keystone.auth_token, **self.k_creds)  #TODO check k_creds vs n_creds
-            self.ne_endpoint=self.keystone.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
-            self.neutron = neClient.Client('2.0', endpoint_url=self.ne_endpoint, token=self.keystone.auth_token, **self.k_creds)
             self.reload_client = False
-        
+
     def __net_os2mano(self, net_list_dict):
         '''Transform the net openstack format to mano format
         net_list_dict can be a list of dict or a single dict'''
@@ -195,14 +247,17 @@ class vimconnector(vimconn.vimconnector):
             <other VIM specific>
         Returns the tenant list of dictionaries: [{'name':'<name>, 'id':'<id>, ...}, ...]
         '''
-        self.logger.debug("Getting tenant from VIM filter: '%s'", str(filter_dict))
+        self.logger.debug("Getting tenants from VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
-            tenant_class_list=self.keystone.tenants.findall(**filter_dict)
-            tenant_list=[]
-            for tenant in tenant_class_list:
-                tenant_list.append(tenant.to_dict())
-            return tenant_list
+            if self.osc_api_version == 'v3.3':
+                project_class_list=self.keystone.projects.findall(**filter_dict)
+            else:
+                project_class_list=self.keystone.tenants.findall(**filter_dict)
+            project_list=[]
+            for project in project_class_list:
+                project_list.append(project.to_dict())
+            return project_list
         except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError)  as e:
             self._format_exception(e)
 
@@ -211,8 +266,11 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Adding a new tenant name: %s", tenant_name)
         try:
             self._reload_connection()
         self.logger.debug("Adding a new tenant name: %s", tenant_name)
         try:
             self._reload_connection()
-            tenant=self.keystone.tenants.create(tenant_name, tenant_description)
-            return tenant.id
+            if self.osc_api_version == 'v3.3':
+                project=self.keystone.projects.create(tenant_name, tenant_description)
+            else:
+                project=self.keystone.tenants.create(tenant_name, tenant_description)
+            return project.id
         except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError)  as e:
             self._format_exception(e)
 
@@ -221,11 +279,14 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
         try:
             self._reload_connection()
         self.logger.debug("Deleting tenant %s from VIM", tenant_id)
         try:
             self._reload_connection()
-            self.keystone.tenants.delete(tenant_id)
+            if self.osc_api_version == 'v3.3':
+                self.keystone.projects.delete(tenant_id)
+            else:
+                self.keystone.tenants.delete(tenant_id)
             return tenant_id
         except (ksExceptions.ConnectionError, ksExceptions.ClientException, ConnectionError)  as e:
             self._format_exception(e)
-        
+
     def new_network(self,net_name, net_type, ip_profile=None, shared=False, vlan=None):
         '''Adds a tenant network to VIM. Returns the network identifier'''
         self.logger.debug("Adding a new network to VIM name '%s', type '%s'", net_name, net_type)
@@ -248,8 +309,9 @@ class vimconnector(vimconn.vimconnector):
             if not ip_profile:
                 ip_profile = {}
             if 'subnet_address' not in ip_profile:
-                #Fake subnet is required 
-                ip_profile['subnet_address'] = "192.168.111.0/24"
+                #Fake subnet is required
+                subnet_rand = random.randint(0, 255)
+                ip_profile['subnet_address'] = "192.168.{}.0/24".format(subnet_rand)
             if 'ip_version' not in ip_profile: 
                 ip_profile['ip_version'] = "IPv4"
             subnet={"name":net_name+"-subnet",
@@ -273,7 +335,7 @@ class vimconnector(vimconn.vimconnector):
                 #parts = ip_profile['dhcp_start_address'].split('.')
                 #ip_int = (int(parts[0]) << 24) + (int(parts[1]) << 16) + (int(parts[2]) << 8) + int(parts[3])
                 ip_int = int(netaddr.IPAddress(ip_profile['dhcp_start_address']))
-                ip_int += ip_profile['dhcp_count']
+                ip_int += ip_profile['dhcp_count'] - 1
                 ip_str = str(netaddr.IPAddress(ip_int))
                 subnet['allocation_pools'][0]['end'] = ip_str
             #self.logger.debug(">>>>>>>>>>>>>>>>>> Subnet: %s", str(subnet))
@@ -298,6 +360,8 @@ class vimconnector(vimconn.vimconnector):
         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
         self.logger.debug("Getting network from VIM filter: '%s'", str(filter_dict))
         try:
             self._reload_connection()
+            if self.osc_api_version == 'v3.3' and "tenant_id" in filter_dict:
+                filter_dict['project_id'] = filter_dict.pop('tenant_id')
             net_dict=self.neutron.list_networks(**filter_dict)
             net_list=net_dict["networks"]
             self.__net_os2mano(net_list)
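Illustrative only: under the v3.3 path the filter key is renamed before it reaches Neutron, so a caller-side filter like the one below is sent with project_id instead of tenant_id.

    filter_dict = {"tenant_id": "PROJECT-UUID", "name": "mgmt"}   # hypothetical input
    # with osc_api_version == 'v3.3' this is rewritten to:
    # {"project_id": "PROJECT-UUID", "name": "mgmt"}
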
@@ -403,6 +467,38 @@ class vimconnector(vimconn.vimconnector):
         except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
             self._format_exception(e)
 
+    def get_flavor_id_from_data(self, flavor_dict):
+        """Obtain flavor id that match the flavor description
+           Returns the flavor_id or raises a vimconnNotFoundException
+        """
+        try:
+            self._reload_connection()
+            numa=None
+            numas = flavor_dict.get("extended",{}).get("numas")
+            if numas:
+                #TODO
+                raise vimconn.vimconnNotFoundException("Flavor with EPA still not implemted")
+                # if len(numas) > 1:
+                #     raise vimconn.vimconnNotFoundException("Cannot find any flavor with more than one numa")
+                # numa=numas[0]
+                # numas = extended.get("numas")
+            for flavor in self.nova.flavors.list():
+                epa = flavor.get_keys()
+                if epa:
+                    continue
+                    #TODO 
+                if flavor.ram != flavor_dict["ram"]:
+                    continue
+                if flavor.vcpus != flavor_dict["vcpus"]:
+                    continue
+                if flavor.disk != flavor_dict["disk"]:
+                    continue
+                return flavor.id
+            raise vimconn.vimconnNotFoundException("Cannot find any flavor matching '{}'".format(str(flavor_dict)))
+        except (nvExceptions.NotFound, nvExceptions.ClientException, ksExceptions.ClientException, ConnectionError) as e:
+            self._format_exception(e)
+
+
     def new_flavor(self, flavor_data, change_name_if_used=True):
         '''Adds a tenant flavor to openstack VIM
         if change_name_if_used is True, it will change name in case of conflict, because it is not supported name repetition
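As an illustration (not part of the commit), get_flavor_id_from_data() above matches plain flavors on ram/vcpus/disk only; any flavor with extra_specs is skipped, and an 'extended'/'numas' request still raises vimconnNotFoundException. A flavor_dict it would accept looks like:

    # hypothetical input for get_flavor_id_from_data()
    flavor_dict = {
        "ram": 2048,    # MiB, compared against flavor.ram
        "vcpus": 2,     # compared against flavor.vcpus
        "disk": 10,     # GiB, compared against flavor.disk
    }
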
@@ -510,7 +606,7 @@ class vimconnector(vimconn.vimconnector):
                 #determine format  http://docs.openstack.org/developer/glance/formats.html
                 if "disk_format" in image_dict:
                     disk_format=image_dict["disk_format"]
-                else: #autodiscover base on extention
+                else: #autodiscover based on extension
                     if image_dict['location'][-6:]==".qcow2":
                         disk_format="qcow2"
                     elif image_dict['location'][-4:]==".vhd":
@@ -600,14 +696,14 @@ class vimconnector(vimconn.vimconnector):
             #Then we filter by the rest of filter fields: checksum
             filtered_list = []
             for image in image_list:
-                image_dict=glance.images.get(image.id)
-                if image_dict['checksum']==filter_dict.get('checksum'):
-                    filtered_list.append(image)
+                image_class=self.glance.images.get(image.id)
+                if 'checksum' not in filter_dict or image_class['checksum']==filter_dict.get('checksum'):
+                    filtered_list.append(image_class.copy())
             return filtered_list
         except (ksExceptions.ClientException, nvExceptions.ClientException, gl1Exceptions.CommunicationError, ConnectionError) as e:
             self._format_exception(e)
 
-    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None):
+    def new_vminstance(self,name,description,start,image_id,flavor_id,net_list,cloud_config=None,disk_list=None):
         '''Adds a VM instance to VIM
         Params:
             start: indicates if VM must start or boot in pause mode. Ignored
@@ -621,48 +717,56 @@ class vimconnector(vimconn.vimconnector):
                 use: 'data', 'bridge',  'mgmt'
                 type: 'virtual', 'PF', 'VF', 'VFnotShared'
                 vim_id: filled/added by this function
+                floating_ip: True/False (or it can be None)
                 #TODO ip, security groups
         Returns the instance identifier
         '''
-        self.logger.debug("Creating VM image '%s' flavor '%s' nics='%s'",image_id, flavor_id,str(net_list))
+        self.logger.debug("new_vminstance input: image='%s' flavor='%s' nics='%s'",image_id, flavor_id,str(net_list))
         try:
             metadata={}
             net_list_vim=[]
+            external_network=[] #list of external networks to be connected to instance, later on used to create floating_ip
             self._reload_connection()
             metadata_vpci={} #For a specific neutron plugin 
             for net in net_list:
                 if not net.get("net_id"): #skip non connected iface
                     continue
-                if net["type"]=="virtual":
-                    net_list_vim.append({'net-id': net["net_id"]})
-                    if "vpci" in net:
-                        metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
-                elif net["type"]=="PF":
-                    self.logger.warn("new_vminstance: Warning, can not connect a passthrough interface ")
-                    #TODO insert this when openstack consider passthrough ports as openstack neutron ports
-                else: #VF
-                    if "vpci" in net:
-                        if "VF" not in metadata_vpci:
-                            metadata_vpci["VF"]=[]
-                        metadata_vpci["VF"].append([ net["vpci"], "" ])
+                if net["type"]=="virtual" or net["type"]=="VF":
                     port_dict={
-                         "network_id": net["net_id"],
-                         "name": net.get("name"),
-                         "binding:vnic_type": "direct", 
-                         "admin_state_up": True
-                    }
+                        "network_id": net["net_id"],
+                        "name": net.get("name"),
+                        "admin_state_up": True
+                    }    
+                    if net["type"]=="virtual":
+                        if "vpci" in net:
+                            metadata_vpci[ net["net_id"] ] = [[ net["vpci"], "" ]]
+                    else: # for VF
+                        if "vpci" in net:
+                            if "VF" not in metadata_vpci:
+                                metadata_vpci["VF"]=[]
+                            metadata_vpci["VF"].append([ net["vpci"], "" ])
+                        port_dict["binding:vnic_type"]="direct"
                     if not port_dict["name"]:
                     if not port_dict["name"]:
-                        port_dict["name"] = name
+                        port_dict["name"]=name
                     if net.get("mac_address"):
                         port_dict["mac_address"]=net["mac_address"]
                     if net.get("mac_address"):
                         port_dict["mac_address"]=net["mac_address"]
-                    #TODO: manage having SRIOV without vlan tag
-                    #if net["type"] == "VFnotShared"
-                    #    port_dict["vlan"]=0
+                    if net.get("port_security") == False:
+                        port_dict["port_security_enabled"]=net["port_security"]
                     new_port = self.neutron.create_port({"port": port_dict })
                     net["mac_adress"] = new_port["port"]["mac_address"]
                     net["vim_id"] = new_port["port"]["id"]
-                    net["ip"] = new_port["port"].get("fixed_ips",[{}])[0].get("ip_address")
+                    net["ip"] = new_port["port"].get("fixed_ips", [{}])[0].get("ip_address")
                     net_list_vim.append({"port-id": new_port["port"]["id"]})
+                else:   # for PF
+                    self.logger.warn("new_vminstance: Warning, can not connect a passthrough interface ")
+                    #TODO insert this when openstack consider passthrough ports as openstack neutron ports
+                if net.get('floating_ip', False):
+                    net['exit_on_floating_ip_error'] = True
+                    external_network.append(net)
+                elif net['use'] == 'mgmt' and self.config.get('use_floating_ip'):
+                    net['exit_on_floating_ip_error'] = False
+                    external_network.append(net)
+
             if metadata_vpci:
                 metadata = {"pci_assignement": json.dumps(metadata_vpci)}
                 if len(metadata["pci_assignement"]) >255:
@@ -677,54 +781,184 @@ class vimconnector(vimconn.vimconnector):
             security_groups   = self.config.get('security_groups')
             if type(security_groups) is str:
                 security_groups = ( security_groups, )
+            #cloud config
+            userdata=None
+            config_drive = None
             if isinstance(cloud_config, dict):
-                userdata="#cloud-config\nusers:\n"
-                #default user
-                if "key-pairs" in cloud_config:
-                    userdata += "  - default:\n    ssh-authorized-keys:\n"
-                    for key in cloud_config["key-pairs"]:
-                        userdata += "      - '{key}'\n".format(key=key)
-                for user in cloud_config.get("users",[]):
-                    userdata += "  - name: {name}\n    sudo: ALL=(ALL) NOPASSWD:ALL\n".format(name=user["name"])
-                    if "user-info" in user:
-                        userdata += "    gecos: {}'\n".format(user["user-info"])
-                    if user.get("key-pairs"):
-                        userdata += "    ssh-authorized-keys:\n"
-                        for key in user["key-pairs"]:
-                            userdata += "      - '{key}'\n".format(key=key)
+                if cloud_config.get("user-data"):
+                    userdata=cloud_config["user-data"]
+                if cloud_config.get("boot-data-drive") != None:
+                    config_drive = cloud_config["boot-data-drive"]
+                if cloud_config.get("config-files") or cloud_config.get("users") or cloud_config.get("key-pairs"):
+                    if userdata:
+                        raise vimconn.vimconnConflictException("Cloud-config cannot contain both 'userdata' and 'config-files'/'users'/'key-pairs'")
+                    userdata_dict={}
+                    #default user
+                    if cloud_config.get("key-pairs"):
+                        userdata_dict["ssh-authorized-keys"] = cloud_config["key-pairs"]
+                        userdata_dict["users"] = [{"default": None, "ssh-authorized-keys": cloud_config["key-pairs"] }]
+                    if cloud_config.get("users"):
+                        if "users" not in userdata_dict:
+                            userdata_dict["users"] = [ "default" ]
+                        for user in cloud_config["users"]:
+                            user_info = {
+                                "name" : user["name"],
+                                "sudo": "ALL = (ALL)NOPASSWD:ALL"
+                            }
+                            if "user-info" in user:
+                                user_info["gecos"] = user["user-info"]
+                            if user.get("key-pairs"):
+                                user_info["ssh-authorized-keys"] = user["key-pairs"]
+                            userdata_dict["users"].append(user_info)
+
+                    if cloud_config.get("config-files"):
+                        userdata_dict["write_files"] = []
+                        for file in cloud_config["config-files"]:
+                            file_info = {
+                                "path" : file["dest"],
+                                "content": file["content"]
+                            }
+                            if file.get("encoding"):
+                                file_info["encoding"] = file["encoding"]
+                            if file.get("permissions"):
+                                file_info["permissions"] = file["permissions"]
+                            if file.get("owner"):
+                                file_info["owner"] = file["owner"]
+                            userdata_dict["write_files"].append(file_info)
+                    userdata = "#cloud-config\n"
+                    userdata += yaml.safe_dump(userdata_dict, indent=4, default_flow_style=False)
                 self.logger.debug("userdata: %s", userdata)
             elif isinstance(cloud_config, str):
                 userdata = cloud_config
                 self.logger.debug("userdata: %s", userdata)
             elif isinstance(cloud_config, str):
                 userdata = cloud_config
-            else:
-                userdata=None    
-            
+
+            #Create additional volumes in case these are present in disk_list
+            block_device_mapping = None
+            base_disk_index = ord('b')
+            if disk_list != None:
+                block_device_mapping = dict()
+                for disk in disk_list:
+                    if 'image_id' in disk:
+                        volume = self.cinder.volumes.create(size = disk['size'],name = name + '_vd' +
+                                    chr(base_disk_index), imageRef = disk['image_id'])
+                    else:
+                        volume = self.cinder.volumes.create(size=disk['size'], name=name + '_vd' +
+                                    chr(base_disk_index))
+                    block_device_mapping['_vd' +  chr(base_disk_index)] = volume.id
+                    base_disk_index += 1
+
+                #wait until volumes are with status available
+                keep_waiting = True
+                elapsed_time = 0
+                while keep_waiting and elapsed_time < volume_timeout:
+                    keep_waiting = False
+                    for volume_id in block_device_mapping.itervalues():
+                        if self.cinder.volumes.get(volume_id).status != 'available':
+                            keep_waiting = True
+                    if keep_waiting:
+                        time.sleep(1)
+                        elapsed_time += 1
+
+                #if we exceeded the timeout rollback
+                if elapsed_time >= volume_timeout:
+                    #delete the volumes we just created
+                    for volume_id in block_device_mapping.itervalues():
+                        self.cinder.volumes.delete(volume_id)
+
+                    #delete ports we just created
+                    for net_item  in net_list_vim:
+                        if 'port-id' in net_item:
+                            self.neutron.delete_port(net_item['port-id'])
+
+                    raise vimconn.vimconnException('Timeout creating volumes for instance ' + name,
+                                                   http_code=vimconn.HTTP_Request_Timeout)
+
             server = self.nova.servers.create(name, image_id, flavor_id, nics=net_list_vim, meta=metadata,
-                                              security_groups   = security_groups,
-                                              availability_zone = self.config.get('availability_zone'),
-                                              key_name          = self.config.get('keypair'),
-                                              userdata=userdata
-                                        ) #, description=description)
-            
-            
+                                              security_groups=security_groups,
+                                              availability_zone=self.config.get('availability_zone'),
+                                              key_name=self.config.get('keypair'),
+                                              userdata=userdata,
+                                              config_drive = config_drive,
+                                              block_device_mapping = block_device_mapping
+                                              )  # , description=description)
             #print "DONE :-)", server
             #print "DONE :-)", server
-            
-#             #TODO   server.add_floating_ip("10.95.87.209")
-#             #To look for a free floating_ip
-#             free_floating_ip = None
-#             for floating_ip in self.neutron.list_floatingips().get("floatingips", () ):
-#                 if not floating_ip["port_id"]:
-#                     free_floating_ip = floating_ip["floating_ip_address"]
-#                     break
-#             if free_floating_ip:
-#                 server.add_floating_ip(free_floating_ip)
-                
-            
+            pool_id = None
+            floating_ips = self.neutron.list_floatingips().get("floatingips", ())
+            for floating_network in external_network:
+                try:
+                    # wait until vm is active
+                    elapsed_time = 0
+                    while elapsed_time < server_timeout:
+                        status = self.nova.servers.get(server.id).status
+                        if status == 'ACTIVE':
+                            break
+                        time.sleep(1)
+                        elapsed_time += 1
+
+                    #if we exceeded the timeout rollback
+                    if elapsed_time >= server_timeout:
+                        raise vimconn.vimconnException('Timeout creating instance ' + name,
+                                                       http_code=vimconn.HTTP_Request_Timeout)
+
+                    assigned = False
+                    while(assigned == False):
+                        if floating_ips:
+                            ip = floating_ips.pop(0)
+                            if not ip.get("port_id", False) and ip.get('tenant_id') == server.tenant_id:
+                                free_floating_ip = ip.get("floating_ip_address")
+                                try:
+                                    fix_ip = floating_network.get('ip')
+                                    server.add_floating_ip(free_floating_ip, fix_ip)
+                                    assigned = True
+                                except Exception as e:
+                                    raise vimconn.vimconnException(type(e).__name__ + ": Cannot create floating_ip "+  str(e), http_code=vimconn.HTTP_Conflict)
+                        else:
+                            #Find the external network
+                            external_nets = list()
+                            for net in self.neutron.list_networks()['networks']:
+                                if net['router:external']:
+                                        external_nets.append(net)
+
+                            if len(external_nets) == 0:
+                                raise vimconn.vimconnException("Cannot create floating_ip automatically since no external "
+                                                               "network is present",
+                                                                http_code=vimconn.HTTP_Conflict)
+                            if len(external_nets) > 1:
+                                raise vimconn.vimconnException("Cannot create floating_ip automatically since multiple "
+                                                               "external networks are present",
+                                                               http_code=vimconn.HTTP_Conflict)
+
+                            pool_id = external_nets[0].get('id')
+                            param = {'floatingip': {'floating_network_id': pool_id, 'tenant_id': server.tenant_id}}
+                            try:
+                                #self.logger.debug("Creating floating IP")
+                                new_floating_ip = self.neutron.create_floatingip(param)
+                                free_floating_ip = new_floating_ip['floatingip']['floating_ip_address']
+                                fix_ip = floating_network.get('ip')
+                                server.add_floating_ip(free_floating_ip, fix_ip)
+                                assigned=True
+                            except Exception as e:
+                                raise vimconn.vimconnException(type(e).__name__ + ": Cannot assign floating_ip "+  str(e), http_code=vimconn.HTTP_Conflict)
+                except Exception as e:
+                    if not floating_network['exit_on_floating_ip_error']:
+                        self.logger.warn("Cannot create floating_ip. %s", str(e))
+                        continue
+                    self.delete_vminstance(server.id)
+                    raise
+
             return server.id
 #        except nvExceptions.NotFound as e:
 #            error_value=-vimconn.HTTP_Not_Found
 #            error_text= "vm instance %s not found" % vm_id
-        except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError
-                ) as e:
+        except (ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
+            # delete the volumes we just created
+            if block_device_mapping != None:
+                for volume_id in block_device_mapping.itervalues():
+                    self.cinder.volumes.delete(volume_id)
+
+            # delete ports we just created
+            for net_item in net_list_vim:
+                if 'port-id' in net_item:
+                    self.neutron.delete_port(net_item['port-id'])
             self._format_exception(e)
         except TypeError as e:
             raise vimconn.vimconnException(type(e).__name__ + ": "+  str(e), http_code=vimconn.HTTP_Bad_Request)
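To tie the new parameters together, an illustrative (non-authoritative) cloud_config and disk_list as new_vminstance() now accepts them: the cloud_config dict is rendered into '#cloud-config' user-data with yaml.safe_dump, and each disk_list entry becomes a Cinder volume added to block_device_mapping before the server is created.

    # hypothetical inputs for new_vminstance()
    cloud_config = {
        "key-pairs": ["<ssh-public-key>"],                         # injected for the default user
        "users": [{"name": "osm", "key-pairs": ["<ssh-public-key>"]}],
        "config-files": [{"dest": "/etc/motd", "content": "hello\n"}],
        "boot-data-drive": True,                                   # passed to nova as config_drive
    }
    disk_list = [
        {"size": 10},                                              # empty 10 GB volume, named <vm-name>_vdb
        {"size": 20, "image_id": "GLANCE-IMAGE-UUID"},             # volume created from an image, named <vm-name>_vdc
    ]
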
@@ -804,7 +1038,32 @@ class vimconnector(vimconn.vimconnector):
                     self.neutron.delete_port(p["id"])
                 except Exception as e:
                     self.logger.error("Error deleting port: " + type(e).__name__ + ": "+  str(e))
                     self.neutron.delete_port(p["id"])
                 except Exception as e:
                     self.logger.error("Error deleting port: " + type(e).__name__ + ": "+  str(e))
+
+            #commented because detaching the volumes makes the servers.delete not work properly ?!?
+            #dettach volumes attached
+            server = self.nova.servers.get(vm_id)
+            volumes_attached_dict = server._info['os-extended-volumes:volumes_attached']
+            #for volume in volumes_attached_dict:
+            #    self.cinder.volumes.detach(volume['id'])
+
             self.nova.servers.delete(vm_id)
+
+            #delete volumes.
+            #Although having detached them should have them  in active status
+            #we ensure in this loop
+            keep_waiting = True
+            elapsed_time = 0
+            while keep_waiting and elapsed_time < volume_timeout:
+                keep_waiting = False
+                for volume in volumes_attached_dict:
+                    if self.cinder.volumes.get(volume['id']).status != 'available':
+                        keep_waiting = True
+                    else:
+                        self.cinder.volumes.delete(volume['id'])
+                if keep_waiting:
+                    time.sleep(1)
+                    elapsed_time += 1
+
             return vm_id
         except (nvExceptions.NotFound, ksExceptions.ClientException, nvExceptions.ClientException, ConnectionError) as e:
             self._format_exception(e)